--- /dev/null
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/x86 4.4.8 Kernel Configuration
+#
+# CONFIG_64BIT is not set
+CONFIG_X86_32=y
+CONFIG_X86=y
+CONFIG_INSTRUCTION_DECODER=y
+CONFIG_PERF_EVENTS_INTEL_UNCORE=y
+CONFIG_OUTPUT_FORMAT="elf32-i386"
+CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_MMU=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
+CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_X86_32_SMP=y
+CONFIG_X86_32_LAZY_GS=y
+CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx"
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_PGTABLE_LEVELS=2
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_EXTABLE_SORT=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+# CONFIG_COMPILE_TEST is not set
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_XZ=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_HAVE_KERNEL_LZ4=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_XZ is not set
+# CONFIG_KERNEL_LZO is not set
+# CONFIG_KERNEL_LZ4 is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_CROSS_MEMORY_ATTACH=y
+CONFIG_FHANDLE=y
+CONFIG_USELIB=y
+CONFIG_AUDIT=y
+CONFIG_HAVE_ARCH_AUDITSYSCALL=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_WATCH=y
+CONFIG_AUDIT_TREE=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
+# CONFIG_IRQ_DOMAIN_DEBUG is not set
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_ARCH_CLOCKSOURCE_DATA=y
+CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+#
+# CPU/Task time and stats accounting
+#
+CONFIG_TICK_CPU_ACCOUNTING=y
+# CONFIG_IRQ_TIME_ACCOUNTING is not set
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+
+#
+# RCU Subsystem
+#
+CONFIG_PREEMPT_RCU=y
+# CONFIG_RCU_EXPERT is not set
+CONFIG_SRCU=y
+# CONFIG_TASKS_RCU is not set
+CONFIG_RCU_STALL_COMMON=y
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_RCU_EXPEDITE_BOOT is not set
+# CONFIG_BUILD_BIN2C is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
+CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_FREEZER=y
+# CONFIG_CGROUP_PIDS is not set
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_PAGE_COUNTER=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_MEMCG_SWAP_ENABLED=y
+CONFIG_MEMCG_KMEM=y
+# CONFIG_CGROUP_HUGETLB is not set
+# CONFIG_CGROUP_PERF is not set
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_CFS_BANDWIDTH is not set
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_BLK_CGROUP=y
+# CONFIG_DEBUG_BLK_CGROUP is not set
+CONFIG_CGROUP_WRITEBACK=y
+# CONFIG_CHECKPOINT_RESTORE is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="ramfs/initramfs.i386"
+CONFIG_INITRAMFS_ROOT_UID=0
+CONFIG_INITRAMFS_ROOT_GID=0
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_XZ=y
+CONFIG_RD_LZO=y
+CONFIG_RD_LZ4=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_HAVE_UID16=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_HAVE_PCSPKR_PLATFORM=y
+CONFIG_BPF=y
+# CONFIG_EXPERT is not set
+CONFIG_UID16=y
+CONFIG_MULTIUSER=y
+CONFIG_SGETMASK_SYSCALL=y
+CONFIG_SYSFS_SYSCALL=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+# CONFIG_BPF_SYSCALL is not set
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_ADVISE_SYSCALLS=y
+# CONFIG_USERFAULTFD is not set
+CONFIG_PCI_QUIRKS=y
+CONFIG_MEMBARRIER=y
+# CONFIG_EMBEDDED is not set
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+CONFIG_SLUB_CPU_PARTIAL=y
+# CONFIG_SYSTEM_DATA_VERIFICATION is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_KEXEC_CORE=y
+CONFIG_OPROFILE=y
+# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_OPROFILE_NMI_TIMER=y
+CONFIG_KPROBES=y
+# CONFIG_JUMP_LABEL is not set
+CONFIG_KPROBES_ON_FTRACE=y
+CONFIG_UPROBES=y
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_KRETPROBES=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
+CONFIG_HAVE_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_PERF_EVENTS_NMI=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_HAVE_CC_STACKPROTECTOR=y
+# CONFIG_CC_STACKPROTECTOR is not set
+CONFIG_CC_STACKPROTECTOR_NONE=y
+# CONFIG_CC_STACKPROTECTOR_REGULAR is not set
+# CONFIG_CC_STACKPROTECTOR_STRONG is not set
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
+CONFIG_HAVE_COPY_THREAD_TLS=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_OLD_SIGACTION=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_MODULE_SIG is not set
+# CONFIG_MODULE_COMPRESS is not set
+CONFIG_MODULES_TREE_LOOKUP=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+CONFIG_BLK_DEV_THROTTLING=y
+# CONFIG_BLK_CMDLINE_PARSER is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_AIX_PARTITION is not set
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+# CONFIG_CMDLINE_PARTITION is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
+CONFIG_QUEUED_SPINLOCKS=y
+CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
+CONFIG_QUEUED_RWLOCKS=y
+CONFIG_FREEZER=y
+
+#
+# Processor type and features
+#
+CONFIG_ZONE_DMA=y
+CONFIG_SMP=y
+CONFIG_X86_FEATURE_NAMES=y
+CONFIG_X86_MPPARSE=y
+# CONFIG_X86_BIGSMP is not set
+CONFIG_X86_EXTENDED_PLATFORM=y
+# CONFIG_X86_GOLDFISH is not set
+# CONFIG_X86_INTEL_MID is not set
+# CONFIG_X86_INTEL_QUARK is not set
+# CONFIG_X86_INTEL_LPSS is not set
+# CONFIG_X86_AMD_PLATFORM_DEVICE is not set
+# CONFIG_IOSF_MBI is not set
+# CONFIG_X86_RDC321X is not set
+# CONFIG_X86_32_NON_STANDARD is not set
+CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_X86_32_IRIS is not set
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+# CONFIG_HYPERVISOR_GUEST is not set
+CONFIG_NO_BOOTMEM=y
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+CONFIG_M686=y
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MEFFICEON is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MELAN is not set
+# CONFIG_MGEODEGX1 is not set
+# CONFIG_MGEODE_LX is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_MVIAC7 is not set
+# CONFIG_MCORE2 is not set
+# CONFIG_MATOM is not set
+CONFIG_X86_GENERIC=y
+CONFIG_X86_INTERNODE_CACHE_SHIFT=6
+CONFIG_X86_L1_CACHE_SHIFT=6
+# CONFIG_X86_PPRO_FENCE is not set
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+CONFIG_X86_TSC=y
+CONFIG_X86_CMPXCHG64=y
+CONFIG_X86_CMOV=y
+CONFIG_X86_MINIMUM_CPU_FAMILY=5
+CONFIG_X86_DEBUGCTLMSR=y
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR=y
+CONFIG_CPU_SUP_TRANSMETA_32=y
+# CONFIG_HPET_TIMER is not set
+CONFIG_DMI=y
+CONFIG_NR_CPUS=8
+# CONFIG_SCHED_SMT is not set
+CONFIG_SCHED_MC=y
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_COUNT=y
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_INTEL=y
+CONFIG_X86_MCE_AMD=y
+# CONFIG_X86_ANCIENT_MCE is not set
+CONFIG_X86_MCE_THRESHOLD=y
+# CONFIG_X86_MCE_INJECT is not set
+CONFIG_X86_THERMAL_VECTOR=y
+# CONFIG_X86_LEGACY_VM86 is not set
+# CONFIG_VM86 is not set
+CONFIG_X86_16BIT=y
+CONFIG_X86_ESPFIX32=y
+# CONFIG_TOSHIBA is not set
+# CONFIG_I8K is not set
+CONFIG_X86_REBOOTFIXUPS=y
+CONFIG_MICROCODE=y
+CONFIG_MICROCODE_INTEL=y
+CONFIG_MICROCODE_AMD=y
+CONFIG_MICROCODE_OLD_INTERFACE=y
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+# CONFIG_NOHIGHMEM is not set
+CONFIG_HIGHMEM4G=y
+# CONFIG_HIGHMEM64G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_HIGHMEM=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ILLEGAL_POINTER_VALUE=0
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_STATIC=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_MEMORY_BALLOON=y
+# CONFIG_COMPACTION is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_MEMORY_FAILURE is not set
+# CONFIG_TRANSPARENT_HUGEPAGE is not set
+# CONFIG_CLEANCACHE is not set
+# CONFIG_FRONTSWAP is not set
+# CONFIG_CMA is not set
+# CONFIG_ZPOOL is not set
+# CONFIG_ZBUD is not set
+# CONFIG_ZSMALLOC is not set
+CONFIG_GENERIC_EARLY_IOREMAP=y
+CONFIG_ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT=y
+# CONFIG_IDLE_PAGE_TRACKING is not set
+CONFIG_HIGHPTE=y
+CONFIG_X86_CHECK_BIOS_CORRUPTION=y
+CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
+CONFIG_X86_RESERVE_LOW=64
+# CONFIG_MATH_EMULATION is not set
+CONFIG_MTRR=y
+# CONFIG_MTRR_SANITIZER is not set
+CONFIG_X86_PAT=y
+CONFIG_ARCH_USES_PG_UNCACHED=y
+CONFIG_ARCH_RANDOM=y
+CONFIG_X86_SMAP=y
+# CONFIG_X86_INTEL_MPX is not set
+CONFIG_EFI=y
+# CONFIG_EFI_STUB is not set
+# CONFIG_SECCOMP is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+CONFIG_SCHED_HRTICK=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+# CONFIG_KEXEC_JUMP is not set
+CONFIG_PHYSICAL_START=0x1000000
+CONFIG_RELOCATABLE=y
+# CONFIG_RANDOMIZE_BASE is not set
+CONFIG_X86_NEED_RELOCS=y
+CONFIG_PHYSICAL_ALIGN=0x1000000
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
+# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
+# CONFIG_COMPAT_VDSO is not set
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_MODIFY_LDT_SYSCALL=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+
+#
+# Power management and ACPI options
+#
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_HIBERNATE_CALLBACKS=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION=""
+CONFIG_PM_SLEEP=y
+CONFIG_PM_SLEEP_SMP=y
+# CONFIG_PM_AUTOSLEEP is not set
+# CONFIG_PM_WAKELOCKS is not set
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+# CONFIG_PM_ADVANCED_DEBUG is not set
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_PM_SLEEP_DEBUG=y
+CONFIG_PM_TRACE=y
+CONFIG_PM_TRACE_RTC=y
+# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
+CONFIG_ACPI=y
+CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
+CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
+CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
+# CONFIG_ACPI_DEBUGGER is not set
+CONFIG_ACPI_SLEEP=y
+# CONFIG_ACPI_PROCFS_POWER is not set
+CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
+# CONFIG_ACPI_EC_DEBUGFS is not set
+# CONFIG_ACPI_AC is not set
+# CONFIG_ACPI_BATTERY is not set
+CONFIG_ACPI_BUTTON=y
+CONFIG_ACPI_VIDEO=y
+CONFIG_ACPI_FAN=y
+CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_CPU_FREQ_PSS=y
+CONFIG_ACPI_PROCESSOR_IDLE=y
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_HOTPLUG_CPU=y
+# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set
+CONFIG_ACPI_THERMAL=y
+# CONFIG_ACPI_CUSTOM_DSDT is not set
+# CONFIG_ACPI_INITRD_TABLE_OVERRIDE is not set
+# CONFIG_ACPI_DEBUG is not set
+CONFIG_ACPI_PCI_SLOT=y
+CONFIG_X86_PM_TIMER=y
+CONFIG_ACPI_CONTAINER=y
+CONFIG_ACPI_HOTPLUG_IOAPIC=y
+# CONFIG_ACPI_SBS is not set
+# CONFIG_ACPI_HED is not set
+# CONFIG_ACPI_CUSTOM_METHOD is not set
+# CONFIG_ACPI_BGRT is not set
+# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
+CONFIG_HAVE_ACPI_APEI=y
+CONFIG_HAVE_ACPI_APEI_NMI=y
+# CONFIG_ACPI_APEI is not set
+# CONFIG_ACPI_EXTLOG is not set
+# CONFIG_PMIC_OPREGION is not set
+# CONFIG_SFI is not set
+# CONFIG_APM is not set
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+# CONFIG_CPU_FREQ_STAT is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+
+#
+# CPU frequency scaling drivers
+#
+# CONFIG_X86_INTEL_PSTATE is not set
+# CONFIG_X86_PCC_CPUFREQ is not set
+CONFIG_X86_ACPI_CPUFREQ=y
+CONFIG_X86_ACPI_CPUFREQ_CPB=y
+# CONFIG_X86_POWERNOW_K6 is not set
+# CONFIG_X86_POWERNOW_K7 is not set
+# CONFIG_X86_POWERNOW_K8 is not set
+# CONFIG_X86_AMD_FREQ_SENSITIVITY is not set
+# CONFIG_X86_GX_SUSPMOD is not set
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+# CONFIG_X86_SPEEDSTEP_ICH is not set
+# CONFIG_X86_SPEEDSTEP_SMI is not set
+# CONFIG_X86_P4_CLOCKMOD is not set
+# CONFIG_X86_CPUFREQ_NFORCE2 is not set
+# CONFIG_X86_LONGRUN is not set
+# CONFIG_X86_LONGHAUL is not set
+# CONFIG_X86_E_POWERSAVER is not set
+
+#
+# shared options
+#
+# CONFIG_X86_SPEEDSTEP_LIB is not set
+
+#
+# CPU Idle
+#
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
+# CONFIG_INTEL_IDLE is not set
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+# CONFIG_PCI_GODIRECT is not set
+CONFIG_PCI_GOANY=y
+CONFIG_PCI_BIOS=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_HOTPLUG_PCI_PCIE is not set
+CONFIG_PCIEAER=y
+# CONFIG_PCIE_ECRC is not set
+# CONFIG_PCIEAER_INJECT is not set
+CONFIG_PCIEASPM=y
+# CONFIG_PCIEASPM_DEBUG is not set
+CONFIG_PCIEASPM_DEFAULT=y
+# CONFIG_PCIEASPM_POWERSAVE is not set
+# CONFIG_PCIEASPM_PERFORMANCE is not set
+CONFIG_PCIE_PME=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_MSI_IRQ_DOMAIN=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+# CONFIG_PCI_STUB is not set
+CONFIG_HT_IRQ=y
+# CONFIG_PCI_IOV is not set
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+CONFIG_PCI_LABEL=y
+
+#
+# PCI host controller drivers
+#
+CONFIG_ISA_DMA_API=y
+# CONFIG_ISA is not set
+# CONFIG_SCx200 is not set
+# CONFIG_OLPC is not set
+# CONFIG_ALIX is not set
+# CONFIG_NET5501 is not set
+# CONFIG_GEOS is not set
+CONFIG_AMD_NB=y
+CONFIG_PCCARD=y
+CONFIG_PCMCIA=y
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_CARDBUS=y
+
+#
+# PC-card bridges
+#
+CONFIG_YENTA=y
+CONFIG_YENTA_O2=y
+CONFIG_YENTA_RICOH=y
+CONFIG_YENTA_TI=y
+CONFIG_YENTA_ENE_TUNE=y
+CONFIG_YENTA_TOSHIBA=y
+# CONFIG_PD6729 is not set
+# CONFIG_I82092 is not set
+CONFIG_PCCARD_NONSTATIC=y
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_HOTPLUG_PCI_COMPAQ is not set
+# CONFIG_HOTPLUG_PCI_IBM is not set
+CONFIG_HOTPLUG_PCI_ACPI=y
+# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
+# CONFIG_HOTPLUG_PCI_CPCI is not set
+# CONFIG_HOTPLUG_PCI_SHPC is not set
+# CONFIG_RAPIDIO is not set
+# CONFIG_X86_SYSFB is not set
+
+#
+# Executable file formats / Emulations
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_COREDUMP=y
+CONFIG_HAVE_ATOMIC_IOMAP=y
+CONFIG_PMC_ATOM=y
+CONFIG_NET=y
+CONFIG_NET_INGRESS=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_DIAG is not set
+CONFIG_UNIX=y
+# CONFIG_UNIX_DIAG is not set
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+CONFIG_NET_IP_TUNNEL=y
+CONFIG_IP_MROUTE=y
+# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_NET_UDP_TUNNEL is not set
+# CONFIG_NET_FOU is not set
+# CONFIG_NET_FOU_IP_TUNNELS is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_LRO=y
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+CONFIG_TCP_CONG_CUBIC=y
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_TCP_CONG_HSTCP is not set
+# CONFIG_TCP_CONG_HYBLA is not set
+# CONFIG_TCP_CONG_VEGAS is not set
+# CONFIG_TCP_CONG_SCALABLE is not set
+# CONFIG_TCP_CONG_LP is not set
+# CONFIG_TCP_CONG_VENO is not set
+# CONFIG_TCP_CONG_YEAH is not set
+# CONFIG_TCP_CONG_ILLINOIS is not set
+# CONFIG_TCP_CONG_DCTCP is not set
+# CONFIG_TCP_CONG_CDG is not set
+CONFIG_DEFAULT_CUBIC=y
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_IPV6_ILA is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+# CONFIG_IPV6_VTI is not set
+CONFIG_IPV6_SIT=y
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_GRE is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+CONFIG_NETLABEL=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NET_PTP_CLASSIFY=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_INGRESS=y
+CONFIG_NETFILTER_NETLINK=y
+CONFIG_NETFILTER_NETLINK_ACCT=y
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+# CONFIG_NF_CONNTRACK_MARK is not set
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_PROCFS=y
+# CONFIG_NF_CONNTRACK_EVENTS is not set
+# CONFIG_NF_CONNTRACK_TIMEOUT is not set
+# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
+# CONFIG_NF_CT_PROTO_DCCP is not set
+# CONFIG_NF_CT_PROTO_SCTP is not set
+# CONFIG_NF_CT_PROTO_UDPLITE is not set
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+CONFIG_NF_CONNTRACK_FTP=y
+# CONFIG_NF_CONNTRACK_H323 is not set
+CONFIG_NF_CONNTRACK_IRC=y
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_SNMP is not set
+# CONFIG_NF_CONNTRACK_PPTP is not set
+# CONFIG_NF_CONNTRACK_SANE is not set
+CONFIG_NF_CONNTRACK_SIP=y
+# CONFIG_NF_CONNTRACK_TFTP is not set
+CONFIG_NF_CT_NETLINK=y
+# CONFIG_NF_CT_NETLINK_TIMEOUT is not set
+# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set
+CONFIG_NF_NAT=y
+CONFIG_NF_NAT_NEEDED=y
+# CONFIG_NF_NAT_AMANDA is not set
+CONFIG_NF_NAT_FTP=y
+CONFIG_NF_NAT_IRC=y
+CONFIG_NF_NAT_SIP=y
+# CONFIG_NF_NAT_TFTP is not set
+# CONFIG_NF_NAT_REDIRECT is not set
+# CONFIG_NF_TABLES is not set
+CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+# CONFIG_NETFILTER_XT_MARK is not set
+# CONFIG_NETFILTER_XT_CONNMARK is not set
+
+#
+# Xtables targets
+#
+# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
+# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+# CONFIG_NETFILTER_XT_TARGET_HL is not set
+# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+# CONFIG_NETFILTER_XT_TARGET_LOG is not set
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+# CONFIG_NETFILTER_XT_NAT is not set
+# CONFIG_NETFILTER_XT_TARGET_NETMAP is not set
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+# CONFIG_NETFILTER_XT_TARGET_REDIRECT is not set
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
+# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+
+#
+# Xtables matches
+#
+# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_BPF is not set
+# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+# CONFIG_NETFILTER_XT_MATCH_CPU is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ECN is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
+# CONFIG_NETFILTER_XT_MATCH_HL is not set
+# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set
+CONFIG_NETFILTER_XT_MATCH_NFACCT=y
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+# CONFIG_NETFILTER_XT_MATCH_SOCKET is not set
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_IP_SET is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+# CONFIG_NF_DUP_IPV4 is not set
+# CONFIG_NF_LOG_ARP is not set
+# CONFIG_NF_LOG_IPV4 is not set
+CONFIG_NF_REJECT_IPV4=y
+CONFIG_NF_NAT_IPV4=y
+# CONFIG_NF_NAT_MASQUERADE_IPV4 is not set
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_RPFILTER is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+# CONFIG_IP_NF_TARGET_SYNPROXY is not set
+# CONFIG_IP_NF_NAT is not set
+CONFIG_IP_NF_MANGLE=y
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_TTL is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_SECURITY is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+# CONFIG_NF_DEFRAG_IPV6 is not set
+# CONFIG_NF_CONNTRACK_IPV6 is not set
+# CONFIG_NF_DUP_IPV6 is not set
+# CONFIG_NF_REJECT_IPV6 is not set
+# CONFIG_NF_LOG_IPV6 is not set
+# CONFIG_IP6_NF_IPTABLES is not set
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+CONFIG_STP=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+CONFIG_HAVE_NET_DSA=y
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=y
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+# CONFIG_6LOWPAN is not set
+# CONFIG_IEEE802154 is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+# CONFIG_NET_SCH_CBQ is not set
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_HFSC is not set
+# CONFIG_NET_SCH_PRIO is not set
+# CONFIG_NET_SCH_MULTIQ is not set
+# CONFIG_NET_SCH_RED is not set
+# CONFIG_NET_SCH_SFB is not set
+# CONFIG_NET_SCH_SFQ is not set
+# CONFIG_NET_SCH_TEQL is not set
+# CONFIG_NET_SCH_TBF is not set
+# CONFIG_NET_SCH_GRED is not set
+# CONFIG_NET_SCH_DSMARK is not set
+# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DRR is not set
+# CONFIG_NET_SCH_MQPRIO is not set
+# CONFIG_NET_SCH_CHOKE is not set
+# CONFIG_NET_SCH_QFQ is not set
+# CONFIG_NET_SCH_CODEL is not set
+# CONFIG_NET_SCH_FQ_CODEL is not set
+# CONFIG_NET_SCH_FQ is not set
+# CONFIG_NET_SCH_HHF is not set
+# CONFIG_NET_SCH_PIE is not set
+# CONFIG_NET_SCH_INGRESS is not set
+# CONFIG_NET_SCH_PLUG is not set
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+# CONFIG_NET_CLS_BASIC is not set
+# CONFIG_NET_CLS_TCINDEX is not set
+# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS_FW is not set
+# CONFIG_NET_CLS_U32 is not set
+# CONFIG_NET_CLS_RSVP is not set
+# CONFIG_NET_CLS_RSVP6 is not set
+# CONFIG_NET_CLS_FLOW is not set
+CONFIG_NET_CLS_CGROUP=y
+# CONFIG_NET_CLS_BPF is not set
+# CONFIG_NET_CLS_FLOWER is not set
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+# CONFIG_NET_EMATCH_CMP is not set
+# CONFIG_NET_EMATCH_NBYTE is not set
+# CONFIG_NET_EMATCH_U32 is not set
+# CONFIG_NET_EMATCH_META is not set
+# CONFIG_NET_EMATCH_TEXT is not set
+# CONFIG_NET_EMATCH_CANID is not set
+CONFIG_NET_CLS_ACT=y
+# CONFIG_NET_ACT_POLICE is not set
+# CONFIG_NET_ACT_GACT is not set
+# CONFIG_NET_ACT_MIRRED is not set
+# CONFIG_NET_ACT_IPT is not set
+# CONFIG_NET_ACT_NAT is not set
+# CONFIG_NET_ACT_PEDIT is not set
+# CONFIG_NET_ACT_SIMP is not set
+# CONFIG_NET_ACT_SKBEDIT is not set
+# CONFIG_NET_ACT_CSUM is not set
+# CONFIG_NET_ACT_VLAN is not set
+# CONFIG_NET_ACT_BPF is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_OPENVSWITCH is not set
+# CONFIG_VSOCKETS is not set
+# CONFIG_NETLINK_MMAP is not set
+# CONFIG_NETLINK_DIAG is not set
+# CONFIG_MPLS is not set
+# CONFIG_HSR is not set
+# CONFIG_NET_SWITCHDEV is not set
+# CONFIG_NET_L3_MASTER_DEV is not set
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_XPS=y
+# CONFIG_CGROUP_NET_PRIO is not set
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_NET_RX_BUSY_POLL=y
+CONFIG_BQL=y
+CONFIG_NET_FLOW_LIMIT=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+CONFIG_HAMRADIO=y
+
+#
+# Packet Radio protocols
+#
+# CONFIG_AX25 is not set
+CONFIG_CAN=y
+CONFIG_CAN_RAW=y
+CONFIG_CAN_BCM=y
+CONFIG_CAN_GW=y
+
+#
+# CAN Device Drivers
+#
+CONFIG_CAN_VCAN=y
+# CONFIG_CAN_SLCAN is not set
+CONFIG_CAN_DEV=y
+CONFIG_CAN_CALC_BITTIMING=y
+# CONFIG_CAN_LEDS is not set
+# CONFIG_PCH_CAN is not set
+# CONFIG_CAN_SJA1000 is not set
+# CONFIG_CAN_C_CAN is not set
+# CONFIG_CAN_M_CAN is not set
+# CONFIG_CAN_CC770 is not set
+
+#
+# CAN USB interfaces
+#
+# CONFIG_CAN_EMS_USB is not set
+# CONFIG_CAN_ESD_USB2 is not set
+# CONFIG_CAN_GS_USB is not set
+# CONFIG_CAN_KVASER_USB is not set
+CONFIG_CAN_PEAK_USB=y
+# CONFIG_CAN_8DEV_USB is not set
+# CONFIG_CAN_SOFTING is not set
+# CONFIG_CAN_DEBUG_DEVICES is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_CFG80211=y
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_CRDA_SUPPORT=y
+CONFIG_CFG80211_WEXT=y
+# CONFIG_LIB80211 is not set
+CONFIG_MAC80211=y
+CONFIG_MAC80211_HAS_RC=y
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+# CONFIG_MAC80211_RC_MINSTREL_VHT is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+# CONFIG_MAC80211_MESH is not set
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+CONFIG_RFKILL_LEDS=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+# CONFIG_NET_9P_DEBUG is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_NFC is not set
+# CONFIG_LWTUNNEL is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
+CONFIG_ALLOW_DEV_COREDUMP=y
+# CONFIG_DEBUG_DRIVER is not set
+CONFIG_DEBUG_DEVRES=y
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_GENERIC_CPU_DEVICES is not set
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_REGMAP=y
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_FENCE_TRACE is not set
+
+#
+# Bus devices
+#
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+# CONFIG_MTD is not set
+# CONFIG_OF is not set
+CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
+# CONFIG_PARPORT is not set
+CONFIG_PNP=y
+CONFIG_PNP_DEBUG_MESSAGES=y
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_NULL_BLK is not set
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_VIRTIO_BLK=y
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_BLK_DEV_RSXX is not set
+# CONFIG_BLK_DEV_NVME is not set
+
+#
+# Misc devices
+#
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_IBM_ASM is not set
+# CONFIG_PHANTOM is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_BMP085_I2C is not set
+# CONFIG_PCH_PHUB is not set
+# CONFIG_USB_SWITCH_FSA9480 is not set
+# CONFIG_SRAM is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_SENSORS_LIS3_I2C is not set
+
+#
+# Altera FPGA firmware download module
+#
+# CONFIG_ALTERA_STAPL is not set
+# CONFIG_VMWARE_VMCI is not set
+
+#
+# Intel MIC Bus Driver
+#
+
+#
+# SCIF Bus Driver
+#
+
+#
+# Intel MIC Host Driver
+#
+
+#
+# Intel MIC Card Driver
+#
+
+#
+# SCIF Driver
+#
+
+#
+# Intel MIC Coprocessor State Management (COSM) Drivers
+#
+# CONFIG_ECHO is not set
+# CONFIG_CXL_BASE is not set
+# CONFIG_CXL_KERNEL_API is not set
+# CONFIG_CXL_EEH is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_MQ_DEFAULT is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+
+#
+# SCSI Transports
+#
+CONFIG_SCSI_SPI_ATTRS=y
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_ATA_ACPI=y
+# CONFIG_SATA_ZPODD is not set
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
+CONFIG_SATA_AHCI=y
+# CONFIG_SATA_AHCI_PLATFORM is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_ACARD_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+CONFIG_ATA_SFF=y
+
+#
+# SFF controllers with custom DMA interface
+#
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_SX4 is not set
+CONFIG_ATA_BMDMA=y
+
+#
+# SATA SFF controllers with BMDMA
+#
+CONFIG_ATA_PIIX=y
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+
+#
+# PATA SFF controllers with BMDMA
+#
+# CONFIG_PATA_ALI is not set
+CONFIG_PATA_AMD=y
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_ATP867X is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CS5535 is not set
+# CONFIG_PATA_CS5536 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87415 is not set
+CONFIG_PATA_OLDPIIX=y
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
+# CONFIG_PATA_SC1200 is not set
+CONFIG_PATA_SCH=y
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# PIO-only SFF controllers
+#
+# CONFIG_PATA_CMD640_PCI is not set
+CONFIG_PATA_MPIIX=y
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_PCMCIA is not set
+# CONFIG_PATA_RZ1000 is not set
+
+#
+# Generic fallback / legacy drivers
+#
+# CONFIG_PATA_ACPI is not set
+CONFIG_ATA_GENERIC=y
+# CONFIG_PATA_LEGACY is not set
+# CONFIG_MD is not set
+# CONFIG_TARGET_CORE is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+CONFIG_MACINTOSH_DRIVERS=y
+CONFIG_MAC_EMUMOUSEBTN=y
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+# CONFIG_DUMMY is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_NET_FC is not set
+# CONFIG_IFB is not set
+# CONFIG_NET_TEAM is not set
+CONFIG_MACVLAN=y
+# CONFIG_MACVTAP is not set
+# CONFIG_IPVLAN is not set
+# CONFIG_VXLAN is not set
+CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL=y
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_TUN is not set
+# CONFIG_TUN_VNET_CROSS_LE is not set
+CONFIG_VETH=y
+CONFIG_VIRTIO_NET=y
+# CONFIG_NLMON is not set
+# CONFIG_ARCNET is not set
+
+#
+# CAIF transport drivers
+#
+# CONFIG_VHOST_NET is not set
+# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_3COM=y
+# CONFIG_PCMCIA_3C574 is not set
+# CONFIG_PCMCIA_3C589 is not set
+# CONFIG_VORTEX is not set
+# CONFIG_TYPHOON is not set
+CONFIG_NET_VENDOR_ADAPTEC=y
+# CONFIG_ADAPTEC_STARFIRE is not set
+CONFIG_NET_VENDOR_AGERE=y
+# CONFIG_ET131X is not set
+CONFIG_NET_VENDOR_ALTEON=y
+# CONFIG_ACENIC is not set
+# CONFIG_ALTERA_TSE is not set
+CONFIG_NET_VENDOR_AMD=y
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_PCNET32 is not set
+# CONFIG_PCMCIA_NMCLAN is not set
+CONFIG_NET_VENDOR_ARC=y
+CONFIG_NET_VENDOR_ATHEROS=y
+# CONFIG_ATL2 is not set
+# CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_ALX is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+CONFIG_NET_CADENCE=y
+# CONFIG_MACB is not set
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+# CONFIG_BCMGENET is not set
+CONFIG_BNX2=y
+# CONFIG_CNIC is not set
+CONFIG_TIGON3=y
+# CONFIG_BNX2X is not set
+# CONFIG_BNXT is not set
+CONFIG_NET_VENDOR_BROCADE=y
+# CONFIG_BNA is not set
+CONFIG_NET_VENDOR_CAVIUM=y
+CONFIG_NET_VENDOR_CHELSIO=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+# CONFIG_CHELSIO_T4 is not set
+# CONFIG_CHELSIO_T4VF is not set
+CONFIG_NET_VENDOR_CISCO=y
+# CONFIG_ENIC is not set
+# CONFIG_CX_ECAT is not set
+# CONFIG_DNET is not set
+CONFIG_NET_VENDOR_DEC=y
+CONFIG_NET_TULIP=y
+# CONFIG_DE2104X is not set
+# CONFIG_TULIP is not set
+# CONFIG_DE4X5 is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_DM9102 is not set
+# CONFIG_ULI526X is not set
+# CONFIG_PCMCIA_XIRCOM is not set
+CONFIG_NET_VENDOR_DLINK=y
+# CONFIG_DL2K is not set
+# CONFIG_SUNDANCE is not set
+CONFIG_NET_VENDOR_EMULEX=y
+# CONFIG_BE2NET is not set
+CONFIG_NET_VENDOR_EZCHIP=y
+CONFIG_NET_VENDOR_EXAR=y
+# CONFIG_S2IO is not set
+# CONFIG_VXGE is not set
+CONFIG_NET_VENDOR_FUJITSU=y
+# CONFIG_PCMCIA_FMVJ18X is not set
+CONFIG_NET_VENDOR_HP=y
+# CONFIG_HP100 is not set
+CONFIG_NET_VENDOR_INTEL=y
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+# CONFIG_IGB is not set
+# CONFIG_IGBVF is not set
+# CONFIG_IXGB is not set
+# CONFIG_IXGBE is not set
+# CONFIG_IXGBEVF is not set
+# CONFIG_I40E is not set
+# CONFIG_I40EVF is not set
+# CONFIG_FM10K is not set
+CONFIG_NET_VENDOR_I825XX=y
+# CONFIG_JME is not set
+CONFIG_NET_VENDOR_MARVELL=y
+# CONFIG_MVMDIO is not set
+# CONFIG_SKGE is not set
+CONFIG_SKY2=y
+# CONFIG_SKY2_DEBUG is not set
+CONFIG_NET_VENDOR_MELLANOX=y
+# CONFIG_MLX4_EN is not set
+# CONFIG_MLX4_CORE is not set
+# CONFIG_MLX5_CORE is not set
+# CONFIG_MLXSW_CORE is not set
+CONFIG_NET_VENDOR_MICREL=y
+# CONFIG_KS8851_MLL is not set
+# CONFIG_KSZ884X_PCI is not set
+CONFIG_NET_VENDOR_MYRI=y
+# CONFIG_MYRI10GE is not set
+# CONFIG_FEALNX is not set
+CONFIG_NET_VENDOR_NATSEMI=y
+# CONFIG_NATSEMI is not set
+# CONFIG_NS83820 is not set
+CONFIG_NET_VENDOR_8390=y
+# CONFIG_PCMCIA_AXNET is not set
+CONFIG_NE2K_PCI=y
+# CONFIG_PCMCIA_PCNET is not set
+CONFIG_NET_VENDOR_NVIDIA=y
+CONFIG_FORCEDETH=y
+CONFIG_NET_VENDOR_OKI=y
+# CONFIG_PCH_GBE is not set
+# CONFIG_ETHOC is not set
+CONFIG_NET_PACKET_ENGINE=y
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_NET_VENDOR_QLOGIC=y
+# CONFIG_QLA3XXX is not set
+# CONFIG_QLCNIC is not set
+# CONFIG_QLGE is not set
+# CONFIG_NETXEN_NIC is not set
+# CONFIG_QED is not set
+CONFIG_NET_VENDOR_QUALCOMM=y
+CONFIG_NET_VENDOR_REALTEK=y
+# CONFIG_8139CP is not set
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+# CONFIG_8139TOO_8129 is not set
+# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_R8169=y
+CONFIG_NET_VENDOR_RENESAS=y
+CONFIG_NET_VENDOR_RDC=y
+# CONFIG_R6040 is not set
+CONFIG_NET_VENDOR_ROCKER=y
+CONFIG_NET_VENDOR_SAMSUNG=y
+# CONFIG_SXGBE_ETH is not set
+CONFIG_NET_VENDOR_SEEQ=y
+CONFIG_NET_VENDOR_SILAN=y
+# CONFIG_SC92031 is not set
+CONFIG_NET_VENDOR_SIS=y
+# CONFIG_SIS900 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SFC is not set
+CONFIG_NET_VENDOR_SMSC=y
+# CONFIG_PCMCIA_SMC91C92 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_SMSC9420 is not set
+CONFIG_NET_VENDOR_STMICRO=y
+# CONFIG_STMMAC_ETH is not set
+CONFIG_NET_VENDOR_SUN=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NIU is not set
+CONFIG_NET_VENDOR_SYNOPSYS=y
+CONFIG_NET_VENDOR_TEHUTI=y
+# CONFIG_TEHUTI is not set
+CONFIG_NET_VENDOR_TI=y
+# CONFIG_TI_CPSW_ALE is not set
+# CONFIG_TLAN is not set
+CONFIG_NET_VENDOR_VIA=y
+# CONFIG_VIA_RHINE is not set
+# CONFIG_VIA_VELOCITY is not set
+CONFIG_NET_VENDOR_WIZNET=y
+# CONFIG_WIZNET_W5100 is not set
+# CONFIG_WIZNET_W5300 is not set
+CONFIG_NET_VENDOR_XIRCOM=y
+# CONFIG_PCMCIA_XIRC2PS is not set
+CONFIG_FDDI=y
+# CONFIG_DEFXX is not set
+# CONFIG_SKFP is not set
+# CONFIG_HIPPI is not set
+# CONFIG_NET_SB1000 is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_AQUANTIA_PHY is not set
+# CONFIG_AT803X_PHY is not set
+# CONFIG_AMD_PHY is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_TERANETICS_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_BCM7XXX_PHY is not set
+# CONFIG_BCM87XX_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_DP83848_PHY is not set
+# CONFIG_DP83867_PHY is not set
+# CONFIG_MICROCHIP_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_MDIO_BCM_UNIMAC is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+CONFIG_USB_NET_DRIVERS=y
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_RTL8152 is not set
+# CONFIG_USB_LAN78XX is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_USB_HSO is not set
+# CONFIG_USB_IPHETH is not set
+CONFIG_WLAN=y
+# CONFIG_PCMCIA_RAYCS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AIRO is not set
+# CONFIG_ATMEL is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_AIRO_CS is not set
+# CONFIG_PCMCIA_WL3501 is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_ADM8211 is not set
+# CONFIG_RTL8180 is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_MWL8K is not set
+# CONFIG_ATH_CARDS is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_BRCMSMAC is not set
+# CONFIG_BRCMFMAC is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_IWL4965 is not set
+# CONFIG_IWL3945 is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_HERMES is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_WL_MEDIATEK is not set
+CONFIG_RTL_CARDS=y
+# CONFIG_RTL8192CE is not set
+# CONFIG_RTL8192SE is not set
+# CONFIG_RTL8192DE is not set
+# CONFIG_RTL8723AE is not set
+# CONFIG_RTL8723BE is not set
+# CONFIG_RTL8188EE is not set
+# CONFIG_RTL8192EE is not set
+# CONFIG_RTL8821AE is not set
+# CONFIG_RTL8192CU is not set
+# CONFIG_RTL8XXXU is not set
+# CONFIG_WL_TI is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_MWIFIEX is not set
+# CONFIG_CW1200 is not set
+# CONFIG_RSI_91X is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_VMXNET3 is not set
+# CONFIG_FUJITSU_ES is not set
+# CONFIG_ISDN is not set
+# CONFIG_NVM is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_LEDS=y
+CONFIG_INPUT_FF_MEMLESS=y
+CONFIG_INPUT_POLLDEV=y
+CONFIG_INPUT_SPARSEKMAP=y
+# CONFIG_INPUT_MATRIXKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_CYPRESS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+CONFIG_MOUSE_PS2_FOCALTECH=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_CYAPA is not set
+# CONFIG_MOUSE_ELAN_I2C is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+# CONFIG_MOUSE_SYNAPTICS_USB is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_AS5011 is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+# CONFIG_JOYSTICK_XPAD is not set
+CONFIG_INPUT_TABLET=y
+# CONFIG_TABLET_USB_ACECAD is not set
+# CONFIG_TABLET_USB_AIPTEK is not set
+# CONFIG_TABLET_USB_GTCO is not set
+# CONFIG_TABLET_USB_HANWANG is not set
+# CONFIG_TABLET_USB_KBTAB is not set
+# CONFIG_TABLET_SERIAL_WACOM4 is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_PROPERTIES=y
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
+# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
+# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GOODIX is not set
+# CONFIG_TOUCHSCREEN_ILI210X is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELAN is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_WACOM_I2C is not set
+# CONFIG_TOUCHSCREEN_MAX11801 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MMS114 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set
+# CONFIG_TOUCHSCREEN_WM97XX is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2004 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_SX8654 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_E3X0_BUTTON is not set
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_MPU3050 is not set
+# CONFIG_INPUT_APANEL is not set
+# CONFIG_INPUT_WISTRON_BTNS is not set
+# CONFIG_INPUT_ATLAS_BTNS is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_KXTJ9 is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_UINPUT=y
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_CMA3000 is not set
+# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set
+# CONFIG_INPUT_DRV2665_HAPTICS is not set
+# CONFIG_INPUT_DRV2667_HAPTICS is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_SERIO_ARC_PS2 is not set
+# CONFIG_USERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_ROCKETPORT is not set
+# CONFIG_CYCLADES is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_SYNCLINK is not set
+# CONFIG_SYNCLINKMP is not set
+# CONFIG_SYNCLINK_GT is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_ISI is not set
+# CONFIG_N_HDLC is not set
+# CONFIG_N_GSM is not set
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVMEM=y
+CONFIG_DEVKMEM=y
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_EARLYCON=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
+CONFIG_SERIAL_8250_PNP=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DMA=y
+CONFIG_SERIAL_8250_PCI=y
+# CONFIG_SERIAL_8250_CS is not set
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+# CONFIG_SERIAL_8250_FSL is not set
+# CONFIG_SERIAL_8250_DW is not set
+# CONFIG_SERIAL_8250_RT288X is not set
+# CONFIG_SERIAL_8250_FINTEK is not set
+# CONFIG_SERIAL_8250_MID is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_SC16IS7XX is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_PCH_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_SERIAL_FSL_LPUART is not set
+CONFIG_HVC_DRIVER=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+CONFIG_HW_RANDOM_INTEL=y
+CONFIG_HW_RANDOM_AMD=y
+CONFIG_HW_RANDOM_GEODE=y
+CONFIG_HW_RANDOM_VIA=y
+# CONFIG_HW_RANDOM_VIRTIO is not set
+CONFIG_NVRAM=y
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_IPWIRELESS is not set
+# CONFIG_MWAVE is not set
+# CONFIG_PC8736x_GPIO is not set
+# CONFIG_NSC_GPIO is not set
+# CONFIG_RAW_DRIVER is not set
+CONFIG_HPET=y
+# CONFIG_HPET_MMAP is not set
+# CONFIG_HANGCHECK_TIMER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_DEVPORT=y
+# CONFIG_XILLYBUS is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_ACPI_I2C_OPREGION=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+# CONFIG_I2C_CHARDEV is not set
+CONFIG_I2C_MUX=y
+
+#
+# Multiplexer I2C Chip support
+#
+# CONFIG_I2C_MUX_PCA9541 is not set
+# CONFIG_I2C_MUX_REG is not set
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+CONFIG_I2C_I801=y
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_ISMT is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# ACPI drivers
+#
+# CONFIG_I2C_SCMI is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EG20T is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_SLAVE is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_SPI is not set
+# CONFIG_SPMI is not set
+# CONFIG_HSI is not set
+
+#
+# PPS support
+#
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+# CONFIG_PPS_CLIENT_LDISC is not set
+# CONFIG_PPS_CLIENT_GPIO is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+# CONFIG_PTP_1588_CLOCK_PCH is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_BATTERY_BQ27XXX is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_SMB347 is not set
+# CONFIG_BATTERY_GAUGE_LTC2941 is not set
+# CONFIG_POWER_RESET is not set
+# CONFIG_POWER_AVS is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7410 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_K10TEMP is not set
+# CONFIG_SENSORS_FAM15H_POWER is not set
+# CONFIG_SENSORS_APPLESMC is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_DELL_SMM is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_G762 is not set
+# CONFIG_SENSORS_HIH6130 is not set
+# CONFIG_SENSORS_I5500 is not set
+# CONFIG_SENSORS_CORETEMP is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_POWR1220 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LTC2945 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4222 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4260 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX197 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6697 is not set
+# CONFIG_SENSORS_MAX31790 is not set
+# CONFIG_SENSORS_HTU21 is not set
+# CONFIG_SENSORS_MCP3021 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LM95234 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_NTC_THERMISTOR is not set
+# CONFIG_SENSORS_NCT6683 is not set
+# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_NCT7802 is not set
+# CONFIG_SENSORS_NCT7904 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SHTC1 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SCH56XX_COMMON is not set
+# CONFIG_SENSORS_SCH5627 is not set
+# CONFIG_SENSORS_SCH5636 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_ADC128D818 is not set
+# CONFIG_SENSORS_ADS1015 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_INA209 is not set
+# CONFIG_SENSORS_INA2XX is not set
+# CONFIG_SENSORS_TC74 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP103 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VIA_CPUTEMP is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+
+#
+# ACPI drivers
+#
+# CONFIG_SENSORS_ACPI_POWER is not set
+# CONFIG_SENSORS_ATK0110 is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
+# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
+CONFIG_THERMAL_GOV_STEP_WISE=y
+# CONFIG_THERMAL_GOV_BANG_BANG is not set
+# CONFIG_THERMAL_GOV_USER_SPACE is not set
+# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set
+# CONFIG_THERMAL_EMULATION is not set
+# CONFIG_INTEL_POWERCLAMP is not set
+# CONFIG_X86_PKG_TEMP_THERMAL is not set
+# CONFIG_INTEL_SOC_DTS_THERMAL is not set
+# CONFIG_INT340X_THERMAL is not set
+# CONFIG_INTEL_PCH_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_CORE is not set
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_XILINX_WATCHDOG is not set
+# CONFIG_CADENCE_WATCHDOG is not set
+# CONFIG_DW_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
+# CONFIG_ACQUIRE_WDT is not set
+# CONFIG_ADVANTECH_WDT is not set
+# CONFIG_ALIM1535_WDT is not set
+# CONFIG_ALIM7101_WDT is not set
+# CONFIG_F71808E_WDT is not set
+# CONFIG_SP5100_TCO is not set
+# CONFIG_SBC_FITPC2_WATCHDOG is not set
+# CONFIG_EUROTECH_WDT is not set
+# CONFIG_IB700_WDT is not set
+# CONFIG_IBMASR is not set
+# CONFIG_WAFER_WDT is not set
+# CONFIG_I6300ESB_WDT is not set
+# CONFIG_IE6XX_WDT is not set
+# CONFIG_ITCO_WDT is not set
+# CONFIG_IT8712F_WDT is not set
+# CONFIG_IT87_WDT is not set
+# CONFIG_HP_WATCHDOG is not set
+# CONFIG_SC1200_WDT is not set
+# CONFIG_PC87413_WDT is not set
+# CONFIG_NV_TCO is not set
+# CONFIG_60XX_WDT is not set
+# CONFIG_SBC8360_WDT is not set
+# CONFIG_SBC7240_WDT is not set
+# CONFIG_CPU5_WDT is not set
+# CONFIG_SMSC_SCH311X_WDT is not set
+# CONFIG_SMSC37B787_WDT is not set
+# CONFIG_VIA_WDT is not set
+# CONFIG_W83627HF_WDT is not set
+# CONFIG_W83877F_WDT is not set
+# CONFIG_W83977F_WDT is not set
+# CONFIG_MACHZ_WDT is not set
+# CONFIG_SBC_EPX_C3_WATCHDOG is not set
+# CONFIG_BCM7038_WDT is not set
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_CS5535 is not set
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_BCM590XX is not set
+# CONFIG_MFD_AXP20X is not set
+# CONFIG_MFD_CROS_EC is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_DA9062 is not set
+# CONFIG_MFD_DA9063 is not set
+# CONFIG_MFD_DA9150 is not set
+# CONFIG_MFD_DLN2 is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_LPC_ICH is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_MFD_INTEL_LPSS_ACPI is not set
+# CONFIG_MFD_INTEL_LPSS_PCI is not set
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_KEMPLD is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_MAX14577 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX77843 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_MT6397 is not set
+# CONFIG_MFD_MENF21BMC is not set
+# CONFIG_MFD_VIPERBOARD is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RTSX_PCI is not set
+# CONFIG_MFD_RT5033 is not set
+# CONFIG_MFD_RTSX_USB is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_RN5T618 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SKY81452 is not set
+# CONFIG_MFD_SMSC is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP3943 is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TPS65218 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS80031 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_REGULATOR is not set
+CONFIG_MEDIA_SUPPORT=y
+
+#
+# Multimedia core support
+#
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_SDR_SUPPORT is not set
+# CONFIG_MEDIA_RC_SUPPORT is not set
+# CONFIG_MEDIA_CONTROLLER is not set
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+# CONFIG_TTPCI_EEPROM is not set
+
+#
+# Media drivers
+#
+# CONFIG_MEDIA_USB_SUPPORT is not set
+CONFIG_MEDIA_PCI_SUPPORT=y
+
+#
+# Media capture support
+#
+# CONFIG_VIDEO_SOLO6X10 is not set
+# CONFIG_VIDEO_TW68 is not set
+# CONFIG_VIDEO_ZORAN is not set
+CONFIG_V4L_PLATFORM_DRIVERS=y
+# CONFIG_VIDEO_CAFE_CCIC is not set
+# CONFIG_SOC_CAMERA is not set
+# CONFIG_V4L_MEM2MEM_DRIVERS is not set
+# CONFIG_V4L_TEST_DRIVERS is not set
+
+#
+# Supported MMC/SDIO adapters
+#
+# CONFIG_CYPRESS_FIRMWARE is not set
+
+#
+# Media ancillary drivers (tuners, sensors, i2c, frontends)
+#
+CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
+
+#
+# Audio decoders, processors and mixers
+#
+
+#
+# RDS decoders
+#
+
+#
+# Video decoders
+#
+
+#
+# Video and audio decoders
+#
+
+#
+# Video encoders
+#
+
+#
+# Camera sensor devices
+#
+
+#
+# Flash devices
+#
+
+#
+# Video improvement chips
+#
+
+#
+# Audio/Video compression chips
+#
+
+#
+# Miscellaneous helper chips
+#
+
+#
+# Sensors used on soc_camera driver
+#
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+
+#
+# Graphics support
+#
+CONFIG_AGP=y
+# CONFIG_AGP_ALI is not set
+# CONFIG_AGP_ATI is not set
+# CONFIG_AGP_AMD is not set
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+# CONFIG_AGP_NVIDIA is not set
+# CONFIG_AGP_SIS is not set
+# CONFIG_AGP_SWORKS is not set
+# CONFIG_AGP_VIA is not set
+# CONFIG_AGP_EFFICEON is not set
+CONFIG_INTEL_GTT=y
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
+# CONFIG_VGA_SWITCHEROO is not set
+CONFIG_DRM=y
+CONFIG_DRM_MIPI_DSI=y
+CONFIG_DRM_KMS_HELPER=y
+CONFIG_DRM_KMS_FB_HELPER=y
+CONFIG_DRM_FBDEV_EMULATION=y
+# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set
+
+#
+# I2C encoder or helper chips
+#
+# CONFIG_DRM_I2C_ADV7511 is not set
+# CONFIG_DRM_I2C_CH7006 is not set
+# CONFIG_DRM_I2C_SIL164 is not set
+# CONFIG_DRM_I2C_NXP_TDA998X is not set
+# CONFIG_DRM_TDFX is not set
+# CONFIG_DRM_R128 is not set
+# CONFIG_DRM_RADEON is not set
+# CONFIG_DRM_AMDGPU is not set
+# CONFIG_DRM_NOUVEAU is not set
+CONFIG_DRM_I915=y
+# CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT is not set
+# CONFIG_DRM_MGA is not set
+# CONFIG_DRM_SIS is not set
+# CONFIG_DRM_VIA is not set
+# CONFIG_DRM_SAVAGE is not set
+# CONFIG_DRM_VGEM is not set
+# CONFIG_DRM_VMWGFX is not set
+# CONFIG_DRM_GMA500 is not set
+# CONFIG_DRM_UDL is not set
+# CONFIG_DRM_AST is not set
+# CONFIG_DRM_MGAG200 is not set
+# CONFIG_DRM_CIRRUS_QEMU is not set
+# CONFIG_DRM_QXL is not set
+# CONFIG_DRM_BOCHS is not set
+# CONFIG_DRM_VIRTIO_GPU is not set
+CONFIG_DRM_PANEL=y
+
+#
+# Display Panels
+#
+CONFIG_DRM_BRIDGE=y
+
+#
+# Display Interface Bridges
+#
+# CONFIG_DRM_VIGS is not set
+CONFIG_YAGL=y
+CONFIG_YAGL_DEBUG=y
+
+#
+# Frame buffer Devices
+#
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+CONFIG_FB_CMDLINE=y
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+CONFIG_FB_SYS_FILLRECT=y
+CONFIG_FB_SYS_COPYAREA=y
+CONFIG_FB_SYS_IMAGEBLIT=y
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+CONFIG_FB_SYS_FOPS=y
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ARC is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_UVESA is not set
+# CONFIG_FB_VESA is not set
+# CONFIG_FB_EFI is not set
+# CONFIG_FB_N411 is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_OPENCORES is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_I810 is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_GEODE is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_AUO_K190X is not set
+# CONFIG_FB_SIMPLE is not set
+# CONFIG_FB_SM712 is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+# CONFIG_BACKLIGHT_APPLE is not set
+# CONFIG_BACKLIGHT_PM8941_WLED is not set
+# CONFIG_BACKLIGHT_SAHARA is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+# CONFIG_BACKLIGHT_LV5207LP is not set
+# CONFIG_BACKLIGHT_BD6107 is not set
+# CONFIG_VGASTATE is not set
+CONFIG_HDMI=y
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+CONFIG_VGACON_SOFT_SCROLLBACK=y
+CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=80
+CONFIG_DUMMY_CONSOLE_ROWS=25
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=y
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_SEQ_DUMMY=y
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_PCM_TIMER=y
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_HRTIMER=y
+CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_MAX_CARDS=32
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_PROC_FS=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VMASTER=y
+CONFIG_SND_DMA_SGBUF=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_AC97_CODEC=y
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_PCSP is not set
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_ALOOP is not set
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_AC97_POWER_SAVE is not set
+CONFIG_SND_PCI=y
+# CONFIG_SND_AD1889 is not set
+# CONFIG_SND_ALS300 is not set
+# CONFIG_SND_ALS4000 is not set
+# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_ASIHPI is not set
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_ATIIXP_MODEM is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AW2 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_OXYGEN is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CS5530 is not set
+# CONFIG_SND_CS5535AUDIO is not set
+# CONFIG_SND_CTXFI is not set
+# CONFIG_SND_DARLA20 is not set
+# CONFIG_SND_GINA20 is not set
+# CONFIG_SND_LAYLA20 is not set
+# CONFIG_SND_DARLA24 is not set
+# CONFIG_SND_GINA24 is not set
+# CONFIG_SND_LAYLA24 is not set
+# CONFIG_SND_MONA is not set
+# CONFIG_SND_MIA is not set
+# CONFIG_SND_ECHO3G is not set
+# CONFIG_SND_INDIGO is not set
+# CONFIG_SND_INDIGOIO is not set
+# CONFIG_SND_INDIGODJ is not set
+# CONFIG_SND_INDIGOIOX is not set
+# CONFIG_SND_INDIGODJX is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_EMU10K1X is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_FM801 is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+CONFIG_SND_INTEL8X0=y
+# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LOLA is not set
+# CONFIG_SND_LX6464ES is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_RIPTIDE is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_SE6X is not set
+# CONFIG_SND_SIS7019 is not set
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_TRIDENT is not set
+# CONFIG_SND_VIA82XX is not set
+# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VIRTUOSO is not set
+# CONFIG_SND_VX222 is not set
+# CONFIG_SND_YMFPCI is not set
+
+#
+# HD-Audio
+#
+CONFIG_SND_HDA=y
+CONFIG_SND_HDA_INTEL=y
+CONFIG_SND_HDA_HWDEP=y
+# CONFIG_SND_HDA_RECONFIG is not set
+# CONFIG_SND_HDA_INPUT_BEEP is not set
+# CONFIG_SND_HDA_PATCH_LOADER is not set
+CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_ANALOG=y
+CONFIG_SND_HDA_CODEC_SIGMATEL=y
+CONFIG_SND_HDA_CODEC_VIA=y
+CONFIG_SND_HDA_CODEC_HDMI=y
+CONFIG_SND_HDA_CODEC_CIRRUS=y
+CONFIG_SND_HDA_CODEC_CONEXANT=y
+CONFIG_SND_HDA_CODEC_CA0110=y
+CONFIG_SND_HDA_CODEC_CA0132=y
+# CONFIG_SND_HDA_CODEC_CA0132_DSP is not set
+CONFIG_SND_HDA_CODEC_CMEDIA=y
+CONFIG_SND_HDA_CODEC_SI3054=y
+CONFIG_SND_HDA_GENERIC=y
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
+CONFIG_SND_HDA_CORE=y
+CONFIG_SND_HDA_I915=y
+CONFIG_SND_HDA_PREALLOC_SIZE=64
+CONFIG_SND_USB=y
+# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_UA101 is not set
+# CONFIG_SND_USB_USX2Y is not set
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_USB_US122L is not set
+# CONFIG_SND_USB_6FIRE is not set
+# CONFIG_SND_USB_HIFACE is not set
+# CONFIG_SND_BCD2000 is not set
+# CONFIG_SND_USB_POD is not set
+# CONFIG_SND_USB_PODHD is not set
+# CONFIG_SND_USB_TONEPORT is not set
+# CONFIG_SND_USB_VARIAX is not set
+CONFIG_SND_PCMCIA=y
+# CONFIG_SND_VXPOCKET is not set
+# CONFIG_SND_PDAUDIOCF is not set
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=y
+
+#
+# HID support
+#
+CONFIG_HID=y
+# CONFIG_HID_BATTERY_STRENGTH is not set
+CONFIG_HIDRAW=y
+# CONFIG_UHID is not set
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_ACRUX is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_APPLEIR is not set
+# CONFIG_HID_AUREAL is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_BETOP_FF is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CORSAIR is not set
+# CONFIG_HID_PRODIKEYS is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_ELECOM is not set
+# CONFIG_HID_ELO is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_GEMBIRD is not set
+# CONFIG_HID_GFRM is not set
+# CONFIG_HID_HOLTEK is not set
+# CONFIG_HID_GT683R is not set
+# CONFIG_HID_KEYTOUCH is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_ICADE is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LCPOWER is not set
+# CONFIG_HID_LENOVO is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PENMOUNT is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PLANTRONICS is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_ROCCAT is not set
+# CONFIG_HID_SAITEK is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_STEELSERIES is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_RMI is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TIVO is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THINGM is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_XINMO is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
+# CONFIG_HID_SENSOR_HUB is not set
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# I2C HID support
+#
+# CONFIG_I2C_HID is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_ULPI_BUS is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_XHCI_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_HCD_PLATFORM is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_FOTG210_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PCI=y
+# CONFIG_USB_OHCI_HCD_PLATFORM is not set
+CONFIG_USB_UHCI_HCD=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HCD_TEST_MODE is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+CONFIG_USB_PRINTER=y
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_REALTEK is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_STORAGE_ENE_UB6250 is not set
+# CONFIG_USB_UAS is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USBIP_CORE is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_DWC3 is not set
+# CONFIG_USB_DWC2 is not set
+# CONFIG_USB_CHIPIDEA is not set
+# CONFIG_USB_ISP1760 is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_EHSET_TEST_FIXTURE is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_EZUSB_FX2 is not set
+# CONFIG_USB_HSIC_USB3503 is not set
+# CONFIG_USB_LINK_LAYER_TEST is not set
+# CONFIG_USB_CHAOSKEY is not set
+
+#
+# USB Physical Layer drivers
+#
+# CONFIG_USB_PHY is not set
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_USB_ISP1301 is not set
+# CONFIG_USB_GADGET is not set
+# CONFIG_USB_LED_TRIG is not set
+# CONFIG_UWB is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+# CONFIG_LEDS_CLASS_FLASH is not set
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_LM3530 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_LP5523 is not set
+# CONFIG_LEDS_LP5562 is not set
+# CONFIG_LEDS_LP8501 is not set
+# CONFIG_LEDS_LP8860 is not set
+# CONFIG_LEDS_CLEVO_MAIL is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA963X is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_INTEL_SS4200 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_TLC591XX is not set
+# CONFIG_LEDS_LM355x is not set
+# CONFIG_LEDS_OT200 is not set
+
+#
+# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
+#
+# CONFIG_LEDS_BLINKM is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+# CONFIG_LEDS_TRIGGER_ONESHOT is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_LEDS_TRIGGER_TRANSIENT is not set
+# CONFIG_LEDS_TRIGGER_CAMERA is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_EDAC_ATOMIC_SCRUB=y
+CONFIG_EDAC_SUPPORT=y
+CONFIG_EDAC=y
+CONFIG_EDAC_LEGACY_SYSFS=y
+# CONFIG_EDAC_DEBUG is not set
+CONFIG_EDAC_DECODE_MCE=y
+# CONFIG_EDAC_MM_EDAC is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+CONFIG_RTC_SYSTOHC=y
+CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_ABB5ZES3 is not set
+# CONFIG_RTC_DRV_ABX80X is not set
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_ISL12057 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF2127 is not set
+# CONFIG_RTC_DRV_PCF8523 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF85063 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+# CONFIG_RTC_DRV_RV8803 is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1685_FAMILY is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_DS2404 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+
+#
+# HID Sensor RTC drivers
+#
+# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_DMA_ACPI=y
+# CONFIG_INTEL_IDMA64 is not set
+# CONFIG_PCH_DMA is not set
+# CONFIG_DW_DMAC is not set
+# CONFIG_DW_DMAC_PCI is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_VIRT_DRIVERS is not set
+CONFIG_VIRTIO=y
+
+#
+# Virtio drivers
+#
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PCI_LEGACY=y
+CONFIG_VIRTIO_BALLOON=y
+# CONFIG_VIRTIO_INPUT is not set
+CONFIG_VIRTIO_MMIO=y
+# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+CONFIG_STAGING=y
+# CONFIG_SLICOSS is not set
+# CONFIG_PRISM2_USB is not set
+# CONFIG_COMEDI is not set
+# CONFIG_RTL8192U is not set
+# CONFIG_RTLLIB is not set
+# CONFIG_R8712U is not set
+# CONFIG_R8188EU is not set
+# CONFIG_R8723AU is not set
+# CONFIG_RTS5208 is not set
+# CONFIG_VT6655 is not set
+# CONFIG_VT6656 is not set
+# CONFIG_FB_SM750 is not set
+# CONFIG_FB_XGI is not set
+
+#
+# Speakup console speech
+#
+# CONFIG_SPEAKUP is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
+# CONFIG_STAGING_MEDIA is not set
+
+#
+# Android
+#
+# CONFIG_ASHMEM is not set
+CONFIG_ANDROID_TIMED_OUTPUT=y
+# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set
+# CONFIG_SYNC is not set
+# CONFIG_ION is not set
+# CONFIG_WIMAX_GDM72XX is not set
+# CONFIG_LTE_GDM724X is not set
+# CONFIG_LUSTRE_FS is not set
+# CONFIG_DGNC is not set
+# CONFIG_DGAP is not set
+# CONFIG_GS_FPGABOOT is not set
+# CONFIG_WILC1000_DRIVER is not set
+# CONFIG_MOST is not set
+CONFIG_X86_PLATFORM_DEVICES=y
+# CONFIG_ACERHDF is not set
+# CONFIG_ASUS_LAPTOP is not set
+# CONFIG_DELL_SMO8800 is not set
+# CONFIG_DELL_RBTN is not set
+# CONFIG_FUJITSU_LAPTOP is not set
+# CONFIG_FUJITSU_TABLET is not set
+# CONFIG_AMILO_RFKILL is not set
+# CONFIG_HP_ACCEL is not set
+# CONFIG_HP_WIRELESS is not set
+# CONFIG_MSI_LAPTOP is not set
+# CONFIG_PANASONIC_LAPTOP is not set
+# CONFIG_COMPAL_LAPTOP is not set
+# CONFIG_SONY_LAPTOP is not set
+# CONFIG_IDEAPAD_LAPTOP is not set
+# CONFIG_THINKPAD_ACPI is not set
+# CONFIG_SENSORS_HDAPS is not set
+# CONFIG_INTEL_MENLOW is not set
+CONFIG_EEEPC_LAPTOP=y
+# CONFIG_ACPI_WMI is not set
+# CONFIG_TOPSTAR_LAPTOP is not set
+# CONFIG_TOSHIBA_BT_RFKILL is not set
+# CONFIG_TOSHIBA_HAPS is not set
+# CONFIG_ACPI_CMPC is not set
+# CONFIG_INTEL_IPS is not set
+# CONFIG_IBM_RTL is not set
+# CONFIG_SAMSUNG_LAPTOP is not set
+# CONFIG_INTEL_OAKTRAIL is not set
+# CONFIG_SAMSUNG_Q10 is not set
+# CONFIG_APPLE_GMUX is not set
+# CONFIG_INTEL_RST is not set
+# CONFIG_INTEL_SMARTCONNECT is not set
+# CONFIG_PVPANIC is not set
+# CONFIG_INTEL_PMC_IPC is not set
+# CONFIG_SURFACE_PRO3_BUTTON is not set
+# CONFIG_CHROME_PLATFORMS is not set
+
+#
+# Hardware Spinlock drivers
+#
+
+#
+# Clock Source drivers
+#
+CONFIG_CLKSRC_I8253=y
+CONFIG_CLKEVT_I8253=y
+CONFIG_I8253_LOCK=y
+CONFIG_CLKBLD_I8253=y
+# CONFIG_ATMEL_PIT is not set
+# CONFIG_SH_TIMER_CMT is not set
+# CONFIG_SH_TIMER_MTU2 is not set
+# CONFIG_SH_TIMER_TMU is not set
+# CONFIG_EM_TIMER_STI is not set
+# CONFIG_MAILBOX is not set
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Generic IOMMU Pagetable Support
+#
+# CONFIG_INTEL_IOMMU is not set
+
+#
+# Remoteproc drivers
+#
+# CONFIG_STE_MODEM_RPROC is not set
+
+#
+# Rpmsg drivers
+#
+
+#
+# SOC (System On Chip) specific Drivers
+#
+# CONFIG_SUNXI_SRAM is not set
+# CONFIG_SOC_TI is not set
+# CONFIG_PM_DEVFREQ is not set
+# CONFIG_EXTCON is not set
+# CONFIG_MEMORY is not set
+# CONFIG_IIO is not set
+# CONFIG_NTB is not set
+# CONFIG_VME_BUS is not set
+# CONFIG_PWM is not set
+# CONFIG_IPACK_BUS is not set
+# CONFIG_RESET_CONTROLLER is not set
+# CONFIG_FMC is not set
+
+#
+# PHY Subsystem
+#
+# CONFIG_GENERIC_PHY is not set
+# CONFIG_PHY_PXA_28NM_HSIC is not set
+# CONFIG_PHY_PXA_28NM_USB2 is not set
+# CONFIG_BCM_KONA_USB2_PHY is not set
+# CONFIG_POWERCAP is not set
+# CONFIG_MCB is not set
+
+#
+# Performance monitor support
+#
+CONFIG_RAS=y
+# CONFIG_AMD_MCE_INJ is not set
+# CONFIG_THUNDERBOLT is not set
+
+#
+# Android
+#
+CONFIG_ANDROID=y
+# CONFIG_ANDROID_BINDER_IPC is not set
+# CONFIG_NVMEM is not set
+# CONFIG_STM is not set
+# CONFIG_STM_DUMMY is not set
+# CONFIG_STM_SOURCE_CONSOLE is not set
+# CONFIG_INTEL_TH is not set
+
+#
+# FPGA Configuration Support
+#
+# CONFIG_FPGA is not set
+CONFIG_MARU=y
+CONFIG_MARU_VIRTIO_TOUCHSCREEN=y
+# CONFIG_MARU_CAMERA is not set
+# CONFIG_MARU_BACKLIGHT is not set
+CONFIG_MARU_JACK=y
+CONFIG_MARU_VIRTIO_HWKEY=y
+CONFIG_MARU_VIRTIO_TABLET=y
+CONFIG_MARU_VIRTIO_KEYBOARD=y
+CONFIG_MARU_VIRTIO_EVDI=y
+CONFIG_MARU_VIRTIO_SENSOR=y
+CONFIG_MARU_VIRTIO_NFC=y
+CONFIG_MARU_BRILLCODEC=y
+CONFIG_MARU_VIRTIO_VMODEM=y
+CONFIG_MARU_VIRTIO_ROTARY=y
+# CONFIG_MARU_EXTENSION_SOURCE is not set
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_FIRMWARE_MEMMAP=y
+# CONFIG_DELL_RBU is not set
+# CONFIG_DCDBAS is not set
+CONFIG_DMIID=y
+# CONFIG_DMI_SYSFS is not set
+CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
+# CONFIG_ISCSI_IBFT_FIND is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# EFI (Extensible Firmware Interface) Support
+#
+CONFIG_EFI_VARS=y
+CONFIG_EFI_ESRT=y
+CONFIG_EFI_RUNTIME_MAP=y
+# CONFIG_EFI_FAKE_MEMMAP is not set
+CONFIG_EFI_RUNTIME_WRAPPERS=y
+
+#
+# File systems
+#
+CONFIG_DCACHE_WORD_ACCESS=y
+# CONFIG_EXT2_FS is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_USE_FOR_EXT2=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_ENCRYPTION is not set
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_F2FS_FS is not set
+# CONFIG_FS_DAX is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_FANOTIFY is not set
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+# CONFIG_OVERLAY_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_VMCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_PROC_CHILDREN is not set
+CONFIG_KERNFS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+# CONFIG_CONFIGFS_FS is not set
+# CONFIG_EFIVAR_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_PSTORE is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V2=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_SWAP is not set
+# CONFIG_NFS_V4_1 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+# CONFIG_NFSD is not set
+CONFIG_GRACE_PERIOD=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_DEBUG is not set
+# CONFIG_CEPH_FS is not set
+CONFIG_CIFS=y
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_UPCALL is not set
+CONFIG_CIFS_XATTR=y
+# CONFIG_CIFS_POSIX is not set
+# CONFIG_CIFS_ACL is not set
+CONFIG_CIFS_DEBUG=y
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_DFS_UPCALL is not set
+# CONFIG_CIFS_SMB2 is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+CONFIG_9P_FS=y
+# CONFIG_9P_FS_POSIX_ACL is not set
+# CONFIG_9P_FS_SECURITY is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+CONFIG_NLS_CODEPAGE_949=y
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_MAC_ROMAN is not set
+# CONFIG_NLS_MAC_CELTIC is not set
+# CONFIG_NLS_MAC_CENTEURO is not set
+# CONFIG_NLS_MAC_CROATIAN is not set
+# CONFIG_NLS_MAC_CYRILLIC is not set
+# CONFIG_NLS_MAC_GAELIC is not set
+# CONFIG_NLS_MAC_GREEK is not set
+# CONFIG_NLS_MAC_ICELAND is not set
+# CONFIG_NLS_MAC_INUIT is not set
+# CONFIG_NLS_MAC_ROMANIAN is not set
+# CONFIG_NLS_MAC_TURKISH is not set
+CONFIG_NLS_UTF8=y
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+
+#
+# printk and dmesg options
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+
+#
+# Compile-time checks and compiler options
+#
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_INFO_REDUCED is not set
+# CONFIG_DEBUG_INFO_SPLIT is not set
+# CONFIG_DEBUG_INFO_DWARF4 is not set
+# CONFIG_GDB_SCRIPTS is not set
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_READABLE_ASM is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_PAGE_OWNER is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
+CONFIG_DEBUG_KERNEL=y
+
+#
+# Memory Debugging
+#
+# CONFIG_PAGE_EXTENSION is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+# CONFIG_DEBUG_KMEMLEAK is not set
+CONFIG_DEBUG_STACK_USAGE=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VIRTUAL is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_HAVE_ARCH_KMEMCHECK=y
+# CONFIG_DEBUG_SHIRQ is not set
+
+#
+# Debug Lockups and Hangs
+#
+# CONFIG_LOCKUP_DETECTOR is not set
+# CONFIG_DETECT_HUNG_TASK is not set
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+CONFIG_PANIC_TIMEOUT=0
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHED_INFO=y
+CONFIG_SCHEDSTATS=y
+# CONFIG_SCHED_STACK_END_CHECK is not set
+# CONFIG_DEBUG_TIMEKEEPING is not set
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_PREEMPT=y
+
+#
+# Lock Debugging (spinlocks, mutexes, etc...)
+#
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_LOCK_TORTURE_TEST is not set
+CONFIG_TRACE_IRQFLAGS=y
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_PI_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+
+#
+# RCU Debugging
+#
+# CONFIG_PROVE_RCU is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+# CONFIG_TORTURE_TEST is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=21
+# CONFIG_RCU_TRACE is not set
+# CONFIG_RCU_EQS_DEBUG is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y
+# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_TRACE_CLOCK=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+CONFIG_STACK_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_KPROBE_EVENT=y
+CONFIG_UPROBE_EVENT=y
+CONFIG_PROBE_EVENTS=y
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+CONFIG_MMIOTRACE=y
+# CONFIG_MMIOTRACE_TEST is not set
+# CONFIG_TRACEPOINT_BENCHMARK is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_RING_BUFFER_STARTUP_TEST is not set
+# CONFIG_TRACE_ENUM_MAP_FILE is not set
+
+#
+# Runtime Testing
+#
+# CONFIG_LKDTM is not set
+# CONFIG_TEST_LIST_SORT is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_RBTREE_TEST is not set
+# CONFIG_INTERVAL_TREE_TEST is not set
+# CONFIG_PERCPU_TEST is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_TEST_HEXDUMP is not set
+# CONFIG_TEST_STRING_HELPERS is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_TEST_PRINTF is not set
+# CONFIG_TEST_RHASHTABLE is not set
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_TEST_LKM is not set
+# CONFIG_TEST_USER_COPY is not set
+# CONFIG_TEST_BPF is not set
+# CONFIG_TEST_FIRMWARE is not set
+# CONFIG_TEST_UDELAY is not set
+# CONFIG_MEMTEST is not set
+# CONFIG_TEST_STATIC_KEYS is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_EARLY_PRINTK_DBGP=y
+# CONFIG_EARLY_PRINTK_EFI is not set
+# CONFIG_X86_PTDUMP_CORE is not set
+# CONFIG_X86_PTDUMP is not set
+# CONFIG_EFI_PGT_DUMP is not set
+CONFIG_DEBUG_RODATA=y
+# CONFIG_DEBUG_RODATA_TEST is not set
+# CONFIG_DEBUG_WX is not set
+# CONFIG_DEBUG_SET_MODULE_RONX is not set
+# CONFIG_DEBUG_NX_TEST is not set
+CONFIG_DOUBLEFAULT=y
+# CONFIG_DEBUG_TLBFLUSH is not set
+# CONFIG_IOMMU_STRESS is not set
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
+# CONFIG_X86_DECODER_SELFTEST is not set
+CONFIG_IO_DELAY_TYPE_0X80=0
+CONFIG_IO_DELAY_TYPE_0XED=1
+CONFIG_IO_DELAY_TYPE_UDELAY=2
+CONFIG_IO_DELAY_TYPE_NONE=3
+CONFIG_IO_DELAY_0X80=y
+# CONFIG_IO_DELAY_0XED is not set
+# CONFIG_IO_DELAY_UDELAY is not set
+# CONFIG_IO_DELAY_NONE is not set
+CONFIG_DEFAULT_IO_DELAY_TYPE=0
+CONFIG_DEBUG_BOOT_PARAMS=y
+# CONFIG_CPA_DEBUG is not set
+CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_DEBUG_ENTRY is not set
+# CONFIG_DEBUG_NMI_SELFTEST is not set
+# CONFIG_X86_DEBUG_STATIC_CPU_HAS is not set
+CONFIG_X86_DEBUG_FPU=y
+# CONFIG_PUNIT_ATOM_DEBUG is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_PERSISTENT_KEYRINGS is not set
+# CONFIG_BIG_KEYS is not set
+# CONFIG_ENCRYPTED_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
+CONFIG_SECURITY_NETWORK=y
+# CONFIG_SECURITY_NETWORK_XFRM is not set
+# CONFIG_SECURITY_PATH is not set
+# CONFIG_SECURITY_SELINUX is not set
+CONFIG_SECURITY_SMACK=y
+CONFIG_SECURITY_SMACK_BRINGUP=y
+# CONFIG_SECURITY_SMACK_NETFILTER is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_SECURITY_YAMA is not set
+CONFIG_INTEGRITY=y
+# CONFIG_INTEGRITY_SIGNATURE is not set
+CONFIG_INTEGRITY_AUDIT=y
+# CONFIG_IMA is not set
+# CONFIG_EVM is not set
+CONFIG_DEFAULT_SECURITY_SMACK=y
+# CONFIG_DEFAULT_SECURITY_DAC is not set
+CONFIG_DEFAULT_SECURITY="smack"
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG_DEFAULT=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_AKCIPHER2=y
+# CONFIG_CRYPTO_RSA is not set
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_USER is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_GF128MUL=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_NULL2=y
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_MCRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=y
+CONFIG_CRYPTO_GCM=y
+# CONFIG_CRYPTO_CHACHA20POLY1305 is not set
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_ECHAINIV=y
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=y
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+# CONFIG_CRYPTO_KEYWRAP is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_CMAC=y
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
+# CONFIG_CRYPTO_CRC32 is not set
+# CONFIG_CRYPTO_CRC32_PCLMUL is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+CONFIG_CRYPTO_GHASH=y
+# CONFIG_CRYPTO_POLY1305 is not set
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_586=y
+# CONFIG_CRYPTO_AES_NI_INTEL is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SALSA20_586 is not set
+# CONFIG_CRYPTO_CHACHA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_SERPENT_SSE2_586 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_TWOFISH_586 is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+# CONFIG_CRYPTO_842 is not set
+# CONFIG_CRYPTO_LZ4 is not set
+# CONFIG_CRYPTO_LZ4HC is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_DRBG_HMAC=y
+# CONFIG_CRYPTO_DRBG_HASH is not set
+# CONFIG_CRYPTO_DRBG_CTR is not set
+CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_JITTERENTROPY=y
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+# CONFIG_CRYPTO_USER_API_RNG is not set
+# CONFIG_CRYPTO_USER_API_AEAD is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_PADLOCK is not set
+# CONFIG_CRYPTO_DEV_GEODE is not set
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_CRYPTO_DEV_CCP is not set
+# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set
+# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set
+# CONFIG_ASYMMETRIC_KEY_TYPE is not set
+
+#
+# Certificates for signature checking
+#
+# CONFIG_SYSTEM_TRUSTED_KEYRING is not set
+CONFIG_HAVE_KVM=y
+CONFIG_VIRTUALIZATION=y
+# CONFIG_KVM is not set
+# CONFIG_LGUEST is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_HAVE_ARCH_BITREVERSE is not set
+CONFIG_RATIONAL=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_NET_UTILS=y
+CONFIG_GENERIC_FIND_FIRST_BIT=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_IO=y
+CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
+# CONFIG_CRC_CCITT is not set
+CONFIG_CRC16=y
+CONFIG_CRC_T10DIF=y
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+# CONFIG_CRC8 is not set
+CONFIG_AUDIT_GENERIC=y
+# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set
+# CONFIG_RANDOM32_SELFTEST is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_LZ4_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_XZ=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_DECOMPRESS_LZ4=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_INTERVAL_TREE=y
+CONFIG_ASSOCIATIVE_ARRAY=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HAS_DMA=y
+CONFIG_CHECK_SIGNATURE=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_GLOB=y
+# CONFIG_GLOB_SELFTEST is not set
+CONFIG_NLATTR=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
+# CONFIG_CORDIC is not set
+# CONFIG_DDR is not set
+CONFIG_OID_REGISTRY=y
+CONFIG_UCS2_STRING=y
+CONFIG_FONT_SUPPORT=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_SG_SPLIT is not set
+CONFIG_ARCH_HAS_SG_CHAIN=y
+CONFIG_ARCH_HAS_MMIO_FLUSH=y
--- /dev/null
+#!/bin/sh
+# Build x86 emulator kernel image
+
+ARCH=i386 make tizen_emul_defconfig
+./scripts/config --set-str CONFIG_INITRAMFS_SOURCE ramfs/initramfs.i386
+ARCH=i386 CROSS_COMPILE='' make -j8
--- /dev/null
+#!/bin/sh
+# Build x86_64 emulator kernel image.
+# Steps: apply the Tizen emulator defconfig, point INITRAMFS_SOURCE at the
+# x86_64 initramfs, then build. Abort immediately if any step fails so we
+# never run 'make' against a half-configured tree.
+set -e
+
+ARCH=x86_64 make tizen_emul_defconfig
+./scripts/config --set-str CONFIG_INITRAMFS_SOURCE ramfs/initramfs.x86_64
+ARCH=x86_64 CROSS_COMPILE='' make -j8
source "drivers/fpga/Kconfig"
+# for maru board
+source "drivers/maru/Kconfig"
+
endmenu
obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_NVMEM) += nvmem/
obj-$(CONFIG_FPGA) += fpga/
+
+# for maru board
+obj-$(CONFIG_MARU) += maru/
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
obj-y += drm/ vga/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
+
+# for maru board
+obj-y += yagl/
source "drivers/gpu/drm/imx/Kconfig"
source "drivers/gpu/drm/vc4/Kconfig"
+
+# for maru board
+source "drivers/gpu/drm/vigs/Kconfig"
obj-y += panel/
obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
+# for maru board
+obj-$(CONFIG_DRM_VIGS) += vigs/
--- /dev/null
+#
+# VIGS configuration
+#
+
+# VIGS: paravirtual display device used by the emulator; commands are
+# passed through to the hypervisor (e.g. QEMU) rather than driving real
+# hardware. Needs DRM core, KMS helpers and TTM for memory management.
+config DRM_VIGS
+	tristate "DRM Support for VIGS"
+	depends on DRM && PCI
+	default n
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+	help
+	  This module enables VIGS passthrough from emulated system
+	  to hypervisor (for example, QEMU).
+
+# Bool defaults must be 'y'/'n'; the previous 'default no' referenced an
+# undefined symbol named "no", which evaluates to n only by accident.
+config DRM_VIGS_DEBUG
+	bool "VIGS debug messages"
+	depends on DRM_VIGS
+	default n
+	help
+	  Enable VIGS debug messages.
--- /dev/null
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/vigs -Werror
+vigs_drm-y := main.o \
+ vigs_driver.o \
+ vigs_gem.o \
+ vigs_surface.o \
+ vigs_execbuffer.o \
+ vigs_device.o \
+ vigs_mman.o \
+ vigs_crtc.o \
+ vigs_output.o \
+ vigs_framebuffer.o \
+ vigs_comm.o \
+ vigs_fbdev.o \
+ vigs_irq.o \
+ vigs_fence.o \
+ vigs_fenceman.o \
+ vigs_file.o \
+ vigs_plane.o \
+ vigs_dp.o \
+ vigs_dmabuf.o
+
+obj-$(CONFIG_DRM_VIGS) += vigs_drm.o
--- /dev/null
+#include "vigs_driver.h"
+#include <linux/module.h>
+#include <linux/init.h>
+
+MODULE_AUTHOR("Stanislav Vorobiov");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/*
+ * Module entry point: register the VIGS DRM driver.
+ * Returns 0 on success or the negative errno from registration.
+ * (The previous 'if (ret != 0) return ret; return 0;' dance was
+ * equivalent to returning the registration result directly.)
+ */
+int vigs_init(void)
+{
+    return vigs_driver_register();
+}
+
+/* Module exit point: unregister the VIGS DRM driver. */
+void vigs_cleanup(void)
+{
+    vigs_driver_unregister();
+}
+
+module_init(vigs_init);
+module_exit(vigs_cleanup);
--- /dev/null
+#include "vigs_comm.h"
+#include "vigs_device.h"
+#include "vigs_execbuffer.h"
+#include "vigs_regs.h"
+#include "vigs_fence.h"
+#include <drm/vigs_drm.h>
+
+/*
+ * Ensure the cached execbuffer is at least 'size' bytes and mapped into
+ * kernel space; on success '*ptr' points at its kernel mapping.
+ * A fresh execbuffer is created (and the old one released) whenever the
+ * cached one is missing or too small. Callers serialize via comm->mutex,
+ * except during comm create/destroy when the comm is not shared.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int vigs_comm_alloc(struct vigs_comm *comm,
+                           unsigned long size,
+                           void **ptr)
+{
+    int ret;
+
+    if (!comm->execbuffer ||
+        (vigs_gem_size(&comm->execbuffer->gem) < size)) {
+        if (comm->execbuffer) {
+            drm_gem_object_unreference_unlocked(&comm->execbuffer->gem.base);
+            comm->execbuffer = NULL;
+        }
+
+        ret = vigs_execbuffer_create(comm->vigs_dev,
+                                     size,
+                                     true,
+                                     &comm->execbuffer);
+
+        if (ret != 0) {
+            DRM_ERROR("unable to create execbuffer\n");
+            return ret;
+        }
+
+        /* kmap requires the GEM to be reserved for the duration. */
+        vigs_gem_reserve(&comm->execbuffer->gem);
+
+        ret = vigs_gem_kmap(&comm->execbuffer->gem);
+
+        vigs_gem_unreserve(&comm->execbuffer->gem);
+
+        if (ret != 0) {
+            DRM_ERROR("unable to kmap execbuffer\n");
+
+            /* Drop the half-initialized buffer so the next call retries. */
+            drm_gem_object_unreference_unlocked(&comm->execbuffer->gem.base);
+            comm->execbuffer = NULL;
+
+            return ret;
+        }
+    }
+
+    *ptr = comm->execbuffer->gem.kptr;
+
+    return 0;
+}
+
+/*
+ * Lay out a single-command batch in the internal execbuffer:
+ * [batch header][request header][request payload of 'request_size' bytes].
+ * On success, if 'request' is non-NULL it points at the payload area for
+ * the caller to fill in. fence_seq is zeroed (no fence attached here).
+ * Returns 0 on success, negative errno from allocation on failure.
+ */
+static int vigs_comm_prepare(struct vigs_comm *comm,
+                             vigsp_cmd cmd,
+                             unsigned long request_size,
+                             void **request)
+{
+    int ret;
+    void *ptr;
+    struct vigsp_cmd_batch_header *batch_header;
+    struct vigsp_cmd_request_header *request_header;
+
+    ret = vigs_comm_alloc(comm,
+                          sizeof(*batch_header) +
+                          sizeof(*request_header) +
+                          request_size,
+                          &ptr);
+
+    if (ret != 0) {
+        return ret;
+    }
+
+    batch_header = ptr;
+    request_header = (struct vigsp_cmd_request_header*)(batch_header + 1);
+
+    batch_header->fence_seq = 0;
+    /* Batch size covers the request header + payload, not the batch header. */
+    batch_header->size = sizeof(*request_header) + request_size;
+
+    request_header->cmd = cmd;
+    request_header->size = request_size;
+
+    if (request) {
+        *request = (request_header + 1);
+    }
+
+    return 0;
+}
+
+/*
+ * Kick execution of 'execbuffer' by writing its GEM offset to the
+ * device's EXEC register. NOTE(review): the write itself appears to be
+ * the doorbell — the host presumably processes the buffer synchronously
+ * during this MMIO write (vigs_comm_init reads the host's reply right
+ * after calling this); confirm against the hypervisor-side protocol.
+ */
+static void vigs_comm_exec_internal(struct vigs_comm *comm,
+                                    struct vigs_execbuffer *execbuffer)
+{
+    writel(vigs_gem_offset(&execbuffer->gem), comm->io_ptr + VIGS_REG_EXEC);
+}
+
+/*
+ * Handshake with the host: send the init command carrying our protocol
+ * version; the host writes its own version back into the same request
+ * buffer. Fails with -ENODEV on a protocol version mismatch.
+ */
+static int vigs_comm_init(struct vigs_comm *comm)
+{
+    int ret;
+    struct vigsp_cmd_init_request *request;
+
+    ret = vigs_comm_prepare(comm,
+                            vigsp_cmd_init,
+                            sizeof(*request),
+                            (void**)&request);
+
+    if (ret != 0) {
+        return ret;
+    }
+
+    request->client_version = VIGS_PROTOCOL_VERSION;
+    request->server_version = 0;
+
+    vigs_comm_exec_internal(comm, comm->execbuffer);
+
+    /* The host filled in server_version in place during exec. */
+    if (request->server_version != VIGS_PROTOCOL_VERSION) {
+        DRM_ERROR("protocol version mismatch, expected %u, actual %u\n",
+                  VIGS_PROTOCOL_VERSION,
+                  request->server_version);
+        return -ENODEV;
+    }
+
+    return 0;
+}
+
+/*
+ * Notify the host that this comm channel is going away. Best-effort:
+ * if the exit request cannot even be laid out there is nothing to do.
+ */
+static void vigs_comm_exit(struct vigs_comm *comm)
+{
+    if (vigs_comm_prepare(comm, vigsp_cmd_exit, 0, NULL) != 0) {
+        return;
+    }
+
+    vigs_comm_exec_internal(comm, comm->execbuffer);
+}
+
+/*
+ * Allocate and initialize a comm channel for 'vigs_dev', performing the
+ * protocol handshake with the host. On success '*comm' holds the new
+ * channel; on failure '*comm' is NULL and a negative errno is returned.
+ * The mutex is initialized only after the handshake — the channel is not
+ * visible to other threads until this function returns.
+ */
+int vigs_comm_create(struct vigs_device *vigs_dev,
+                     struct vigs_comm **comm)
+{
+    int ret = 0;
+
+    DRM_DEBUG_DRIVER("enter\n");
+
+    *comm = kzalloc(sizeof(**comm), GFP_KERNEL);
+
+    if (!*comm) {
+        ret = -ENOMEM;
+        goto fail1;
+    }
+
+    (*comm)->vigs_dev = vigs_dev;
+    /* Cache the ioremapped register base for fast doorbell writes. */
+    (*comm)->io_ptr = vigs_dev->io_map->handle;
+
+    ret = vigs_comm_init(*comm);
+
+    if (ret != 0) {
+        goto fail2;
+    }
+
+    mutex_init(&(*comm)->mutex);
+
+    return 0;
+
+fail2:
+    /* vigs_comm_init may have allocated the internal execbuffer. */
+    if ((*comm)->execbuffer) {
+        drm_gem_object_unreference_unlocked(&(*comm)->execbuffer->gem.base);
+    }
+    kfree(*comm);
+fail1:
+    *comm = NULL;
+
+    return ret;
+}
+
+/*
+ * Tear down a comm channel: send the exit command to the host, release
+ * the internal execbuffer and free the structure. The mutex is destroyed
+ * first — safe because vigs_comm_exit does not take it and no other
+ * users may exist at destroy time.
+ */
+void vigs_comm_destroy(struct vigs_comm *comm)
+{
+    DRM_DEBUG_DRIVER("enter\n");
+
+    mutex_destroy(&comm->mutex);
+    vigs_comm_exit(comm);
+    if (comm->execbuffer) {
+        drm_gem_object_unreference_unlocked(&comm->execbuffer->gem.base);
+    }
+    kfree(comm);
+}
+
+/*
+ * Execute a caller-provided execbuffer. No locking here: the caller's
+ * buffer is independent of the internal one that comm->mutex protects.
+ */
+void vigs_comm_exec(struct vigs_comm *comm,
+                    struct vigs_execbuffer *execbuffer)
+{
+    vigs_comm_exec_internal(comm, execbuffer);
+}
+
+/*
+ * Send a reset command to the host. Serialized against all other
+ * internal-execbuffer users via comm->mutex.
+ * Returns 0 on success, negative errno if the request could not be built.
+ */
+int vigs_comm_reset(struct vigs_comm *comm)
+{
+    int ret;
+
+    mutex_lock(&comm->mutex);
+
+    ret = vigs_comm_prepare(comm, vigsp_cmd_reset, 0, NULL);
+
+    if (ret == 0) {
+        vigs_comm_exec_internal(comm, comm->execbuffer);
+    }
+
+    mutex_unlock(&comm->mutex);
+
+    return ret;
+}
+
+/*
+ * Ask the host to create a surface with the given geometry, format and
+ * client-chosen id. Returns 0 on success, negative errno if the request
+ * could not be built.
+ */
+int vigs_comm_create_surface(struct vigs_comm *comm,
+                             u32 width,
+                             u32 height,
+                             u32 stride,
+                             vigsp_surface_format format,
+                             vigsp_surface_id id)
+{
+    int ret;
+    struct vigsp_cmd_create_surface_request *request;
+
+    DRM_DEBUG_DRIVER("width = %u, height = %u, stride = %u, fmt = %d, id = %u\n",
+                     width,
+                     height,
+                     stride,
+                     format,
+                     id);
+
+    mutex_lock(&comm->mutex);
+
+    ret = vigs_comm_prepare(comm,
+                            vigsp_cmd_create_surface,
+                            sizeof(*request),
+                            (void**)&request);
+
+    if (ret == 0) {
+        request->width = width;
+        request->height = height;
+        request->stride = stride;
+        request->format = format;
+        request->id = id;
+
+        vigs_comm_exec_internal(comm, comm->execbuffer);
+    }
+
+    mutex_unlock(&comm->mutex);
+
+    return ret;
+}
+
+/*
+ * Ask the host to destroy the surface identified by 'id'.
+ * Returns 0 on success, negative errno if the request could not be built.
+ */
+int vigs_comm_destroy_surface(struct vigs_comm *comm, vigsp_surface_id id)
+{
+    int ret;
+    struct vigsp_cmd_destroy_surface_request *request;
+
+    DRM_DEBUG_DRIVER("id = %u\n", id);
+
+    mutex_lock(&comm->mutex);
+
+    ret = vigs_comm_prepare(comm,
+                            vigsp_cmd_destroy_surface,
+                            sizeof(*request),
+                            (void**)&request);
+
+    if (ret == 0) {
+        request->id = id;
+
+        vigs_comm_exec_internal(comm, comm->execbuffer);
+    }
+
+    mutex_unlock(&comm->mutex);
+
+    return ret;
+}
+
+/*
+ * Make surface 'id' the root (displayed) surface, at 'offset' in VRAM.
+ * When 'scanout' is set the command is fenced and waited upon so that
+ * the display never shows a half-updated surface on page flip.
+ * Returns 0 on success, negative errno on failure.
+ */
+int vigs_comm_set_root_surface(struct vigs_comm *comm,
+                               vigsp_surface_id id,
+                               bool scanout,
+                               vigsp_offset offset)
+{
+    int ret;
+    struct vigs_fence *fence = NULL;
+    struct vigsp_cmd_set_root_surface_request *request;
+
+    DRM_DEBUG_DRIVER("id = %u, scanout = %d, offset = %u\n",
+                     id, scanout, offset);
+
+    if (scanout) {
+        /*
+         * We only need to fence this if surface is
+         * scanout, this is in order not to display garbage
+         * on page flip.
+         */
+
+        ret = vigs_fence_create(comm->vigs_dev->fenceman, &fence);
+
+        if (ret != 0) {
+            return ret;
+        }
+    }
+
+    mutex_lock(&comm->mutex);
+
+    ret = vigs_comm_prepare(comm,
+                            vigsp_cmd_set_root_surface,
+                            sizeof(*request),
+                            (void**)&request);
+
+    if (ret == 0) {
+        request->id = id;
+        request->scanout = scanout;
+        request->offset = offset;
+
+        if (fence) {
+            vigs_execbuffer_fence(comm->execbuffer, fence);
+        }
+
+        vigs_comm_exec_internal(comm, comm->execbuffer);
+    }
+
+    mutex_unlock(&comm->mutex);
+
+    /* Wait outside the mutex so other commands are not blocked. */
+    if ((ret == 0) && fence) {
+        vigs_fence_wait(fence, false);
+    }
+
+    /* fence may be NULL here (non-scanout path) — presumably
+     * vigs_fence_unref is NULL-safe; confirm in vigs_fence.c. */
+    vigs_fence_unref(fence);
+
+    return ret;
+}
+
+/*
+ * Synchronously copy surface 'id' from the host (GPU side) into VRAM at
+ * 'offset': the command is fenced and this function waits for completion
+ * before returning. Returns 0 on success, negative errno on failure.
+ */
+int vigs_comm_update_vram(struct vigs_comm *comm,
+                          vigsp_surface_id id,
+                          vigsp_offset offset)
+{
+    int ret;
+    struct vigs_fence *fence;
+    struct vigsp_cmd_update_vram_request *request;
+
+    DRM_DEBUG_DRIVER("id = %u, offset = %u\n", id, offset);
+
+    ret = vigs_fence_create(comm->vigs_dev->fenceman, &fence);
+
+    if (ret != 0) {
+        return ret;
+    }
+
+    mutex_lock(&comm->mutex);
+
+    ret = vigs_comm_prepare(comm,
+                            vigsp_cmd_update_vram,
+                            sizeof(*request),
+                            (void**)&request);
+
+    if (ret == 0) {
+        request->sfc_id = id;
+        request->offset = offset;
+
+        vigs_execbuffer_fence(comm->execbuffer, fence);
+
+        vigs_comm_exec_internal(comm, comm->execbuffer);
+    }
+
+    mutex_unlock(&comm->mutex);
+
+    /* Wait outside the mutex so other commands are not blocked. */
+    if (ret == 0) {
+        vigs_fence_wait(fence, false);
+    }
+
+    vigs_fence_unref(fence);
+
+    return ret;
+}
+
+/*
+ * Synchronously push surface 'id' from VRAM at 'offset' to the host
+ * (GPU side), updating the full width x height rectangle as a single
+ * dirty entry. Fenced and waited upon like vigs_comm_update_vram.
+ * Returns 0 on success, negative errno on failure.
+ */
+int vigs_comm_update_gpu(struct vigs_comm *comm,
+                         vigsp_surface_id id,
+                         u32 width,
+                         u32 height,
+                         vigsp_offset offset)
+{
+    int ret;
+    struct vigs_fence *fence;
+    struct vigsp_cmd_update_gpu_request *request;
+
+    DRM_DEBUG_DRIVER("id = %u, offset = %u\n", id, offset);
+
+    ret = vigs_fence_create(comm->vigs_dev->fenceman, &fence);
+
+    if (ret != 0) {
+        return ret;
+    }
+
+    mutex_lock(&comm->mutex);
+
+    /* One extra vigsp_rect for the single dirty-rect entry below. */
+    ret = vigs_comm_prepare(comm,
+                            vigsp_cmd_update_gpu,
+                            sizeof(*request) + sizeof(struct vigsp_rect),
+                            (void**)&request);
+
+    if (ret == 0) {
+        request->sfc_id = id;
+        request->offset = offset;
+        request->num_entries = 1;
+        request->entries[0].pos.x = 0;
+        request->entries[0].pos.y = 0;
+        request->entries[0].size.w = width;
+        request->entries[0].size.h = height;
+
+        vigs_execbuffer_fence(comm->execbuffer, fence);
+
+        vigs_comm_exec_internal(comm, comm->execbuffer);
+    }
+
+    mutex_unlock(&comm->mutex);
+
+    /* Wait outside the mutex so other commands are not blocked. */
+    if (ret == 0) {
+        vigs_fence_wait(fence, false);
+    }
+
+    vigs_fence_unref(fence);
+
+    return ret;
+}
+
+/*
+ * Configure overlay plane 'plane' on the host: source surfaces (up to 4,
+ * e.g. one per format plane), source rectangle, destination position and
+ * size, z-order, flips and rotation. Not fenced — the host applies the
+ * state on the next display update. Returns 0 on success, negative errno
+ * if the request could not be built.
+ */
+int vigs_comm_set_plane(struct vigs_comm *comm,
+                        u32 plane,
+                        u32 width,
+                        u32 height,
+                        vigsp_plane_format format,
+                        vigsp_surface_id surfaces[4],
+                        unsigned int src_x,
+                        unsigned int src_y,
+                        unsigned int src_w,
+                        unsigned int src_h,
+                        int dst_x,
+                        int dst_y,
+                        unsigned int dst_w,
+                        unsigned int dst_h,
+                        int z_pos,
+                        int hflip,
+                        int vflip,
+                        int rotation)
+{
+    int ret;
+    struct vigsp_cmd_set_plane_request *request;
+
+    DRM_DEBUG_DRIVER("plane = %u, src_x = %u, src_y = %u, src_w = %u, src_h = %u, dst_x = %d, dst_y = %d, dst_w = %u, dst_h = %u, z_pos = %d, hflip = %d, vflip = %d, rotation = %d\n",
+                     plane, src_x, src_y, src_w, src_h,
+                     dst_x, dst_y, dst_w, dst_h, z_pos, hflip, vflip,
+                     rotation);
+
+    mutex_lock(&comm->mutex);
+
+    ret = vigs_comm_prepare(comm,
+                            vigsp_cmd_set_plane,
+                            sizeof(*request),
+                            (void**)&request);
+
+    if (ret == 0) {
+        request->plane = plane;
+        request->width = width;
+        request->height = height;
+        request->format = format;
+        memcpy(request->surfaces, surfaces, sizeof(request->surfaces));
+        request->src_rect.pos.x = src_x;
+        request->src_rect.pos.y = src_y;
+        request->src_rect.size.w = src_w;
+        request->src_rect.size.h = src_h;
+        request->dst_x = dst_x;
+        request->dst_y = dst_y;
+        request->dst_size.w = dst_w;
+        request->dst_size.h = dst_h;
+        request->z_pos = z_pos;
+        request->hflip = hflip;
+        request->vflip = vflip;
+        request->rotation = rotation;
+
+        vigs_comm_exec_internal(comm, comm->execbuffer);
+    }
+
+    mutex_unlock(&comm->mutex);
+
+    return ret;
+}
+
+/*
+ * Submit an empty batch (batch header only, size 0) with 'fence'
+ * attached, so the fence signals once the host has drained everything
+ * submitted before it. Does not wait. Returns 0 on success, negative
+ * errno if the batch could not be allocated.
+ */
+int vigs_comm_fence(struct vigs_comm *comm, struct vigs_fence *fence)
+{
+    struct vigsp_cmd_batch_header *batch_header;
+    int ret;
+
+    DRM_DEBUG_DRIVER("seq = %u\n", fence->seq);
+
+    mutex_lock(&comm->mutex);
+
+    ret = vigs_comm_alloc(comm,
+                          sizeof(*batch_header),
+                          (void**)&batch_header);
+
+    if (ret != 0) {
+        mutex_unlock(&comm->mutex);
+
+        return ret;
+    }
+
+    /* No requests in this batch — only the fence matters. */
+    batch_header->fence_seq = 0;
+    batch_header->size = 0;
+
+    vigs_execbuffer_fence(comm->execbuffer, fence);
+
+    vigs_comm_exec_internal(comm, comm->execbuffer);
+
+    mutex_unlock(&comm->mutex);
+
+    return 0;
+}
+
+/*
+ * DRM ioctl: report the VIGS protocol version this driver was built
+ * against. Always succeeds.
+ */
+int vigs_comm_get_protocol_version_ioctl(struct drm_device *drm_dev,
+                                         void *data,
+                                         struct drm_file *file_priv)
+{
+    struct drm_vigs_get_protocol_version *reply = data;
+
+    reply->version = VIGS_PROTOCOL_VERSION;
+    return 0;
+}
--- /dev/null
+#ifndef _VIGS_COMM_H_
+#define _VIGS_COMM_H_
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include "vigs_protocol.h"
+
+struct drm_device;
+struct drm_file;
+struct vigs_device;
+struct vigs_execbuffer;
+struct vigs_fence;
+
+/*
+ * Communication channel to the host-side VIGS server: commands are
+ * marshalled into 'execbuffer' and kicked off by an MMIO write through
+ * 'io_ptr' (see vigs_comm.c).
+ */
+struct vigs_comm
+{
+    struct vigs_device *vigs_dev;
+
+    /*
+     * From vigs_device::io_map::handle for speed.
+     */
+    void __iomem *io_ptr;
+
+    /*
+     * For synchronizing all calls.
+     */
+    struct mutex mutex;
+
+    /*
+     * For internal use.
+     */
+    struct vigs_execbuffer *execbuffer;
+};
+
+int vigs_comm_create(struct vigs_device *vigs_dev,
+ struct vigs_comm **comm);
+
+void vigs_comm_destroy(struct vigs_comm *comm);
+
+void vigs_comm_exec(struct vigs_comm *comm,
+ struct vigs_execbuffer *execbuffer);
+
+int vigs_comm_reset(struct vigs_comm *comm);
+
+int vigs_comm_create_surface(struct vigs_comm *comm,
+ u32 width,
+ u32 height,
+ u32 stride,
+ vigsp_surface_format format,
+ vigsp_surface_id id);
+
+int vigs_comm_destroy_surface(struct vigs_comm *comm, vigsp_surface_id id);
+
+int vigs_comm_set_root_surface(struct vigs_comm *comm,
+ vigsp_surface_id id,
+ bool scanout,
+ vigsp_offset offset);
+
+int vigs_comm_update_vram(struct vigs_comm *comm,
+ vigsp_surface_id id,
+ vigsp_offset offset);
+
+int vigs_comm_update_gpu(struct vigs_comm *comm,
+ vigsp_surface_id id,
+ u32 width,
+ u32 height,
+ vigsp_offset offset);
+
+int vigs_comm_set_plane(struct vigs_comm *comm,
+ u32 plane,
+ u32 width,
+ u32 height,
+ vigsp_plane_format format,
+ vigsp_surface_id surfaces[4],
+ unsigned int src_x,
+ unsigned int src_y,
+ unsigned int src_w,
+ unsigned int src_h,
+ int dst_x,
+ int dst_y,
+ unsigned int dst_w,
+ unsigned int dst_h,
+ int z_pos,
+ int hflip,
+ int vflip,
+ int rotation);
+
+int vigs_comm_fence(struct vigs_comm *comm, struct vigs_fence *fence);
+
+/*
+ * IOCTLs
+ * @{
+ */
+
+int vigs_comm_get_protocol_version_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+#endif
--- /dev/null
+#include "vigs_crtc.h"
+#include "vigs_device.h"
+#include "vigs_framebuffer.h"
+#include "vigs_surface.h"
+#include "vigs_comm.h"
+#include "vigs_fbdev.h"
+#include "drm_crtc_helper.h"
+#include <linux/console.h>
+
+/*
+ * Tells the host which surface is now the root (scanned-out) surface.
+ * Called whenever crtc->fb changes (mode set, page flip). For scanout
+ * surfaces the framebuffer is pinned into VRAM first; the previous
+ * framebuffer's pin is dropped on success.
+ * Returns 0 on success or a negative errno.
+ */
+static int vigs_crtc_update(struct drm_crtc *crtc,
+ struct drm_framebuffer *old_fb)
+{
+ struct vigs_device *vigs_dev = crtc->dev->dev_private;
+ struct vigs_framebuffer *vigs_old_fb = NULL;
+ struct vigs_framebuffer *vigs_fb;
+ int ret;
+
+ /*
+ * New framebuffer has been attached, notify the host that
+ * root surface has been updated.
+ */
+
+ if (!crtc->fb) {
+ DRM_ERROR("crtc->fb is NULL\n");
+ return -EINVAL;
+ }
+
+ if (old_fb) {
+ vigs_old_fb = fb_to_vigs_fb(old_fb);
+ }
+
+ vigs_fb = fb_to_vigs_fb(crtc->fb);
+
+ if (vigs_fb->surfaces[0]->scanout) {
+retry:
+ ret = vigs_framebuffer_pin(vigs_fb);
+
+ if (ret != 0) {
+ /*
+ * In condition of very intense GEM operations
+ * and with small amount of VRAM memory it's possible that
+ * GEM pin will be failing for some time, thus, framebuffer pin
+ * will be failing. This is unavoidable with current TTM design,
+ * thus, if someone is intensively
+ * reserves/unreserves GEMs then ttm_bo_validate can fail even if there
+ * is free space in a placement. Even worse, ttm_bo_validate fails with
+ * ENOMEM so it's not possible to tell if it's a temporary failure due
+ * to reserve/unreserve pressure or constant one due to memory shortage.
+ * We assume here that it's temporary and retry framebuffer pin. This
+ * is relatively safe since we only pin GEMs on pageflip and user
+ * should have started the VM with VRAM size equal to at least 3 frames,
+ * thus, 2 frame will always be free and we can always pin 1 frame.
+ *
+ * NOTE(review): this retry loop is unbounded busy-waiting
+ * (cpu_relax only) - confirm the 3-frame VRAM assumption
+ * always holds, otherwise this can spin forever.
+ */
+ cpu_relax();
+ goto retry;
+ }
+
+ /* Reserve the GEM so its VRAM offset is stable while we read it. */
+ vigs_gem_reserve(&vigs_fb->surfaces[0]->gem);
+
+ ret = vigs_comm_set_root_surface(vigs_dev->comm,
+ vigs_fb->surfaces[0]->id,
+ 1,
+ vigs_gem_offset(&vigs_fb->surfaces[0]->gem));
+
+ vigs_gem_unreserve(&vigs_fb->surfaces[0]->gem);
+
+ if (ret != 0) {
+ vigs_framebuffer_unpin(vigs_fb);
+
+ return ret;
+ }
+ } else {
+ /* Non-scanout surface: no VRAM placement, offset is meaningless. */
+ ret = vigs_comm_set_root_surface(vigs_dev->comm,
+ vigs_fb->surfaces[0]->id,
+ 0,
+ 0);
+
+ if (ret != 0) {
+ return ret;
+ }
+ }
+
+ /* Drop the pin taken when the old framebuffer became the root. */
+ if (vigs_old_fb && vigs_old_fb->surfaces[0]->scanout) {
+ vigs_framebuffer_unpin(vigs_old_fb);
+ }
+
+ return 0;
+}
+
+/*
+ * CRTC teardown: unregisters the CRTC from DRM and frees our wrapper.
+ */
+static void vigs_crtc_destroy(struct drm_crtc *crtc)
+{
+    struct vigs_crtc *vigs_crtc = crtc_to_vigs_crtc(crtc);
+
+    /*
+     * DRM debug macros do not append a newline; "\n" added so this
+     * line doesn't run into the next, matching every other
+     * DRM_DEBUG_KMS call in this file.
+     */
+    DRM_DEBUG_KMS("enter\n");
+
+    drm_crtc_cleanup(crtc);
+
+    kfree(vigs_crtc);
+}
+
+/*
+ * DPMS handler: translates the DRM power state into an fbdev blank
+ * level and broadcasts it via the fb notifier chain (backlight-based
+ * power management, see the comment below).
+ */
+static void vigs_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+    struct vigs_device *vigs_dev = crtc->dev->dev_private;
+    int blank, i;
+    struct fb_event event;
+
+    DRM_DEBUG_KMS("enter: in_dpms = %d, mode = %d\n",
+                  vigs_dev->in_dpms,
+                  mode);
+
+    /* Re-entry guard: see the in_dpms explanation further down. */
+    if (vigs_dev->in_dpms) {
+        return;
+    }
+
+    /* Map DRM DPMS levels onto fbdev blanking levels. */
+    switch (mode) {
+    case DRM_MODE_DPMS_ON:
+        blank = FB_BLANK_UNBLANK;
+        break;
+    case DRM_MODE_DPMS_STANDBY:
+        blank = FB_BLANK_NORMAL;
+        break;
+    case DRM_MODE_DPMS_SUSPEND:
+        blank = FB_BLANK_VSYNC_SUSPEND;
+        break;
+    case DRM_MODE_DPMS_OFF:
+        blank = FB_BLANK_POWERDOWN;
+        break;
+    default:
+        DRM_ERROR("unspecified mode %d\n", mode);
+        return;
+    }
+
+    event.info = vigs_dev->fbdev->base.fbdev;
+    /*
+     * FB_EVENT_BLANK passes the blank level via event.data. This line
+     * was garbled in the original ("event.data = ") and has been
+     * reconstructed to the standard notifier payload.
+     */
+    event.data = &blank;
+
+    /*
+     * We can't just 'console_lock' here, since
+     * this may result in deadlock:
+     * fb func:
+     * console_lock();
+     * mutex_lock(&dev->mode_config.mutex);
+     * DRM func:
+     * mutex_lock(&dev->mode_config.mutex);
+     * console_lock();
+     *
+     * So we just try to acquire it for 5 times with a delay
+     * and then just skip.
+     *
+     * This code is here only because pm is currently done via
+     * backlight which is bad, we need to make proper pm via
+     * kernel support.
+     */
+    for (i = 0; i < 5; ++i) {
+        if (console_trylock()) {
+            /*
+             * We must set in_dpms to true while walking
+             * fb call chain because a callback inside the
+             * call chain might do FB_BLANK on its own, i.e.
+             * 'vigs_fbdev_dpms' might get called from here. To avoid
+             * this we set in_dpms to true and 'vigs_fbdev_dpms'
+             * checks this and returns.
+             */
+            vigs_dev->in_dpms = true;
+
+            fb_notifier_call_chain(FB_EVENT_BLANK, &event);
+
+            vigs_dev->in_dpms = false;
+
+            console_unlock();
+            return;
+        }
+        msleep(100);
+        DRM_ERROR("unable to lock console, trying again\n");
+    }
+
+    DRM_ERROR("unable to lock console, skipping fb call chain\n");
+}
+
+/*
+ * The virtual display needs no mode adjustments: accept every
+ * requested mode unchanged.
+ */
+static bool vigs_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 const struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+    DRM_DEBUG_KMS("enter\n");
+    return true;
+}
+
+/*
+ * Scanout base changed: forward to vigs_crtc_update so the host is
+ * told about the new root surface.
+ */
+static int vigs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+                                   struct drm_framebuffer *old_fb)
+{
+    DRM_DEBUG_KMS("enter: x = %d, y = %d\n", x, y);
+    return vigs_crtc_update(crtc, old_fb);
+}
+
+/*
+ * Full mode set: the mode itself is irrelevant for the virtual
+ * hardware, so this degenerates to a base update.
+ */
+static int vigs_crtc_mode_set(struct drm_crtc *crtc,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode,
+                              int x, int y,
+                              struct drm_framebuffer *old_fb)
+{
+    DRM_DEBUG_KMS("enter: x = %d, y = %d\n", x, y);
+    return vigs_crtc_mode_set_base(crtc, x, y, old_fb);
+}
+
+/* Nothing to prepare before a mode set on the virtual hardware. */
+static void vigs_crtc_prepare(struct drm_crtc *crtc)
+{
+    DRM_DEBUG_KMS("enter\n");
+}
+
+/* Nothing to commit after a mode set on the virtual hardware. */
+static void vigs_crtc_commit(struct drm_crtc *crtc)
+{
+    DRM_DEBUG_KMS("enter\n");
+}
+
+/* No LUT/gamma support on the virtual display; intentionally empty. */
+static void vigs_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * Hardware cursors are not supported; the request is silently
+ * accepted (returns 0) so userspace carries on without one.
+ */
+static int vigs_crtc_cursor_set(struct drm_crtc *crtc,
+                                struct drm_file *file_priv,
+                                uint32_t handle,
+                                uint32_t width,
+                                uint32_t height)
+{
+    return 0;
+}
+
+/*
+ * Hardware cursor movement is not supported; silently accepted.
+ */
+static int vigs_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+    return 0;
+}
+
+/*
+ * Page flip: queues 'event' for delivery on the next vblank, swaps
+ * crtc->fb and pushes the new root surface to the host. On failure
+ * the old framebuffer and the vblank reference are restored.
+ * Runs under drm_device::struct_mutex.
+ *
+ * NOTE(review): 'ret' starts at -EINVAL and is only updated inside
+ * the 'if (event)' branch, so a flip request without an event always
+ * returns -EINVAL and does NOT update the framebuffer - confirm that
+ * is intended.
+ */
+static int vigs_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ unsigned long flags;
+ struct vigs_device *vigs_dev = crtc->dev->dev_private;
+ struct drm_framebuffer *old_fb = crtc->fb;
+ int ret = -EINVAL;
+
+ mutex_lock(&vigs_dev->drm_dev->struct_mutex);
+
+ if (event) {
+ event->pipe = 0;
+
+ /* Hold a vblank reference until the event is delivered. */
+ ret = drm_vblank_get(vigs_dev->drm_dev, 0);
+
+ if (ret != 0) {
+ DRM_ERROR("failed to acquire vblank counter\n");
+ goto out;
+ }
+
+ spin_lock_irqsave(&vigs_dev->drm_dev->event_lock, flags);
+ list_add_tail(&event->base.link,
+ &vigs_dev->pageflip_event_list);
+ spin_unlock_irqrestore(&vigs_dev->drm_dev->event_lock, flags);
+
+ crtc->fb = fb;
+ ret = vigs_crtc_update(crtc, old_fb);
+ if (ret != 0) {
+ /* Roll back: restore old fb and undo the queueing above. */
+ crtc->fb = old_fb;
+ spin_lock_irqsave(&vigs_dev->drm_dev->event_lock, flags);
+ if (atomic_read(&vigs_dev->drm_dev->vblank[0].refcount) > 0) {
+ /*
+ * Only do this if event wasn't already processed.
+ */
+ drm_vblank_put(vigs_dev->drm_dev, 0);
+ list_del(&event->base.link);
+ }
+ spin_unlock_irqrestore(&vigs_dev->drm_dev->event_lock, flags);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+
+ return ret;
+}
+
+/*
+ * CRTC disable: tells the host there is no root surface anymore and
+ * drops the pin taken when the current framebuffer became scanout.
+ */
+static void vigs_crtc_disable(struct drm_crtc *crtc)
+{
+ struct vigs_device *vigs_dev = crtc->dev->dev_private;
+ struct vigs_framebuffer *vigs_fb;
+
+ /*
+ * Framebuffer has been detached, notify the host that
+ * root surface is gone.
+ */
+
+ DRM_DEBUG_KMS("enter\n");
+
+ if (!crtc->fb) {
+ /*
+ * No current framebuffer, no need to notify the host.
+ */
+
+ return;
+ }
+
+ vigs_fb = fb_to_vigs_fb(crtc->fb);
+
+ /* Surface id 0 means "no root surface"; failure is ignored here. */
+ vigs_comm_set_root_surface(vigs_dev->comm, 0, 0, 0);
+
+ if (vigs_fb->surfaces[0]->scanout) {
+ vigs_framebuffer_unpin(vigs_fb);
+ }
+}
+
+/* Core CRTC operations exposed to the DRM core. */
+static const struct drm_crtc_funcs vigs_crtc_funcs =
+{
+ .cursor_set = vigs_crtc_cursor_set,
+ .cursor_move = vigs_crtc_cursor_move,
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = vigs_crtc_page_flip,
+ .destroy = vigs_crtc_destroy,
+};
+
+/* Helper callbacks used by the DRM CRTC helper library. */
+static const struct drm_crtc_helper_funcs vigs_crtc_helper_funcs =
+{
+ .dpms = vigs_crtc_dpms,
+ .mode_fixup = vigs_crtc_mode_fixup,
+ .mode_set = vigs_crtc_mode_set,
+ .mode_set_base = vigs_crtc_mode_set_base,
+ .prepare = vigs_crtc_prepare,
+ .commit = vigs_crtc_commit,
+ .load_lut = vigs_crtc_load_lut,
+ .disable = vigs_crtc_disable,
+};
+
+/*
+ * Creates the device's single CRTC and registers its helper
+ * callbacks with DRM. Returns 0 on success or a negative errno.
+ */
+int vigs_crtc_init(struct vigs_device *vigs_dev)
+{
+    struct vigs_crtc *crtc;
+    int ret;
+
+    DRM_DEBUG_KMS("enter\n");
+
+    crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+    if (crtc == NULL) {
+        return -ENOMEM;
+    }
+
+    ret = drm_crtc_init(vigs_dev->drm_dev,
+                        &crtc->base,
+                        &vigs_crtc_funcs);
+    if (ret != 0) {
+        kfree(crtc);
+        return ret;
+    }
+
+    drm_crtc_helper_add(&crtc->base, &vigs_crtc_helper_funcs);
+    return 0;
+}
--- /dev/null
+#ifndef _VIGS_CRTC_H_
+#define _VIGS_CRTC_H_
+
+#include "drmP.h"
+
+struct vigs_device;
+
+/* Thin wrapper around drm_crtc; currently carries no extra state. */
+struct vigs_crtc
+{
+ struct drm_crtc base;
+};
+
+/* Recovers the wrapper from the embedded drm_crtc. */
+static inline struct vigs_crtc *crtc_to_vigs_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct vigs_crtc, base);
+}
+
+/* Creates and registers the device's single CRTC. */
+int vigs_crtc_init(struct vigs_device *vigs_dev);
+
+#endif
--- /dev/null
+#include "vigs_device.h"
+#include "vigs_mman.h"
+#include "vigs_fenceman.h"
+#include "vigs_crtc.h"
+#include "vigs_output.h"
+#include "vigs_plane.h"
+#include "vigs_framebuffer.h"
+#include "vigs_comm.h"
+#include "vigs_fbdev.h"
+#include "vigs_execbuffer.h"
+#include "vigs_surface.h"
+#include "vigs_dp.h"
+#include <drm/vigs_drm.h>
+
+extern const struct dma_buf_ops vigs_dmabuf_ops;
+
+/*
+ * TTM move hook: a surface is leaving VRAM, so push its pixel data
+ * to the host GPU first - unless nothing needs syncing, or the GPU
+ * copy was explicitly marked dirty (is_gpu_dirty), in which case the
+ * VRAM contents are stale and must not overwrite the GPU copy.
+ */
+static void vigs_device_mman_vram_to_gpu(void *user_data,
+ struct ttm_buffer_object *bo)
+{
+ struct vigs_device *vigs_dev = user_data;
+ struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
+ struct vigs_surface *vigs_sfc = vigs_gem_to_vigs_surface(vigs_gem);
+ bool need_gpu_update = vigs_surface_need_gpu_update(vigs_sfc);
+
+ if (!vigs_sfc->is_gpu_dirty && need_gpu_update) {
+ DRM_INFO("vram_to_gpu: 0x%llX\n",
+ drm_vma_node_offset_addr(&bo->vma_node));
+ vigs_comm_update_gpu(vigs_dev->comm,
+ vigs_sfc->id,
+ vigs_sfc->width,
+ vigs_sfc->height,
+ vigs_gem_offset(vigs_gem));
+ } else {
+ DRM_INFO("vram_to_gpu: 0x%llX (no-op)\n",
+ drm_vma_node_offset_addr(&bo->vma_node));
+ }
+
+ /* VRAM backing is going away, so the dirty flag is consumed here. */
+ vigs_sfc->is_gpu_dirty = false;
+}
+
+/*
+ * TTM move hook: a surface is being placed into VRAM at 'new_offset';
+ * ask the host to fill that VRAM region from the GPU copy if the
+ * surface actually needs it.
+ */
+static void vigs_device_mman_gpu_to_vram(void *user_data,
+ struct ttm_buffer_object *bo,
+ unsigned long new_offset)
+{
+ struct vigs_device *vigs_dev = user_data;
+ struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);
+ struct vigs_surface *vigs_sfc = vigs_gem_to_vigs_surface(vigs_gem);
+
+ if (vigs_surface_need_vram_update(vigs_sfc)) {
+ DRM_DEBUG_DRIVER("0x%llX\n",
+ drm_vma_node_offset_addr(&bo->vma_node));
+ vigs_comm_update_vram(vigs_dev->comm,
+ vigs_sfc->id,
+ new_offset);
+ } else {
+ DRM_DEBUG_DRIVER("0x%llX (no-op)\n",
+ drm_vma_node_offset_addr(&bo->vma_node));
+ }
+}
+
+/*
+ * Per-VMA setup hook: only surface GEMs carry per-mapping tracking
+ * state; for all other GEM types the slot is marked unused (NULL sfc).
+ */
+static void vigs_device_mman_init_vma(void *user_data,
+                                      void *vma_data_opaque,
+                                      struct ttm_buffer_object *bo,
+                                      bool track_access)
+{
+    struct vigs_gem_object *gem = bo_to_vigs_gem(bo);
+    struct vigs_vma_data *data = vma_data_opaque;
+
+    if (gem->type == VIGS_GEM_TYPE_SURFACE) {
+        vigs_vma_data_init(data,
+                           vigs_gem_to_vigs_surface(gem),
+                           track_access);
+    } else {
+        data->sfc = NULL;
+    }
+}
+
+/*
+ * Per-VMA teardown hook: non-surface mappings left sfc NULL in
+ * init_vma and have nothing to clean up.
+ */
+static void vigs_device_mman_cleanup_vma(void *user_data,
+                                         void *vma_data_opaque)
+{
+    struct vigs_vma_data *data = vma_data_opaque;
+
+    if (data->sfc) {
+        vigs_vma_data_cleanup(data);
+    }
+}
+
+/* Callbacks handed to the memory manager (user_data is vigs_device). */
+static struct vigs_mman_ops mman_ops =
+{
+ .vram_to_gpu = &vigs_device_mman_vram_to_gpu,
+ .gpu_to_vram = &vigs_device_mman_gpu_to_vram,
+ .init_vma = &vigs_device_mman_init_vma,
+ .cleanup_vma = &vigs_device_mman_cleanup_vma
+};
+
+/*
+ * One-shot device bring-up. Discovers the three PCI BARs
+ * (0 = VRAM, 1 = RAM, 2 = IO registers), then builds each subsystem
+ * in dependency order: mman -> obj_dev -> fenceman -> dp -> comm ->
+ * mode config/CRTC/outputs/planes -> vblank -> IRQ -> fbdev.
+ * The failN labels unwind in exact reverse order of construction.
+ * Returns 0 on success or a negative errno.
+ */
+int vigs_device_init(struct vigs_device *vigs_dev,
+ struct drm_device *drm_dev,
+ struct pci_dev *pci_dev,
+ unsigned long flags)
+{
+ int ret;
+ u32 i;
+
+ DRM_DEBUG_DRIVER("enter\n");
+
+ vigs_dev->dev = &pci_dev->dev;
+ vigs_dev->drm_dev = drm_dev;
+ vigs_dev->pci_dev = pci_dev;
+
+ INIT_LIST_HEAD(&vigs_dev->pageflip_event_list);
+
+ vigs_dev->vram_base = pci_resource_start(pci_dev, 0);
+ vigs_dev->vram_size = pci_resource_len(pci_dev, 0);
+
+ vigs_dev->ram_base = pci_resource_start(pci_dev, 1);
+ vigs_dev->ram_size = pci_resource_len(pci_dev, 1);
+
+ vigs_dev->io_base = pci_resource_start(pci_dev, 2);
+ vigs_dev->io_size = pci_resource_len(pci_dev, 2);
+
+ idr_init(&vigs_dev->surface_idr);
+ mutex_init(&vigs_dev->surface_idr_mutex);
+
+ if (!vigs_dev->vram_base || !vigs_dev->ram_base || !vigs_dev->io_base) {
+ DRM_ERROR("VRAM, RAM or IO bar not found on device\n");
+ ret = -ENODEV;
+ goto fail1;
+ }
+
+ /* The IO bar is accessed as an array of pointer-sized registers. */
+ if ((vigs_dev->io_size < sizeof(void*)) ||
+ ((vigs_dev->io_size % sizeof(void*)) != 0)) {
+ DRM_ERROR("IO bar has bad size: %pa bytes\n", &vigs_dev->io_size);
+ ret = -ENODEV;
+ goto fail1;
+ }
+
+ ret = drm_addmap(vigs_dev->drm_dev,
+ vigs_dev->io_base,
+ vigs_dev->io_size,
+ _DRM_REGISTERS,
+ 0,
+ &vigs_dev->io_map);
+ if (ret != 0) {
+ goto fail1;
+ }
+
+ ret = vigs_mman_create(vigs_dev->vram_base, vigs_dev->vram_size,
+ vigs_dev->ram_base, vigs_dev->ram_size,
+ sizeof(struct vigs_vma_data),
+ &mman_ops,
+ vigs_dev,
+ &vigs_dev->mman);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ /* 12 = hash table order; dmabuf ops enable PRIME export. */
+ vigs_dev->obj_dev = ttm_object_device_init(vigs_dev->mman->mem_global_ref.object,
+ 12, &vigs_dmabuf_ops);
+
+ if (!vigs_dev->obj_dev) {
+ DRM_ERROR("Unable to initialize obj_dev\n");
+ ret = -ENOMEM;
+ goto fail3;
+ }
+
+ ret = vigs_fenceman_create(&vigs_dev->fenceman);
+
+ if (ret != 0) {
+ goto fail4;
+ }
+
+ ret = vigs_dp_create(vigs_dev, &vigs_dev->dp);
+
+ if (ret != 0) {
+ goto fail5;
+ }
+
+ ret = vigs_comm_create(vigs_dev, &vigs_dev->comm);
+
+ if (ret != 0) {
+ goto fail6;
+ }
+
+ spin_lock_init(&vigs_dev->irq_lock);
+
+ drm_mode_config_init(vigs_dev->drm_dev);
+
+ vigs_framebuffer_config_init(vigs_dev);
+
+ /*
+ * CRTC/output/plane failures all land on fail7:
+ * drm_mode_config_cleanup releases whatever mode objects were
+ * already registered.
+ */
+ ret = vigs_crtc_init(vigs_dev);
+
+ if (ret != 0) {
+ goto fail7;
+ }
+
+ ret = vigs_output_init(vigs_dev);
+
+ if (ret != 0) {
+ goto fail7;
+ }
+
+ for (i = 0; i < VIGS_MAX_PLANES; ++i) {
+ ret = vigs_plane_init(vigs_dev, i);
+
+ if (ret != 0) {
+ goto fail7;
+ }
+ }
+
+ /* Single CRTC, so a single vblank counter. */
+ ret = drm_vblank_init(drm_dev, 1);
+
+ if (ret != 0) {
+ goto fail7;
+ }
+
+ /*
+ * We allow VBLANK interrupt disabling right from the start. There's
+ * no point in "waiting until first modeset".
+ */
+ drm_dev->vblank_disable_allowed = 1;
+
+ ret = drm_irq_install(drm_dev);
+
+ if (ret != 0) {
+ goto fail8;
+ }
+
+ ret = vigs_fbdev_create(vigs_dev, &vigs_dev->fbdev);
+
+ if (ret != 0) {
+ goto fail9;
+ }
+
+ return 0;
+
+fail9:
+ drm_irq_uninstall(drm_dev);
+fail8:
+ drm_vblank_cleanup(drm_dev);
+fail7:
+ drm_mode_config_cleanup(vigs_dev->drm_dev);
+ vigs_comm_destroy(vigs_dev->comm);
+fail6:
+ vigs_dp_destroy(vigs_dev->dp);
+fail5:
+ vigs_fenceman_destroy(vigs_dev->fenceman);
+fail4:
+ ttm_object_device_release(&vigs_dev->obj_dev);
+fail3:
+ vigs_mman_destroy(vigs_dev->mman);
+fail2:
+ drm_rmmap(vigs_dev->drm_dev, vigs_dev->io_map);
+fail1:
+ idr_destroy(&vigs_dev->surface_idr);
+ mutex_destroy(&vigs_dev->surface_idr_mutex);
+
+ return ret;
+}
+
+/*
+ * Full device teardown: the exact reverse of vigs_device_init's
+ * successful path (and the same sequence its fail labels execute).
+ */
+void vigs_device_cleanup(struct vigs_device *vigs_dev)
+{
+ DRM_DEBUG_DRIVER("enter\n");
+
+ vigs_fbdev_destroy(vigs_dev->fbdev);
+ drm_irq_uninstall(vigs_dev->drm_dev);
+ drm_vblank_cleanup(vigs_dev->drm_dev);
+ drm_mode_config_cleanup(vigs_dev->drm_dev);
+ vigs_comm_destroy(vigs_dev->comm);
+ vigs_dp_destroy(vigs_dev->dp);
+ vigs_fenceman_destroy(vigs_dev->fenceman);
+ ttm_object_device_release(&vigs_dev->obj_dev);
+ vigs_mman_destroy(vigs_dev->mman);
+ drm_rmmap(vigs_dev->drm_dev, vigs_dev->io_map);
+ idr_destroy(&vigs_dev->surface_idr);
+ mutex_destroy(&vigs_dev->surface_idr_mutex);
+}
+
+/*
+ * File-operations mmap entry point: resolves the device from the DRM
+ * file and forwards to the memory manager. track_gem_access is the
+ * flag documented in vigs_device.h.
+ */
+int vigs_device_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+    struct drm_file *drm_file = filp->private_data;
+    struct vigs_device *vigs_dev = drm_file->minor->dev->dev_private;
+
+    if (!vigs_dev) {
+        DRM_ERROR("no device\n");
+        return -EINVAL;
+    }
+
+    return vigs_mman_mmap(vigs_dev->mman, filp, vma,
+                          vigs_dev->track_gem_access);
+}
+
+/*
+ * Registers 'sfc' in the surface idr and returns its new id via *id.
+ * Ids start at 1 (0 means "no surface" on the wire). Returns 0 on
+ * success or the negative errno from idr_alloc.
+ */
+int vigs_device_add_surface(struct vigs_device *vigs_dev,
+                            struct vigs_surface *sfc,
+                            vigsp_surface_id* id)
+{
+    int idr_ret;
+
+    mutex_lock(&vigs_dev->surface_idr_mutex);
+    idr_ret = idr_alloc(&vigs_dev->surface_idr, sfc, 1, 0, GFP_KERNEL);
+    mutex_unlock(&vigs_dev->surface_idr_mutex);
+
+    if (idr_ret < 0) {
+        return idr_ret;
+    }
+
+    *id = idr_ret;
+    return 0;
+}
+
+/* Unregisters a surface id allocated by vigs_device_add_surface. */
+void vigs_device_remove_surface(struct vigs_device *vigs_dev,
+ vigsp_surface_id sfc_id)
+{
+ mutex_lock(&vigs_dev->surface_idr_mutex);
+ idr_remove(&vigs_dev->surface_idr, sfc_id);
+ mutex_unlock(&vigs_dev->surface_idr_mutex);
+}
+
+/*
+ * Looks up a surface by id and takes a GEM reference on it.
+ * Returns NULL if the id is unknown or the GEM is already being
+ * freed (the freed-check closes the race with concurrent release).
+ * Caller must drop the reference when done.
+ */
+struct vigs_surface
+ *vigs_device_reference_surface(struct vigs_device *vigs_dev,
+ vigsp_surface_id sfc_id)
+{
+ struct vigs_surface *sfc;
+
+ mutex_lock(&vigs_dev->surface_idr_mutex);
+
+ sfc = idr_find(&vigs_dev->surface_idr, sfc_id);
+
+ if (sfc) {
+ if (vigs_gem_freed(&sfc->gem)) {
+ sfc = NULL;
+ } else {
+ drm_gem_object_reference(&sfc->gem.base);
+ }
+ }
+
+ mutex_unlock(&vigs_dev->surface_idr_mutex);
+
+ return sfc;
+}
+
+/*
+ * Same as vigs_device_add_surface, but takes drm_device::struct_mutex
+ * around the call - for callers that don't already hold it.
+ */
+int vigs_device_add_surface_unlocked(struct vigs_device *vigs_dev,
+ struct vigs_surface *sfc,
+ vigsp_surface_id* id)
+{
+ int ret;
+
+ mutex_lock(&vigs_dev->drm_dev->struct_mutex);
+ ret = vigs_device_add_surface(vigs_dev, sfc, id);
+ mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+
+ return ret;
+}
+
+/*
+ * Same as vigs_device_remove_surface, but takes
+ * drm_device::struct_mutex around the call.
+ */
+void vigs_device_remove_surface_unlocked(struct vigs_device *vigs_dev,
+ vigsp_surface_id sfc_id)
+{
+ mutex_lock(&vigs_dev->drm_dev->struct_mutex);
+ vigs_device_remove_surface(vigs_dev, sfc_id);
+ mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+}
--- /dev/null
+#ifndef _VIGS_DEVICE_H_
+#define _VIGS_DEVICE_H_
+
+#include "drmP.h"
+#include "vigs_protocol.h"
+
+struct vigs_mman;
+struct vigs_fenceman;
+struct vigs_dp;
+struct vigs_comm;
+struct vigs_fbdev;
+struct vigs_surface;
+
+/*
+ * Per-device state for the VIGS virtual GPU. Built by
+ * vigs_device_init and torn down by vigs_device_cleanup.
+ */
+struct vigs_device
+{
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct pci_dev *pci_dev;
+
+ /* Pending page-flip events; protected by drm_device::event_lock. */
+ struct list_head pageflip_event_list;
+
+ /* PCI BAR 0. */
+ resource_size_t vram_base;
+ resource_size_t vram_size;
+
+ /* PCI BAR 1. */
+ resource_size_t ram_base;
+ resource_size_t ram_size;
+
+ /* PCI BAR 2. */
+ resource_size_t io_base;
+ resource_size_t io_size;
+
+ /* Maps surface ids to vigs_surface; guarded by surface_idr_mutex. */
+ struct idr surface_idr;
+ struct mutex surface_idr_mutex;
+
+ /* Map of IO BAR. */
+ drm_local_map_t *io_map;
+
+ struct vigs_mman *mman;
+
+ struct ttm_object_device *obj_dev;
+
+ struct vigs_fenceman *fenceman;
+
+ struct vigs_dp *dp;
+
+ struct vigs_comm *comm;
+
+ struct vigs_fbdev *fbdev;
+
+ /*
+ * We need this because it's essential to read 'lower' and 'upper'
+ * fence acks atomically in IRQ handler and on SMP systems IRQ handler
+ * can be run on several CPUs concurrently.
+ */
+ spinlock_t irq_lock;
+
+ /*
+ * A hack we're forced to have in order to tell if we
+ * need to track GEM access or not in 'vigs_device_mmap'.
+ * current's 'mmap_sem' is write-locked while this is true,
+ * so no race will occur.
+ */
+ bool track_gem_access;
+
+ /*
+ * A hack to tell if DPMS callback is called from inside
+ * 'fb_blank' or vice-versa.
+ */
+ bool in_dpms;
+};
+
+int vigs_device_init(struct vigs_device *vigs_dev,
+ struct drm_device *drm_dev,
+ struct pci_dev *pci_dev,
+ unsigned long flags);
+
+void vigs_device_cleanup(struct vigs_device *vigs_dev);
+
+int vigs_device_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int vigs_device_add_surface(struct vigs_device *vigs_dev,
+ struct vigs_surface *sfc,
+ vigsp_surface_id* id);
+
+void vigs_device_remove_surface(struct vigs_device *vigs_dev,
+ vigsp_surface_id sfc_id);
+
+/* Takes a GEM reference; returns NULL if unknown or being freed. */
+struct vigs_surface
+ *vigs_device_reference_surface(struct vigs_device *vigs_dev,
+ vigsp_surface_id sfc_id);
+
+/*
+ * Locks drm_device::struct_mutex.
+ * @{
+ */
+
+int vigs_device_add_surface_unlocked(struct vigs_device *vigs_dev,
+ struct vigs_surface *sfc,
+ vigsp_surface_id* id);
+
+void vigs_device_remove_surface_unlocked(struct vigs_device *vigs_dev,
+ vigsp_surface_id sfc_id);
+
+/*
+ * @}
+ */
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Based on drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+ *
+ * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2014 Samsung Electronics Co., Ltd.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vigs_gem.h"
+#include <linux/dma-buf.h>
+
+/* Device attachment to VIGS dma-bufs is not implemented. */
+static int vigs_dmabuf_attach(struct dma_buf *dma_buf,
+                              struct device *target_dev,
+                              struct dma_buf_attachment *attach)
+{
+    DRM_DEBUG_PRIME("enter");
+    return -ENOSYS;
+}
+
+/* Nothing to undo: attach never succeeds. */
+static void vigs_dmabuf_detach(struct dma_buf *dma_buf,
+                               struct dma_buf_attachment *attach)
+{
+    DRM_DEBUG_PRIME("enter");
+}
+
+/* DMA mapping is not implemented for VIGS dma-bufs. */
+static struct sg_table *vigs_dmabuf_map(struct dma_buf_attachment *attach,
+                                        enum dma_data_direction dir)
+{
+    DRM_DEBUG_PRIME("enter");
+    return ERR_PTR(-ENOSYS);
+}
+
+/* Nothing to unmap: map never succeeds. */
+static void vigs_dmabuf_unmap(struct dma_buf_attachment *attach,
+                              struct sg_table *sgb,
+                              enum dma_data_direction dir)
+{
+    DRM_DEBUG_PRIME("enter");
+}
+
+/* Kernel page mapping of VIGS dma-bufs is not supported. */
+static void *vigs_dmabuf_kmap(struct dma_buf *dma_buf,
+                              unsigned long page_num)
+{
+    DRM_DEBUG_PRIME("enter");
+    return NULL;
+}
+
+/* Atomic kernel page mapping is not supported either. */
+static void *vigs_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+                                     unsigned long page_num)
+{
+    DRM_DEBUG_PRIME("enter");
+    return NULL;
+}
+
+/* No-op: kmap never hands out a mapping. */
+static void vigs_dmabuf_kunmap(struct dma_buf *dma_buf,
+                               unsigned long page_num,
+                               void *addr)
+{
+    DRM_DEBUG_PRIME("enter");
+}
+
+/* No-op: kmap_atomic never hands out a mapping. */
+static void vigs_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+                                      unsigned long page_num,
+                                      void *addr)
+{
+    DRM_DEBUG_PRIME("enter");
+}
+
+/* Userspace mmap of VIGS dma-bufs is not implemented. */
+static int vigs_dmabuf_mmap(struct dma_buf *dma_buf,
+                            struct vm_area_struct *vma)
+{
+    DRM_DEBUG_PRIME("enter");
+    return -ENOSYS;
+}
+
+/* Kernel vmap of VIGS dma-bufs is not supported. */
+static void *vigs_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+    DRM_DEBUG_PRIME("enter");
+    return NULL;
+}
+
+/* No-op: vmap never hands out a mapping. */
+static void vigs_dmabuf_vunmap(struct dma_buf *dma_buf,
+                               void *vaddr)
+{
+    DRM_DEBUG_PRIME("enter");
+}
+
+/* CPU access needs no preparation; always succeeds. */
+static int vigs_dmabuf_begin_cpu_access(struct dma_buf *dma_buf,
+                                        size_t start,
+                                        size_t length,
+                                        enum dma_data_direction direction)
+{
+    DRM_DEBUG_PRIME("enter");
+    return 0;
+}
+
+/* CPU access needs no flushing afterwards. */
+static void vigs_dmabuf_end_cpu_access(struct dma_buf *dma_buf,
+                                       size_t start,
+                                       size_t length,
+                                       enum dma_data_direction direction)
+{
+    DRM_DEBUG_PRIME("enter");
+}
+
+/*
+ * VIGS dma-buf ops: mostly stubs - PRIME here only supports the
+ * same-device import fast path (see vigs_dmabuf_prime_import).
+ */
+const struct dma_buf_ops vigs_dmabuf_ops = {
+ .attach = vigs_dmabuf_attach,
+ .detach = vigs_dmabuf_detach,
+ .map_dma_buf = vigs_dmabuf_map,
+ .unmap_dma_buf = vigs_dmabuf_unmap,
+ .release = drm_gem_dmabuf_release,
+ .kmap = vigs_dmabuf_kmap,
+ .kmap_atomic = vigs_dmabuf_kmap_atomic,
+ .kunmap = vigs_dmabuf_kunmap,
+ .kunmap_atomic = vigs_dmabuf_kunmap_atomic,
+ .mmap = vigs_dmabuf_mmap,
+ .vmap = vigs_dmabuf_vmap,
+ .vunmap = vigs_dmabuf_vunmap,
+ .begin_cpu_access = vigs_dmabuf_begin_cpu_access,
+ .end_cpu_access = vigs_dmabuf_end_cpu_access,
+};
+
+/* PRIME export: delegates straight to the DRM core helper. */
+int vigs_prime_handle_to_fd(struct drm_device *dev,
+                            struct drm_file *file_priv,
+                            uint32_t handle,
+                            uint32_t flags,
+                            int *prime_fd)
+{
+    DRM_DEBUG_PRIME("enter");
+    return drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags,
+                                      prime_fd);
+}
+
+/* PRIME import: delegates straight to the DRM core helper. */
+int vigs_prime_fd_to_handle(struct drm_device *dev,
+                            struct drm_file *file_priv,
+                            int fd,
+                            uint32_t *handle)
+{
+    DRM_DEBUG_PRIME("enter");
+    return drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
+}
+
+/*
+ * Wraps a VIGS GEM in a dma-buf using our (mostly stub) ops; the GEM
+ * object itself becomes the dma-buf's priv (checked on import).
+ */
+struct dma_buf *vigs_dmabuf_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj,
+ int flags)
+{
+ struct vigs_gem_object *vigs_gem = gem_to_vigs_gem(gem_obj);
+
+ DRM_DEBUG_PRIME("enter");
+
+ return dma_buf_export(gem_obj,
+ &vigs_dmabuf_ops,
+ vigs_gem_size(vigs_gem),
+ flags);
+}
+
+/*
+ * PRIME import. Only the self-import case is supported: a dma-buf we
+ * exported ourselves, from this very device, is unwrapped back to its
+ * GEM. Anything foreign fails with -ENOSYS (the stub dma-buf ops
+ * cannot map external buffers).
+ */
+struct drm_gem_object *vigs_dmabuf_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj;
+
+ DRM_DEBUG_PRIME("enter");
+
+ if (dma_buf->ops == &vigs_dmabuf_ops) {
+ obj = dma_buf->priv;
+
+ if (obj->dev == dev) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ drm_gem_object_reference(obj);
+ return obj;
+ }
+ }
+
+ return ERR_PTR(-ENOSYS);
+}
--- /dev/null
+#ifndef _VIGS_DMABUF_H_
+#define _VIGS_DMABUF_H_
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_file;
+struct dma_buf;
+struct drm_gem_object;
+
+/* PRIME export: GEM handle -> dma-buf fd (DRM core helper wrapper). */
+int vigs_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t flags,
+ int *prime_fd);
+
+/* PRIME import: dma-buf fd -> GEM handle (DRM core helper wrapper). */
+int vigs_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv,
+ int fd,
+ uint32_t *handle);
+
+struct dma_buf *vigs_dmabuf_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj,
+ int flags);
+
+/* Only self-import (our own exported dma-buf) is supported. */
+struct drm_gem_object *vigs_dmabuf_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
+#endif /* _VIGS_DMABUF_H_ */
--- /dev/null
+#include "vigs_dp.h"
+#include "vigs_surface.h"
+#include "vigs_device.h"
+
+/*
+ * Allocates the display-processor bookkeeping structure.
+ * On success *dp points to a zeroed vigs_dp and 0 is returned;
+ * on failure *dp is NULL and -ENOMEM is returned.
+ */
+int vigs_dp_create(struct vigs_device *vigs_dev,
+                   struct vigs_dp **dp)
+{
+    DRM_DEBUG_DRIVER("enter\n");
+
+    /*
+     * kzalloc already yields the NULL that the old 'fail1' label
+     * re-assigned to *dp, so the goto/ret error ladder was redundant
+     * and has been removed.
+     */
+    *dp = kzalloc(sizeof(**dp), GFP_KERNEL);
+
+    return *dp ? 0 : -ENOMEM;
+}
+
+/* Frees the display-processor state; safe to call with NULL. */
+static inline void vigs_dp_destroy_log(void)
+{
+    DRM_DEBUG_DRIVER("enter\n");
+}
+
+void vigs_dp_destroy(struct vigs_dp *dp)
+{
+    vigs_dp_destroy_log();
+    kfree(dp);
+}
+
+/*
+ * Clears every weak pointer to 'sfc' from all planes' fb buffers.
+ * Must be called with drm_device::struct_mutex held (see vigs_dp.h).
+ */
+void vigs_dp_remove_surface(struct vigs_dp *dp, struct vigs_surface *sfc)
+{
+    int plane, buf;
+
+    for (plane = 0; plane < VIGS_MAX_PLANES; ++plane) {
+        struct vigs_dp_fb_buf *fb_bufs = dp->planes[plane].fb_bufs;
+
+        for (buf = 0; buf < DRM_VIGS_NUM_DP_FB_BUF; ++buf) {
+            if (fb_bufs[buf].y == sfc) {
+                fb_bufs[buf].y = NULL;
+            }
+            if (fb_bufs[buf].c == sfc) {
+                fb_bufs[buf].c = NULL;
+            }
+        }
+    }
+}
+
+/*
+ * IOCTL: creates a surface and binds it to a DP plane/buffer slot
+ * (Y or C channel). The slot check is done twice: once before the
+ * (potentially slow) surface creation as a cheap early-out, and once
+ * after, atomically with the assignment, to close the race window.
+ *
+ * NOTE(review): invalid-argument paths return -ENOMEM rather than
+ * -EINVAL; kept as-is since userspace may depend on it - confirm.
+ */
+int vigs_dp_surface_create_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct vigs_dp *dp = vigs_dev->dp;
+ struct drm_vigs_dp_create_surface *args = data;
+ struct vigs_surface *sfc = NULL;
+ bool busy;
+ uint32_t handle;
+ int ret;
+
+ if (args->dp_plane >= VIGS_MAX_PLANES) {
+ DRM_ERROR("bad DP plane = %u\n", args->dp_plane);
+ return -ENOMEM;
+ }
+
+ if (args->dp_fb_buf >= DRM_VIGS_NUM_DP_FB_BUF) {
+ DRM_ERROR("bad DP fb buf = %u\n", args->dp_fb_buf);
+ return -ENOMEM;
+ }
+
+ /* First (advisory) busy check, under struct_mutex. */
+ mutex_lock(&vigs_dev->drm_dev->struct_mutex);
+
+ switch (args->dp_mem_flag) {
+ case DRM_VIGS_DP_FB_Y:
+ busy = dp->planes[args->dp_plane].fb_bufs[args->dp_fb_buf].y != NULL;
+ break;
+ case DRM_VIGS_DP_FB_C:
+ busy = dp->planes[args->dp_plane].fb_bufs[args->dp_fb_buf].c != NULL;
+ break;
+ default:
+ mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+ DRM_ERROR("bad DP mem flag = %u\n", args->dp_mem_flag);
+ return -ENOMEM;
+ }
+
+ mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+
+ if (busy) {
+ DRM_INFO("DP mem %u:%u:%u is busy\n", args->dp_plane,
+ args->dp_fb_buf,
+ args->dp_mem_flag);
+ return -ENOMEM;
+ }
+
+ ret = vigs_surface_create(vigs_dev,
+ args->width,
+ args->height,
+ args->stride,
+ args->format,
+ false,
+ &sfc);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ /*
+ * Check busy again since DP mem might
+ * gotten busy while we were creating our surface.
+ * If it's not busy then occupy it.
+ */
+
+ mutex_lock(&vigs_dev->drm_dev->struct_mutex);
+
+ switch (args->dp_mem_flag) {
+ case DRM_VIGS_DP_FB_Y:
+ if (dp->planes[args->dp_plane].fb_bufs[args->dp_fb_buf].y) {
+ busy = true;
+ } else {
+ dp->planes[args->dp_plane].fb_bufs[args->dp_fb_buf].y = sfc;
+ }
+ break;
+ case DRM_VIGS_DP_FB_C:
+ if (dp->planes[args->dp_plane].fb_bufs[args->dp_fb_buf].c) {
+ busy = true;
+ } else {
+ dp->planes[args->dp_plane].fb_bufs[args->dp_fb_buf].c = sfc;
+ }
+ break;
+ default:
+ /* Unreachable: the flag was validated by the first switch. */
+ drm_gem_object_unreference(&sfc->gem.base);
+ mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+ BUG();
+ return -ENOMEM;
+ }
+
+ mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+
+ if (busy) {
+ /* Lost the race: drop the surface we just created. */
+ drm_gem_object_unreference_unlocked(&sfc->gem.base);
+
+ DRM_INFO("DP mem %u:%u:%u is busy\n", args->dp_plane,
+ args->dp_fb_buf,
+ args->dp_mem_flag);
+ return -ENOMEM;
+ }
+
+ ret = drm_gem_handle_create(file_priv,
+ &sfc->gem.base,
+ &handle);
+
+ if (ret == 0) {
+ args->handle = handle;
+ args->size = vigs_gem_size(&sfc->gem);
+ args->id = sfc->id;
+ } else {
+ /*
+ * Don't bother setting DP mem slot to NULL here, DRM
+ * will do this for us once the GEM is freed.
+ */
+ }
+
+ /* Drop the creation reference; the handle (if any) keeps it alive. */
+ drm_gem_object_unreference_unlocked(&sfc->gem.base);
+
+ return ret;
+}
+
+/*
+ * IOCTL: opens a handle to an already-created DP surface slot.
+ * A GEM reference is taken under struct_mutex (the slot holds only a
+ * weak pointer), then transferred to the new userspace handle.
+ *
+ * NOTE(review): invalid/empty slots return -ENOMEM rather than
+ * -EINVAL/-ENOENT; kept as-is since userspace may depend on it.
+ */
+int vigs_dp_surface_open_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct vigs_dp *dp = vigs_dev->dp;
+ struct drm_vigs_dp_open_surface *args = data;
+ struct vigs_surface *sfc = NULL;
+ uint32_t handle;
+ int ret;
+
+ if (args->dp_plane >= VIGS_MAX_PLANES) {
+ DRM_ERROR("bad DP plane = %u\n", args->dp_plane);
+ return -ENOMEM;
+ }
+
+ if (args->dp_fb_buf >= DRM_VIGS_NUM_DP_FB_BUF) {
+ DRM_ERROR("bad DP fb buf = %u\n", args->dp_fb_buf);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&vigs_dev->drm_dev->struct_mutex);
+
+ switch (args->dp_mem_flag) {
+ case DRM_VIGS_DP_FB_Y:
+ sfc = dp->planes[args->dp_plane].fb_bufs[args->dp_fb_buf].y;
+ break;
+ case DRM_VIGS_DP_FB_C:
+ sfc = dp->planes[args->dp_plane].fb_bufs[args->dp_fb_buf].c;
+ break;
+ default:
+ mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+ DRM_ERROR("bad DP mem flag = %u\n", args->dp_mem_flag);
+ return -ENOMEM;
+ }
+
+ /* Pin the surface with a reference before dropping the lock. */
+ if (sfc) {
+ drm_gem_object_reference(&sfc->gem.base);
+ } else {
+ mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+ DRM_INFO("DP mem %u:%u:%u is empty\n", args->dp_plane,
+ args->dp_fb_buf,
+ args->dp_mem_flag);
+ return -ENOMEM;
+ }
+
+ mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+
+ ret = drm_gem_handle_create(file_priv,
+ &sfc->gem.base,
+ &handle);
+
+ if (ret == 0) {
+ args->handle = handle;
+ }
+
+ /* Drop our temporary reference; the handle (if any) holds its own. */
+ drm_gem_object_unreference_unlocked(&sfc->gem.base);
+
+ return ret;
+}
--- /dev/null
+#ifndef _VIGS_DP_H_
+#define _VIGS_DP_H_
+
+#include "drmP.h"
+#include "vigs_protocol.h"
+#include <drm/vigs_drm.h>
+
+struct vigs_device;
+struct vigs_surface;
+
+/* One Y/C surface pair backing a display-processor framebuffer. */
+struct vigs_dp_fb_buf
+{
+ /*
+ * These are weak pointers, no reference is kept
+ * for them. When surface is destroyed they're
+ * automatically reset to NULL. Must be
+ * accessed only with drm_device::struct_mutex held.
+ * @{
+ */
+
+ struct vigs_surface *y;
+ struct vigs_surface *c;
+
+ /*
+ * @}
+ */
+};
+
+struct vigs_dp_plane
+{
+ struct vigs_dp_fb_buf fb_bufs[DRM_VIGS_NUM_DP_FB_BUF];
+};
+
+/* Display-processor state: per-plane framebuffer slot bookkeeping. */
+struct vigs_dp
+{
+ struct vigs_dp_plane planes[VIGS_MAX_PLANES];
+};
+
+int vigs_dp_create(struct vigs_device *vigs_dev,
+ struct vigs_dp **dp);
+
+void vigs_dp_destroy(struct vigs_dp *dp);
+
+/*
+ * Must be called with drm_device::struct_mutex held.
+ * @{
+ */
+
+/* Clears all weak pointers to 'sfc' (called when it's destroyed). */
+void vigs_dp_remove_surface(struct vigs_dp *dp, struct vigs_surface *sfc);
+
+/*
+ * @}
+ */
+
+/*
+ * IOCTLs
+ * @{
+ */
+
+int vigs_dp_surface_create_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_dp_surface_open_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+#endif
--- /dev/null
+#include "vigs_driver.h"
+#include "vigs_gem.h"
+#include "vigs_device.h"
+#include "vigs_fbdev.h"
+#include "vigs_comm.h"
+#include "vigs_surface.h"
+#include "vigs_execbuffer.h"
+#include "vigs_irq.h"
+#include "vigs_fence.h"
+#include "vigs_file.h"
+#include "vigs_plane.h"
+#include "vigs_mman.h"
+#include "vigs_dp.h"
+#include "vigs_dmabuf.h"
+#include <drm/drmP.h>
+#include <linux/module.h>
+#include <drm/vigs_drm.h>
+
+/* PCI IDs of the virtual VIGS adapter. */
+#define PCI_VENDOR_ID_VIGS 0x19B2
+#define PCI_DEVICE_ID_VIGS 0x1011
+
+#define DRIVER_NAME "vigs"
+#define DRIVER_DESC "VIGS DRM"
+#define DRIVER_DATE "20121102"
+/* Major version tracks the UAPI version from vigs_drm.h. */
+#define DRIVER_MAJOR DRM_VIGS_DRIVER_VERSION
+#define DRIVER_MINOR 0
+
+/* Match any VIGS adapter regardless of subsystem IDs. */
+static struct pci_device_id vigs_pci_table[] =
+{
+ {
+ .vendor = PCI_VENDOR_ID_VIGS,
+ .device = PCI_DEVICE_ID_VIGS,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, vigs_pci_table);
+
+/*
+ * Driver-private IOCTL table. All entries require authentication and
+ * are available to render nodes; none of them take the legacy BKL
+ * (DRM_UNLOCKED).
+ */
+static struct drm_ioctl_desc vigs_drm_ioctls[] =
+{
+ DRM_IOCTL_DEF_DRV(VIGS_GET_PROTOCOL_VERSION, vigs_comm_get_protocol_version_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_CREATE_SURFACE, vigs_surface_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_CREATE_EXECBUFFER, vigs_execbuffer_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_GEM_MAP, vigs_gem_map_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_GEM_WAIT, vigs_gem_wait_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_SURFACE_INFO, vigs_surface_info_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_EXEC, vigs_execbuffer_exec_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_SURFACE_SET_GPU_DIRTY, vigs_surface_set_gpu_dirty_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_SURFACE_START_ACCESS, vigs_surface_start_access_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_SURFACE_END_ACCESS, vigs_surface_end_access_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_CREATE_FENCE, vigs_fence_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_FENCE_WAIT, vigs_fence_wait_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_FENCE_SIGNALED, vigs_fence_signaled_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_FENCE_UNREF, vigs_fence_unref_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_PLANE_SET_ZPOS, vigs_plane_set_zpos_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_PLANE_SET_TRANSFORM, vigs_plane_set_transform_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_DP_CREATE_SURFACE, vigs_dp_surface_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VIGS_DP_OPEN_SURFACE, vigs_dp_surface_open_ioctl,
+ DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+};
+
+/*
+ * Character-device file operations; all standard DRM entry points
+ * except mmap, which is routed through the VIGS memory manager.
+ */
+static const struct file_operations vigs_drm_driver_fops =
+{
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .poll = drm_poll,
+ .mmap = vigs_device_mmap,
+ .read = drm_read
+};
+
+/*
+ * DRM driver 'load' callback: allocate and initialize the per-device
+ * VIGS state and hang it off dev->dev_private.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, or whatever
+ * vigs_device_init() returns.
+ */
+static int vigs_drm_load(struct drm_device *dev, unsigned long flags)
+{
+    int ret = 0;
+    struct vigs_device *vigs_dev = NULL;
+
+    DRM_DEBUG_DRIVER("enter\n");
+
+    vigs_dev = kzalloc(sizeof(*vigs_dev), GFP_KERNEL);
+
+    if (vigs_dev == NULL) {
+        DRM_ERROR("failed to allocate VIGS device\n");
+        return -ENOMEM;
+    }
+
+    dev->dev_private = vigs_dev;
+
+    ret = vigs_device_init(vigs_dev, dev, dev->pdev, flags);
+
+    if (ret != 0) {
+        goto fail;
+    }
+
+    return 0;
+
+fail:
+    /*
+     * Fix: don't leave dev->dev_private pointing at freed memory --
+     * other callbacks dereference it unconditionally.
+     */
+    dev->dev_private = NULL;
+    kfree(vigs_dev);
+
+    return ret;
+}
+
+/*
+ * DRM driver 'unload' callback: tear down the VIGS state created in
+ * vigs_drm_load(). Always succeeds.
+ */
+static int vigs_drm_unload(struct drm_device *dev)
+{
+    struct vigs_device *vigs_dev = dev->dev_private;
+
+    DRM_DEBUG_DRIVER("enter\n");
+
+    vigs_device_cleanup(vigs_dev);
+
+    dev->dev_private = NULL;
+    kfree(vigs_dev);
+
+    return 0;
+}
+
+/*
+ * DRM per-file 'open' callback: create the per-file VIGS state and
+ * remember the device's address-space mapping for the TTM BO device.
+ */
+static int vigs_drm_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+    struct vigs_device *vigs_dev = dev->dev_private;
+    struct vigs_file *vigs_file;
+    int ret;
+
+    DRM_DEBUG_DRIVER("enter\n");
+
+    ret = vigs_file_create(vigs_dev, &vigs_file);
+    if (ret != 0) {
+        return ret;
+    }
+
+    file_priv->driver_priv = vigs_file;
+
+    vigs_dev->mman->bo_dev.dev_mapping = dev->dev_mapping;
+
+    return 0;
+}
+
+/*
+ * DRM 'preclose' callback: drop any pending page-flip events that
+ * belong to the closing file so their completion never targets a
+ * dead drm_file. The event list is protected by dev->event_lock.
+ */
+static void vigs_drm_preclose(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = dev->dev_private;
+ struct drm_pending_vblank_event *event, *tmp;
+ unsigned long flags;
+
+ DRM_DEBUG_DRIVER("enter\n");
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ /* _safe variant: entries are unlinked while iterating. */
+ list_for_each_entry_safe(event, tmp,
+ &vigs_dev->pageflip_event_list,
+ base.link) {
+ if (event->base.file_priv == file_priv) {
+ list_del(&event->base.link);
+ event->base.destroy(&event->base);
+ }
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/*
+ * DRM 'postclose' callback: release the per-file state created in
+ * vigs_drm_open().
+ */
+static void vigs_drm_postclose(struct drm_device *dev,
+                               struct drm_file *file_priv)
+{
+    DRM_DEBUG_DRIVER("enter\n");
+
+    vigs_file_destroy(file_priv->driver_priv);
+    file_priv->driver_priv = NULL;
+}
+
+/*
+ * DRM 'lastclose' callback: restore the fbdev console mode and reset
+ * host communication when the last userspace client goes away.
+ */
+static void vigs_drm_lastclose(struct drm_device *dev)
+{
+    struct vigs_device *vigs_dev = dev->dev_private;
+
+    DRM_DEBUG_DRIVER("enter\n");
+
+    if (vigs_dev->fbdev != NULL) {
+        vigs_fbdev_restore_mode(vigs_dev->fbdev);
+    }
+
+    vigs_comm_reset(vigs_dev->comm);
+}
+
+/*
+ * Main DRM driver descriptor: GEM + KMS with shared IRQ, render-node
+ * and PRIME (dma-buf) support.
+ */
+static struct drm_driver vigs_drm_driver =
+{
+ .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+ DRIVER_RENDER | DRIVER_PRIME,
+ .load = vigs_drm_load,
+ .unload = vigs_drm_unload,
+ .open = vigs_drm_open,
+ .preclose = vigs_drm_preclose,
+ .postclose = vigs_drm_postclose,
+ .lastclose = vigs_drm_lastclose,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = vigs_enable_vblank,
+ .disable_vblank = vigs_disable_vblank,
+ .irq_handler = vigs_irq_handler,
+ .gem_free_object = vigs_gem_free_object,
+ .gem_open_object = vigs_gem_open_object,
+ .gem_close_object = vigs_gem_close_object,
+
+ .prime_handle_to_fd = vigs_prime_handle_to_fd,
+ .prime_fd_to_handle = vigs_prime_fd_to_handle,
+ .gem_prime_export = vigs_dmabuf_prime_export,
+ .gem_prime_import = vigs_dmabuf_prime_import,
+
+ .dumb_create = vigs_gem_dumb_create,
+ .dumb_map_offset = vigs_gem_dumb_map_offset,
+ .dumb_destroy = vigs_gem_dumb_destroy,
+ .ioctls = vigs_drm_ioctls,
+ .num_ioctls = DRM_ARRAY_SIZE(vigs_drm_ioctls),
+ .fops = &vigs_drm_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+/* PCI probe: create and register a DRM device for the VIGS adapter. */
+static int vigs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &vigs_drm_driver);
+}
+
+/* PCI remove: unregister and free the DRM device bound to 'pdev'. */
+static void vigs_pci_remove(struct pci_dev *pdev)
+{
+    struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+    drm_put_dev(drm_dev);
+}
+
+/* PCI driver glue registered via drm_pci_init(). */
+static struct pci_driver vigs_pci_driver =
+{
+ .name = DRIVER_NAME,
+ .id_table = vigs_pci_table,
+ .probe = vigs_pci_probe,
+ .remove = vigs_pci_remove,
+};
+
+/* Register the driver with the DRM/PCI core; returns 0 or -errno. */
+int vigs_driver_register(void)
+{
+ return drm_pci_init(&vigs_drm_driver, &vigs_pci_driver);
+}
+
+/* Undo vigs_driver_register(). */
+void vigs_driver_unregister(void)
+{
+ drm_pci_exit(&vigs_drm_driver, &vigs_pci_driver);
+}
--- /dev/null
+#ifndef _VIGS_DRIVER_H_
+#define _VIGS_DRIVER_H_
+
+#include <linux/types.h>
+
+/* Module entry points: register/unregister the VIGS PCI DRM driver. */
+int vigs_driver_register(void);
+
+void vigs_driver_unregister(void);
+
+#endif
--- /dev/null
+#include "vigs_execbuffer.h"
+#include "vigs_device.h"
+#include "vigs_surface.h"
+#include "vigs_comm.h"
+#include "vigs_fence.h"
+#include <drm/vigs_drm.h>
+
+/*
+ * Convenience view of a command request payload: 'data' aliases the
+ * typed request pointers so the parser can cast once per command.
+ */
+union vigs_request
+{
+ struct vigsp_cmd_update_vram_request *update_vram;
+ struct vigsp_cmd_update_gpu_request *update_gpu;
+ struct vigsp_cmd_copy_request *copy;
+ struct vigsp_cmd_solid_fill_request *solid_fill;
+ struct vigsp_cmd_ga_copy_request *ga_copy;
+ void *data;
+};
+
+/*
+ * Reference surface 'sfc_id' and record it in 'buffer'; add the
+ * underlying BO to the TTM validation 'list' unless it is already
+ * there. Note: a surface reference is taken even for a duplicate BO
+ * that is not re-added to the list -- the caller keeps every 'buffer'
+ * entry and drops each reference via
+ * vigs_execbuffer_clear_validation(), so nothing leaks.
+ *
+ * Must be called with drm_device::struct_mutex held (callers hold it).
+ * Returns 0 on success, -EINVAL if the surface does not exist.
+ */
+static int vigs_execbuffer_validate_buffer(struct vigs_device *vigs_dev,
+ struct vigs_validate_buffer *buffer,
+ struct list_head* list,
+ vigsp_surface_id sfc_id,
+ vigsp_cmd cmd,
+ int which,
+ void *data)
+{
+ struct vigs_surface *sfc = vigs_device_reference_surface(vigs_dev, sfc_id);
+ struct vigs_validate_buffer *tmp;
+
+ if (!sfc) {
+ DRM_ERROR("Surface %u not found\n", sfc_id);
+ return -EINVAL;
+ }
+
+ buffer->base.bo = &sfc->gem.bo;
+ buffer->cmd = cmd;
+ buffer->which = which;
+ buffer->data = data;
+
+ list_for_each_entry(tmp, list, base.head) {
+ if (tmp->base.bo == buffer->base.bo) {
+ /*
+ * Already on the list, we're done.
+ */
+ return 0;
+ }
+ }
+
+ list_add_tail(&buffer->base.head, list);
+
+ return 0;
+}
+
+/*
+ * Drop the surface reference taken by
+ * vigs_execbuffer_validate_buffer() for one validation entry.
+ * Caller holds drm_device::struct_mutex.
+ */
+static void vigs_execbuffer_clear_validation(struct vigs_validate_buffer *buffer)
+{
+    struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(buffer->base.bo);
+
+    drm_gem_object_unreference(&vigs_gem->base);
+}
+
+/*
+ * GEM destroy callback for execbuffers: they own no resources beyond
+ * the GEM object itself, so there is deliberately nothing to do here.
+ */
+static void vigs_execbuffer_destroy(struct vigs_gem_object *gem)
+{
+}
+
+/*
+ * Allocate a vigs_execbuffer of at least 'size' bytes and initialize
+ * its GEM ('kernel' selects a kernel-internal vs. userspace-visible
+ * GEM). On success *execbuffer holds one reference owned by the
+ * caller; on failure *execbuffer is NULL.
+ *
+ * NOTE(review): if vigs_gem_init() fails, the kzalloc'ed object is
+ * not freed here -- presumably vigs_gem_init() releases it through
+ * the destroy path on failure; confirm against vigs_gem.c before
+ * adding a kfree (risk of double-free).
+ */
+int vigs_execbuffer_create(struct vigs_device *vigs_dev,
+ unsigned long size,
+ bool kernel,
+ struct vigs_execbuffer **execbuffer)
+{
+ int ret = 0;
+
+ *execbuffer = kzalloc(sizeof(**execbuffer), GFP_KERNEL);
+
+ if (!*execbuffer) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ ret = vigs_gem_init(&(*execbuffer)->gem,
+ vigs_dev,
+ VIGS_GEM_TYPE_EXECBUFFER,
+ size,
+ kernel,
+ &vigs_execbuffer_destroy);
+
+ if (ret != 0) {
+ goto fail1;
+ }
+
+ return 0;
+
+fail1:
+ *execbuffer = NULL;
+
+ return ret;
+}
+
+/*
+ * Parse the execbuffer's command stream, reference every surface the
+ * commands touch and build the TTM validation list.
+ *
+ * Two passes: the first validates that every request header and its
+ * payload lie inside the GEM and counts the buffer slots needed; the
+ * second fills *buffers (one vigs_validate_buffer per surface
+ * occurrence -- a BO is only added to 'list' once, but a reference is
+ * taken per occurrence and dropped via
+ * vigs_execbuffer_clear_validations()). *sync is set when the stream
+ * contains commands that require waiting for the host.
+ *
+ * Returns 0 on success, -EINVAL on a malformed stream or unknown
+ * surface, -ENOMEM on allocation failure. On failure *buffers is NULL.
+ */
+int vigs_execbuffer_validate_buffers(struct vigs_execbuffer *execbuffer,
+                                     struct list_head* list,
+                                     struct vigs_validate_buffer **buffers,
+                                     int *num_buffers,
+                                     bool *sync)
+{
+    struct vigs_device *vigs_dev = execbuffer->gem.base.dev->dev_private;
+    void *data = execbuffer->gem.kptr;
+    u32 data_size = vigs_gem_size(&execbuffer->gem);
+    struct vigsp_cmd_batch_header *batch_header = data;
+    struct vigsp_cmd_request_header *request_header =
+        (struct vigsp_cmd_request_header*)(batch_header + 1);
+    union vigs_request request;
+    int num_commands = 0, ret = 0;
+
+    *num_buffers = 0;
+    *sync = false;
+
+    /*
+     * GEM is always at least PAGE_SIZE long, so don't check
+     * if batch header is out of bounds.
+     */
+
+    while ((void*)request_header <
+           ((void*)(batch_header + 1) + batch_header->size)) {
+        if (((void*)(request_header) + sizeof(*request_header)) >
+            (data + data_size)) {
+            DRM_ERROR("request header outside of GEM\n");
+            ret = -EINVAL;
+            goto fail1;
+        }
+
+        if (((void*)(request_header + 1) + request_header->size) >
+            (data + data_size)) {
+            DRM_ERROR("request data outside of GEM\n");
+            ret = -EINVAL;
+            goto fail1;
+        }
+
+        request.data = (request_header + 1);
+
+        switch (request_header->cmd) {
+        case vigsp_cmd_update_vram:
+        case vigsp_cmd_update_gpu:
+            *sync = true;
+            *num_buffers += 1;
+            break;
+        case vigsp_cmd_copy:
+            *num_buffers += 2;
+            break;
+        case vigsp_cmd_solid_fill:
+            *num_buffers += 1;
+            break;
+        case vigsp_cmd_ga_copy:
+            *num_buffers += 2;
+            break;
+        default:
+            break;
+        }
+
+        request_header =
+            (struct vigsp_cmd_request_header*)(request.data +
+                                               request_header->size);
+
+        ++num_commands;
+    }
+
+    /*
+     * Fix: kcalloc instead of kmalloc(n * size) -- the command stream
+     * is user-controlled, so guard the multiplication against integer
+     * overflow.
+     */
+    *buffers = kcalloc(*num_buffers, sizeof(**buffers), GFP_KERNEL);
+
+    if (!*buffers) {
+        ret = -ENOMEM;
+        goto fail1;
+    }
+
+    request_header = (struct vigsp_cmd_request_header*)(batch_header + 1);
+
+    /* Surface references are taken under struct_mutex. */
+    mutex_lock(&vigs_dev->drm_dev->struct_mutex);
+
+    *num_buffers = 0;
+
+    while (--num_commands >= 0) {
+        request.data = (request_header + 1);
+
+        switch (request_header->cmd) {
+        case vigsp_cmd_update_vram:
+            ret = vigs_execbuffer_validate_buffer(vigs_dev,
+                                                  &(*buffers)[*num_buffers],
+                                                  list,
+                                                  request.update_vram->sfc_id,
+                                                  request_header->cmd,
+                                                  0,
+                                                  request.data);
+
+            if (ret != 0) {
+                goto fail2;
+            }
+
+            ++*num_buffers;
+
+            break;
+        case vigsp_cmd_update_gpu:
+            ret = vigs_execbuffer_validate_buffer(vigs_dev,
+                                                  &(*buffers)[*num_buffers],
+                                                  list,
+                                                  request.update_gpu->sfc_id,
+                                                  request_header->cmd,
+                                                  0,
+                                                  request.data);
+
+            if (ret != 0) {
+                goto fail2;
+            }
+
+            ++*num_buffers;
+
+            break;
+        case vigsp_cmd_copy:
+            ret = vigs_execbuffer_validate_buffer(vigs_dev,
+                                                  &(*buffers)[*num_buffers],
+                                                  list,
+                                                  request.copy->src_id,
+                                                  request_header->cmd,
+                                                  0,
+                                                  request.data);
+
+            if (ret != 0) {
+                goto fail2;
+            }
+
+            ++*num_buffers;
+
+            ret = vigs_execbuffer_validate_buffer(vigs_dev,
+                                                  &(*buffers)[*num_buffers],
+                                                  list,
+                                                  request.copy->dst_id,
+                                                  request_header->cmd,
+                                                  1,
+                                                  request.data);
+
+            if (ret != 0) {
+                goto fail2;
+            }
+
+            ++*num_buffers;
+
+            break;
+        case vigsp_cmd_solid_fill:
+            ret = vigs_execbuffer_validate_buffer(vigs_dev,
+                                                  &(*buffers)[*num_buffers],
+                                                  list,
+                                                  request.solid_fill->sfc_id,
+                                                  request_header->cmd,
+                                                  0,
+                                                  request.data);
+
+            if (ret != 0) {
+                goto fail2;
+            }
+
+            ++*num_buffers;
+
+            break;
+        case vigsp_cmd_ga_copy:
+            ret = vigs_execbuffer_validate_buffer(vigs_dev,
+                                                  &(*buffers)[*num_buffers],
+                                                  list,
+                                                  request.ga_copy->src_id,
+                                                  request_header->cmd,
+                                                  0,
+                                                  request.data);
+
+            if (ret != 0) {
+                goto fail2;
+            }
+
+            ++*num_buffers;
+
+            ret = vigs_execbuffer_validate_buffer(vigs_dev,
+                                                  &(*buffers)[*num_buffers],
+                                                  list,
+                                                  request.ga_copy->dst_id,
+                                                  request_header->cmd,
+                                                  1,
+                                                  request.data);
+
+            if (ret != 0) {
+                goto fail2;
+            }
+
+            ++*num_buffers;
+
+            break;
+        default:
+            break;
+        }
+
+        request_header =
+            (struct vigsp_cmd_request_header*)(request.data +
+                                               request_header->size);
+    }
+
+    mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+
+    return 0;
+
+fail2:
+    /* Drop the references taken for buffers validated so far. */
+    while (--*num_buffers >= 0) {
+        vigs_execbuffer_clear_validation(&(*buffers)[*num_buffers]);
+    }
+    mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+    kfree(*buffers);
+fail1:
+    *buffers = NULL;
+
+    return ret;
+}
+
+/*
+ * Patch the command stream in place for each validated buffer now
+ * that all BOs are reserved: fill in VRAM offsets for update_vram/
+ * update_gpu (or neutralize them by zeroing sfc_id when no transfer
+ * is needed), mark copy/solid_fill destinations dirty, and set up
+ * ga_copy source scanout parameters. *sync is raised when a ga_copy
+ * reads from VRAM and the host must be waited for.
+ *
+ * buffers[i].which distinguishes source (0) from destination (1)
+ * operands of two-operand commands.
+ */
+void vigs_execbuffer_process_buffers(struct vigs_execbuffer *execbuffer,
+ struct vigs_validate_buffer *buffers,
+ int num_buffers,
+ bool *sync)
+{
+ union vigs_request request;
+ struct vigs_gem_object *gem;
+ struct vigs_surface *sfc;
+ int i;
+
+ for (i = 0; i < num_buffers; ++i) {
+ request.data = buffers[i].data;
+ gem = bo_to_vigs_gem(buffers[i].base.bo);
+ sfc = vigs_gem_to_vigs_surface(gem);
+
+ switch (buffers[i].cmd) {
+ case vigsp_cmd_update_vram:
+ if (vigs_gem_in_vram(&sfc->gem)) {
+ if (vigs_surface_need_vram_update(sfc)) {
+ request.update_vram->offset = vigs_gem_offset(&sfc->gem);
+ sfc->is_gpu_dirty = false;
+ } else {
+ /* sfc_id = 0 tells the host to skip this command. */
+ DRM_DEBUG_DRIVER("Surface %u doesn't need to be updated, ignoring update_vram\n",
+ request.update_vram->sfc_id);
+ request.update_vram->sfc_id = 0;
+ }
+ } else {
+ DRM_DEBUG_DRIVER("Surface %u not in VRAM, ignoring update_vram\n",
+ request.update_vram->sfc_id);
+ request.update_vram->sfc_id = 0;
+ }
+ break;
+ case vigsp_cmd_update_gpu:
+ if (vigs_gem_in_vram(&sfc->gem)) {
+ if (vigs_surface_need_gpu_update(sfc)) {
+ request.update_gpu->offset = vigs_gem_offset(&sfc->gem);
+ sfc->is_gpu_dirty = false;
+ } else {
+ DRM_DEBUG_DRIVER("Surface %u doesn't need to be updated, ignoring update_gpu\n",
+ request.update_gpu->sfc_id);
+ request.update_gpu->sfc_id = 0;
+ }
+ } else {
+ DRM_DEBUG_DRIVER("Surface %u not in VRAM, ignoring update_gpu\n",
+ request.update_gpu->sfc_id);
+ request.update_gpu->sfc_id = 0;
+ }
+ break;
+ case vigsp_cmd_copy:
+ /* which != 0: destination surface becomes GPU-dirty. */
+ if (buffers[i].which && vigs_gem_in_vram(&sfc->gem)) {
+ sfc->is_gpu_dirty = true;
+ }
+ break;
+ case vigsp_cmd_solid_fill:
+ if (vigs_gem_in_vram(&sfc->gem)) {
+ sfc->is_gpu_dirty = true;
+ }
+ break;
+ case vigsp_cmd_ga_copy:
+ if (buffers[i].which && vigs_gem_in_vram(&sfc->gem)) {
+ sfc->is_gpu_dirty = true;
+ } else if (buffers[i].which == 0) {
+ if (vigs_gem_in_vram(&sfc->gem)) {
+ request.ga_copy->src_scanout = true;
+ request.ga_copy->src_offset = vigs_gem_offset(&sfc->gem);
+ /* Host reads VRAM directly; must wait for completion. */
+ *sync = true;
+ } else {
+ request.ga_copy->src_scanout = false;
+ request.ga_copy->src_offset = 0;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/*
+ * Stamp the execbuffer's batch header with the fence sequence number
+ * so the host signals 'fence' when the batch completes.
+ */
+void vigs_execbuffer_fence(struct vigs_execbuffer *execbuffer,
+                           struct vigs_fence *fence)
+{
+    struct vigsp_cmd_batch_header *header = execbuffer->gem.kptr;
+
+    header->fence_seq = fence->seq;
+}
+
+/*
+ * Drop all surface references taken during validation and free the
+ * validation array itself.
+ */
+void vigs_execbuffer_clear_validations(struct vigs_execbuffer *execbuffer,
+                                       struct vigs_validate_buffer *buffers,
+                                       int num_buffers)
+{
+    struct vigs_device *vigs_dev = execbuffer->gem.base.dev->dev_private;
+    int idx;
+
+    /* References must be dropped under struct_mutex. */
+    mutex_lock(&vigs_dev->drm_dev->struct_mutex);
+
+    for (idx = num_buffers - 1; idx >= 0; --idx) {
+        vigs_execbuffer_clear_validation(&buffers[idx]);
+    }
+
+    mutex_unlock(&vigs_dev->drm_dev->struct_mutex);
+
+    kfree(buffers);
+}
+
+/*
+ * IOCTL: create an execbuffer GEM of the requested size and return a
+ * handle plus the (page-aligned) size actually allocated.
+ */
+int vigs_execbuffer_create_ioctl(struct drm_device *drm_dev,
+                                 void *data,
+                                 struct drm_file *file_priv)
+{
+    struct vigs_device *vigs_dev = drm_dev->dev_private;
+    struct drm_vigs_create_execbuffer *args = data;
+    struct vigs_execbuffer *execbuffer = NULL;
+    uint32_t handle;
+    unsigned long size;
+    int ret;
+
+    ret = vigs_execbuffer_create(vigs_dev,
+                                 args->size,
+                                 false,
+                                 &execbuffer);
+
+    if (ret != 0) {
+        return ret;
+    }
+
+    /*
+     * Fix: read the GEM size while we still hold our own reference.
+     * The original read it after the unreference below; once the
+     * handle exists another thread can close it, dropping the last
+     * reference and making that read a use-after-free.
+     */
+    size = vigs_gem_size(&execbuffer->gem);
+
+    ret = drm_gem_handle_create(file_priv,
+                                &execbuffer->gem.base,
+                                &handle);
+
+    drm_gem_object_unreference_unlocked(&execbuffer->gem.base);
+
+    if (ret == 0) {
+        args->size = size;
+        args->handle = handle;
+    }
+
+    return ret;
+}
+
+/*
+ * IOCTL: execute an execbuffer. Maps the GEM, validates/reserves all
+ * surfaces its commands touch, patches the stream, submits it to the
+ * host and fences the reserved BOs. When any command needs host
+ * completion ('sync'), waits on the fence before returning.
+ *
+ * Returns 0 on success, -ENOENT for a bad handle or non-execbuffer
+ * GEM, or an error from mapping/validation/reservation.
+ */
+int vigs_execbuffer_exec_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct drm_vigs_exec *args = data;
+ struct drm_gem_object *gem;
+ struct vigs_gem_object *vigs_gem;
+ struct vigs_execbuffer *execbuffer;
+ struct ww_acquire_ctx ticket;
+ struct list_head list;
+ struct vigs_validate_buffer *buffers;
+ int num_buffers = 0;
+ struct vigs_fence *fence = NULL;
+ bool sync = false;
+ int ret = 0;
+
+ INIT_LIST_HEAD(&list);
+
+ gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
+
+ if (gem == NULL) {
+ ret = -ENOENT;
+ goto out1;
+ }
+
+ vigs_gem = gem_to_vigs_gem(gem);
+
+ if (vigs_gem->type != VIGS_GEM_TYPE_EXECBUFFER) {
+ ret = -ENOENT;
+ goto out2;
+ }
+
+ execbuffer = vigs_gem_to_vigs_execbuffer(vigs_gem);
+
+ vigs_gem_reserve(vigs_gem);
+
+ /*
+ * Never unmap for optimization, but we got to be careful,
+ * worst case scenario is when whole RAM BAR is mapped into kernel.
+ */
+ ret = vigs_gem_kmap(vigs_gem);
+
+ if (ret != 0) {
+ vigs_gem_unreserve(vigs_gem);
+ goto out2;
+ }
+
+ vigs_gem_unreserve(vigs_gem);
+
+ ret = vigs_execbuffer_validate_buffers(execbuffer,
+ &list,
+ &buffers,
+ &num_buffers,
+ &sync);
+
+ if (ret != 0) {
+ goto out2;
+ }
+
+ /* No surfaces referenced: submit directly, no reservation needed. */
+ if (list_empty(&list)) {
+ vigs_comm_exec(vigs_dev->comm, execbuffer);
+ } else {
+ ret = ttm_eu_reserve_buffers(&ticket, &list);
+
+ if (ret != 0) {
+ goto out3;
+ }
+
+ ret = vigs_fence_create(vigs_dev->fenceman, &fence);
+
+ if (ret != 0) {
+ ttm_eu_backoff_reservation(&ticket, &list);
+ goto out3;
+ }
+
+ vigs_execbuffer_process_buffers(execbuffer, buffers, num_buffers, &sync);
+
+ vigs_execbuffer_fence(execbuffer, fence);
+
+ vigs_comm_exec(vigs_dev->comm, execbuffer);
+
+ /* Unreserves the BOs and attaches 'fence' to them. */
+ ttm_eu_fence_buffer_objects(&ticket, &list, fence);
+
+ if (sync) {
+ vigs_fence_wait(fence, false);
+ }
+
+ vigs_fence_unref(fence);
+ }
+
+out3:
+ vigs_execbuffer_clear_validations(execbuffer, buffers, num_buffers);
+out2:
+ drm_gem_object_unreference_unlocked(gem);
+out1:
+ return ret;
+}
--- /dev/null
+#ifndef _VIGS_EXECBUFFER_H_
+#define _VIGS_EXECBUFFER_H_
+
+#include "drmP.h"
+#include "vigs_gem.h"
+#include "vigs_protocol.h"
+#include <ttm/ttm_execbuf_util.h>
+
+struct vigs_fence;
+
+/*
+ * One validated surface occurrence in an execbuffer's command stream.
+ */
+struct vigs_validate_buffer
+{
+ struct ttm_validate_buffer base;
+
+ /* Command that references the surface. */
+ vigsp_cmd cmd;
+
+ /* Operand index: 0 = source, 1 = destination. */
+ int which;
+
+ /* Pointer to the command's request payload inside the GEM. */
+ void *data;
+};
+
+struct vigs_execbuffer
+{
+ /*
+ * Must be first member!
+ */
+ struct vigs_gem_object gem;
+};
+
+/* Downcast from the embedded GEM object. */
+static inline struct vigs_execbuffer *vigs_gem_to_vigs_execbuffer(struct vigs_gem_object *vigs_gem)
+{
+ return container_of(vigs_gem, struct vigs_execbuffer, gem);
+}
+
+int vigs_execbuffer_create(struct vigs_device *vigs_dev,
+ unsigned long size,
+ bool kernel,
+ struct vigs_execbuffer **execbuffer);
+
+int vigs_execbuffer_validate_buffers(struct vigs_execbuffer *execbuffer,
+ struct list_head* list,
+ struct vigs_validate_buffer **buffers,
+ int *num_buffers,
+ bool *sync);
+
+void vigs_execbuffer_process_buffers(struct vigs_execbuffer *execbuffer,
+ struct vigs_validate_buffer *buffers,
+ int num_buffers,
+ bool *sync);
+
+void vigs_execbuffer_fence(struct vigs_execbuffer *execbuffer,
+ struct vigs_fence *fence);
+
+void vigs_execbuffer_clear_validations(struct vigs_execbuffer *execbuffer,
+ struct vigs_validate_buffer *buffers,
+ int num_buffers);
+
+/*
+ * IOCTLs
+ * @{
+ */
+
+int vigs_execbuffer_create_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_execbuffer_exec_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+#endif
--- /dev/null
+#include "vigs_fbdev.h"
+#include "vigs_device.h"
+#include "vigs_surface.h"
+#include "vigs_framebuffer.h"
+#include "vigs_output.h"
+#include "vigs_crtc.h"
+#include "drm_crtc_helper.h"
+#include <drm/vigs_drm.h>
+
+/*
+ * From drm_fb_helper.c, modified to work with 'regno' > 16.
+ * @{
+ */
+
+/*
+ * From drm_fb_helper.c: report whether the fbdev framebuffer is bound
+ * to at least as many CRTCs as currently have any framebuffer set.
+ */
+static bool vigs_fbdev_helper_is_bound(struct drm_fb_helper *fb_helper)
+{
+    struct drm_device *dev = fb_helper->dev;
+    struct drm_crtc *crtc;
+    int ours = 0, in_use = 0;
+
+    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+        if (crtc->fb != NULL) {
+            in_use++;
+        }
+        if (crtc->fb == fb_helper->fb) {
+            ours++;
+        }
+    }
+
+    return ours >= in_use;
+}
+
+/*
+ * Set one palette/gamma entry. Copied from drm_fb_helper.c and
+ * modified to work with 'regno' > 16 (see file-level note above).
+ * For truecolor visuals only the 16-entry pseudo-palette is filled;
+ * otherwise the CRTC gamma callbacks are used.
+ */
+static int vigs_fbdev_setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, u16 regno, struct fb_info *fbi)
+{
+ struct drm_fb_helper *fb_helper = fbi->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ int pindex;
+
+ if (fbi->fix.visual == FB_VISUAL_TRUECOLOR) {
+ u32 *palette;
+ u32 value;
+ /* place color in pseudopalette */
+ if (regno <= 16) {
+ palette = (u32*)fbi->pseudo_palette;
+ red >>= (16 - fbi->var.red.length);
+ green >>= (16 - fbi->var.green.length);
+ blue >>= (16 - fbi->var.blue.length);
+ value = (red << fbi->var.red.offset) |
+ (green << fbi->var.green.offset) |
+ (blue << fbi->var.blue.offset);
+ if (fbi->var.transp.length > 0) {
+ /* Force alpha fully opaque. */
+ u32 mask = (1 << fbi->var.transp.length) - 1;
+ mask <<= fbi->var.transp.offset;
+ value |= mask;
+ }
+ palette[regno] = value;
+ }
+ return 0;
+ }
+
+ /*
+ * The driver really shouldn't advertise pseudo/directcolor
+ * visuals if it can't deal with the palette.
+ */
+ if (WARN_ON(!fb_helper->funcs->gamma_set ||
+ !fb_helper->funcs->gamma_get)) {
+ return -EINVAL;
+ }
+
+ /* 16bpp packs 5/6-bit channels into the 8-bit gamma LUT: each
+ * palette entry fans out to 8 LUT slots (regno << 3). */
+ pindex = regno;
+
+ if (fb->bits_per_pixel == 16) {
+ pindex = regno << 3;
+
+ if ((fb->depth == 16) && (regno > 63)) {
+ return -EINVAL;
+ }
+
+ if ((fb->depth == 15) && (regno > 31)) {
+ return -EINVAL;
+ }
+
+ if (fb->depth == 16) {
+ u16 r, g, b;
+ int i;
+
+ if (regno < 32) {
+ for (i = 0; i < 8; i++) {
+ fb_helper->funcs->gamma_set(crtc, red,
+ green, blue, pindex + i);
+ }
+ }
+
+ /* NOTE(review): mixing stored red/blue with the new
+ * 'green' is inherited from drm_fb_helper.c -- RGB565's
+ * 6-bit green shares LUT entries with 5-bit red/blue;
+ * looks deliberate, confirm against upstream history. */
+ fb_helper->funcs->gamma_get(crtc, &r,
+ &g, &b,
+ (pindex >> 1));
+
+ for (i = 0; i < 4; i++) {
+ fb_helper->funcs->gamma_set(crtc, r,
+ green, b,
+ (pindex >> 1) + i);
+ }
+ }
+ }
+
+ if (fb->depth != 16) {
+ fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
+ }
+
+ return 0;
+}
+
+/*
+ * fb_ops.fb_setcmap hook: apply a whole colormap by feeding each
+ * entry through vigs_fbdev_setcolreg() for every CRTC driven by this
+ * helper, then reload the hardware LUT. Copied from drm_fb_helper.c
+ * (see file-level note above). Returns -EBUSY while the fbdev
+ * framebuffer is not bound to all active CRTCs.
+ */
+static int vigs_fbdev_setcmap(struct fb_cmap *cmap, struct fb_info *fbi)
+{
+ struct drm_fb_helper *fb_helper = fbi->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ u16 *red, *green, *blue, *transp;
+ struct drm_crtc *crtc;
+ int i, j, ret = 0;
+ int start;
+
+ drm_modeset_lock_all(dev);
+ if (!vigs_fbdev_helper_is_bound(fb_helper)) {
+ drm_modeset_unlock_all(dev);
+ return -EBUSY;
+ }
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
+
+ /* Restart from the colormap head for every CRTC. */
+ red = cmap->red;
+ green = cmap->green;
+ blue = cmap->blue;
+ transp = cmap->transp;
+ start = cmap->start;
+
+ for (j = 0; j < cmap->len; j++) {
+ u16 hred, hgreen, hblue, htransp = 0xffff;
+
+ hred = *red++;
+ hgreen = *green++;
+ hblue = *blue++;
+
+ if (transp) {
+ htransp = *transp++;
+ }
+
+ ret = vigs_fbdev_setcolreg(crtc, hred, hgreen, hblue, start++, fbi);
+
+ if (ret != 0) {
+ goto out;
+ }
+ }
+
+ if (crtc_funcs->load_lut) {
+ crtc_funcs->load_lut(crtc);
+ }
+ }
+
+ out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/*
+ * @}
+ */
+
+/* fb_ops.fb_set_par hook: log entry and delegate to the DRM helper. */
+static int vigs_fbdev_set_par(struct fb_info *fbi)
+{
+ DRM_DEBUG_KMS("enter\n");
+
+ return drm_fb_helper_set_par(fbi);
+}
+
+/*
+ * This is 'drm_fb_helper_dpms' modified to set 'in_dpms'
+ * flag inside drm_modeset_lock_all.
+ */
+/*
+ * This is 'drm_fb_helper_dpms' modified to set 'in_dpms'
+ * flag inside drm_modeset_lock_all.
+ */
+static void vigs_fbdev_dpms(struct fb_info *fbi, int dpms_mode)
+{
+ struct drm_fb_helper *fb_helper = fbi->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct vigs_device *vigs_dev = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct vigs_crtc *vigs_crtc;
+ struct drm_connector *connector;
+ int i, j;
+
+ /*
+ * fbdev->blank can be called from irq context in case of a panic.
+ * Since we already have our own special panic handler which will
+ * restore the fbdev console mode completely, just bail out early.
+ */
+ if (oops_in_progress) {
+ return;
+ }
+
+ if (vigs_dev->in_dpms) {
+ /*
+ * If this is called from 'vigs_crtc_dpms' then we just
+ * return in order to not deadlock. Note that it's
+ * correct to check this flag here without any locks
+ * being held since 'fb_blank' callback is already called with
+ * console lock being held and 'vigs_crtc_dpms' only sets in_dpms
+ * inside the console lock.
+ */
+ return;
+ }
+
+ /*
+ * For each CRTC in this fb, turn the connectors on/off.
+ */
+ drm_modeset_lock_all(dev);
+ if (!vigs_fbdev_helper_is_bound(fb_helper)) {
+ drm_modeset_unlock_all(dev);
+ return;
+ }
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ vigs_crtc = crtc_to_vigs_crtc(crtc);
+
+ if (!crtc->enabled) {
+ continue;
+ }
+
+ /* Guard against re-entry through vigs_crtc_dpms (see above). */
+ vigs_dev->in_dpms = true;
+
+ /* Walk the connectors & encoders on this fb turning them on/off */
+ for (j = 0; j < fb_helper->connector_count; j++) {
+ connector = fb_helper->connector_info[j]->connector;
+ connector->funcs->dpms(connector, dpms_mode);
+ drm_object_property_set_value(&connector->base,
+ dev->mode_config.dpms_property, dpms_mode);
+ }
+
+ vigs_dev->in_dpms = false;
+ }
+
+ drm_modeset_unlock_all(dev);
+}
+
+/*
+ * This is 'drm_fb_helper_blank' modified to use
+ * 'vigs_fbdev_dpms'.
+ */
+/*
+ * This is 'drm_fb_helper_blank' modified to use 'vigs_fbdev_dpms':
+ * translate the fbdev blank level into a DPMS mode and apply it.
+ * Unknown blank levels are ignored. Always returns 0.
+ */
+static int vigs_fbdev_blank(int blank, struct fb_info *fbi)
+{
+    int dpms_mode;
+
+    switch (blank) {
+    case FB_BLANK_UNBLANK:
+        /* Display: On; HSync: On, VSync: On */
+        dpms_mode = DRM_MODE_DPMS_ON;
+        break;
+    case FB_BLANK_NORMAL:
+        /* Display: Off; HSync: On, VSync: On */
+        dpms_mode = DRM_MODE_DPMS_STANDBY;
+        break;
+    case FB_BLANK_HSYNC_SUSPEND:
+        /* Display: Off; HSync: Off, VSync: On */
+        dpms_mode = DRM_MODE_DPMS_STANDBY;
+        break;
+    case FB_BLANK_VSYNC_SUSPEND:
+        /* Display: Off; HSync: On, VSync: Off */
+        dpms_mode = DRM_MODE_DPMS_SUSPEND;
+        break;
+    case FB_BLANK_POWERDOWN:
+        /* Display: Off; HSync: Off, VSync: Off */
+        dpms_mode = DRM_MODE_DPMS_OFF;
+        break;
+    default:
+        return 0;
+    }
+
+    vigs_fbdev_dpms(fbi, dpms_mode);
+
+    return 0;
+}
+
+/*
+ * fbdev operations: software drawing via cfb_*, mode/palette handling
+ * routed through the DRM fb helper with local overrides for set_par,
+ * blank and setcmap.
+ */
+static struct fb_ops vigs_fbdev_ops =
+{
+ .owner = THIS_MODULE,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = vigs_fbdev_set_par,
+ .fb_blank = vigs_fbdev_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_setcmap = vigs_fbdev_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+/*
+ * First-time fbdev probe: allocate an fb_info, create a backing VIGS
+ * surface and DRM framebuffer for it, and map VRAM for the console.
+ *
+ * Returns 0 on success, a negative errno on failure.
+ *
+ * NOTE(review): the fail3 path resets helper->fb but does not destroy
+ * the just-created vigs_fb -- presumably torn down elsewhere; confirm
+ * against vigs_framebuffer.c before changing.
+ */
+static int vigs_fbdev_probe_once(struct drm_fb_helper *helper,
+                                 struct drm_fb_helper_surface_size *sizes)
+{
+    struct vigs_fbdev *vigs_fbdev = fbdev_to_vigs_fbdev(helper);
+    struct vigs_device *vigs_dev = helper->dev->dev_private;
+    struct vigs_surface *fb_sfc;
+    struct vigs_framebuffer *vigs_fb;
+    struct fb_info *fbi;
+    struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+    vigsp_surface_format format;
+    unsigned long offset;
+    int dpi;
+    int ret;
+    struct drm_connector *connector;
+
+    DRM_DEBUG_KMS("%dx%dx%d\n",
+                  sizes->surface_width,
+                  sizes->surface_height,
+                  sizes->surface_bpp);
+
+    mode_cmd.width = sizes->surface_width;
+    mode_cmd.height = sizes->surface_height;
+    mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+    mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                                      sizes->surface_depth);
+
+    switch (mode_cmd.pixel_format) {
+    case DRM_FORMAT_XRGB8888:
+        format = vigsp_surface_bgrx8888;
+        break;
+    case DRM_FORMAT_ARGB8888:
+        format = vigsp_surface_bgra8888;
+        break;
+    default:
+        DRM_DEBUG_KMS("unsupported pixel format: %u\n", mode_cmd.pixel_format);
+        ret = -EINVAL;
+        goto fail1;
+    }
+
+    fbi = framebuffer_alloc(0, &vigs_dev->pci_dev->dev);
+
+    if (!fbi) {
+        DRM_ERROR("failed to allocate fb info\n");
+        ret = -ENOMEM;
+        goto fail1;
+    }
+
+    ret = vigs_surface_create(vigs_dev,
+                              mode_cmd.width,
+                              mode_cmd.height,
+                              mode_cmd.pitches[0],
+                              format,
+                              true,
+                              &fb_sfc);
+
+    if (ret != 0) {
+        goto fail2;
+    }
+
+    ret = vigs_framebuffer_create(vigs_dev,
+                                  &mode_cmd,
+                                  fb_sfc,
+                                  &vigs_fb);
+
+    /* vigs_fb (or the failure path) now owns the surface reference. */
+    drm_gem_object_unreference_unlocked(&fb_sfc->gem.base);
+
+    if (ret != 0) {
+        goto fail2;
+    }
+
+    helper->fb = &vigs_fb->base;
+    helper->fbdev = fbi;
+
+    fbi->par = helper;
+    fbi->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+    fbi->fbops = &vigs_fbdev_ops;
+
+    ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+
+    if (ret != 0) {
+        DRM_ERROR("failed to allocate cmap\n");
+        goto fail3;
+    }
+
+    /*
+     * This is a hack to make fbdev work without calling
+     * 'vigs_framebuffer_pin'. VRAM is precious resource and we
+     * don't want to give it away to fbdev just to show
+     * that "kernel loading" thing. Here we assume that
+     * GEM zero is always located at offset 0 in VRAM and just map
+     * it and give it to fbdev. If later, when X starts for example,
+     * one will attempt to write to /dev/fb0 then he'll probably
+     * write to some GEM's memory, but we don't care.
+     */
+    vigs_fbdev->kptr = ioremap_wc(vigs_dev->vram_base,
+                                  vigs_gem_size(&fb_sfc->gem));
+
+    if (!vigs_fbdev->kptr) {
+        /*
+         * Fix: the original fell through here with ret still 0,
+         * reporting success for a failed probe.
+         */
+        ret = -ENOMEM;
+        goto fail4;
+    }
+
+    strcpy(fbi->fix.id, "VIGS");
+
+    drm_fb_helper_fill_fix(fbi, vigs_fb->base.pitches[0], vigs_fb->base.depth);
+    drm_fb_helper_fill_var(fbi, helper, vigs_fb->base.width, vigs_fb->base.height);
+
+    /*
+     * Setup DPI.
+     * @{
+     */
+
+    dpi = vigs_output_get_dpi();
+    fbi->var.height = vigs_output_get_phys_height(dpi, fbi->var.yres);
+    fbi->var.width = vigs_output_get_phys_width(dpi, fbi->var.xres);
+
+    /*
+     * Walk all connectors and set display_info.
+     */
+
+    list_for_each_entry(connector, &vigs_dev->drm_dev->mode_config.connector_list, head) {
+        connector->display_info.width_mm = fbi->var.width;
+        connector->display_info.height_mm = fbi->var.height;
+    }
+
+    /*
+     * @}
+     */
+
+    /*
+     * TODO: Play around with xoffset/yoffset, make sure this code works.
+     */
+
+    offset = fbi->var.xoffset * (vigs_fb->base.bits_per_pixel >> 3);
+    offset += fbi->var.yoffset * vigs_fb->base.pitches[0];
+
+    /*
+     * TODO: "vram_base + ..." - not nice, make a function for this.
+     */
+    fbi->fix.smem_start = vigs_dev->vram_base +
+                          0 +
+                          offset;
+    fbi->screen_base = vigs_fbdev->kptr + offset;
+    fbi->screen_size = fbi->fix.smem_len = vigs_fb->base.width *
+                                           vigs_fb->base.height *
+                                           (vigs_fb->base.bits_per_pixel >> 3);
+
+    return 0;
+
+fail4:
+    fb_dealloc_cmap(&fbi->cmap);
+fail3:
+    helper->fb = NULL;
+    helper->fbdev = NULL;
+fail2:
+    framebuffer_release(fbi);
+fail1:
+
+    return ret;
+}
+
+static int vigs_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ int ret = 0;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ /*
+ * With !helper->fb, it means that this function is called first time
+ * and after that, the helper->fb would be used as clone mode.
+ */
+
+ if (!helper->fb) {
+ ret = vigs_fbdev_probe_once(helper, sizes);
+
+ if (ret >= 0) {
+ ret = 1;
+ }
+ }
+
+ return ret;
+}
+
/* fb helper callbacks: we only implement fb_probe. */
static struct drm_fb_helper_funcs vigs_fbdev_funcs =
{
    .fb_probe = vigs_fbdev_probe,
};
+
/*
 * Allocates and initializes the fbdev emulation state for 'vigs_dev'.
 * On success '*vigs_fbdev' owns the new object; on failure it is NULL.
 */
int vigs_fbdev_create(struct vigs_device *vigs_dev,
                      struct vigs_fbdev **vigs_fbdev)
{
    int ret = 0;

    DRM_DEBUG_KMS("enter\n");

    *vigs_fbdev = kzalloc(sizeof(**vigs_fbdev), GFP_KERNEL);

    if (!*vigs_fbdev) {
        ret = -ENOMEM;
        goto fail1;
    }

    (*vigs_fbdev)->base.funcs = &vigs_fbdev_funcs;

    /* Single CRTC, single connector. */
    ret = drm_fb_helper_init(vigs_dev->drm_dev,
                             &(*vigs_fbdev)->base,
                             1, 1);

    if (ret != 0) {
        DRM_ERROR("unable to init fb_helper: %d\n", ret);
        goto fail2;
    }

    /*
     * NOTE(review): the return values of these two calls are ignored;
     * a failed initial config would go unnoticed — consider checking.
     * 32 is the preferred bpp for the initial mode.
     */
    drm_fb_helper_single_add_all_connectors(&(*vigs_fbdev)->base);
    drm_fb_helper_initial_config(&(*vigs_fbdev)->base, 32);

    return 0;

fail2:
    kfree(*vigs_fbdev);
fail1:
    *vigs_fbdev = NULL;

    return ret;
}
+
/*
 * Tears down the fbdev state: fb_info first (unregister, cmap, release),
 * then the helper, the VRAM mapping, and finally the framebuffer itself.
 * The ordering mirrors the setup in vigs_fbdev_probe_once.
 */
void vigs_fbdev_destroy(struct vigs_fbdev *vigs_fbdev)
{
    struct fb_info *fbi = vigs_fbdev->base.fbdev;
    struct drm_framebuffer *fb;

    DRM_DEBUG_KMS("enter\n");

    if (fbi) {
        unregister_framebuffer(fbi);
        fb_dealloc_cmap(&fbi->cmap);
        framebuffer_release(fbi);
    }

    /* Grab the fb pointer before the helper is finalized. */
    fb = vigs_fbdev->base.fb;

    drm_fb_helper_fini(&vigs_fbdev->base);

    /* 'kptr' is the ioremap of VRAM done in vigs_fbdev_probe_once. */
    if (vigs_fbdev->kptr) {
        iounmap(vigs_fbdev->kptr);
    }

    drm_framebuffer_unregister_private(fb);
    drm_framebuffer_remove(fb);

    kfree(vigs_fbdev);
}
+
+void vigs_fbdev_output_poll_changed(struct vigs_fbdev *vigs_fbdev)
+{
+ DRM_DEBUG_KMS("enter\n");
+
+ drm_fb_helper_hotplug_event(&vigs_fbdev->base);
+}
+
+void vigs_fbdev_restore_mode(struct vigs_fbdev *vigs_fbdev)
+{
+ DRM_DEBUG_KMS("enter\n");
+
+ drm_modeset_lock_all(vigs_fbdev->base.dev);
+ drm_fb_helper_restore_fbdev_mode(&vigs_fbdev->base);
+ drm_modeset_unlock_all(vigs_fbdev->base.dev);
+}
--- /dev/null
#ifndef _VIGS_FBDEV_H_
#define _VIGS_FBDEV_H_

#include "drmP.h"
#include "drm_fb_helper.h"

struct vigs_device;

/* fbdev emulation state: the DRM fb helper plus our VRAM mapping. */
struct vigs_fbdev
{
    struct drm_fb_helper base;

    /* ioremap of VRAM used as the fbdev screen; NULL until probed. */
    void __iomem *kptr;
};

/* Downcast from the embedded fb helper back to vigs_fbdev. */
static inline struct vigs_fbdev *fbdev_to_vigs_fbdev(struct drm_fb_helper *fbdev)
{
    return container_of(fbdev, struct vigs_fbdev, base);
}

int vigs_fbdev_create(struct vigs_device *vigs_dev,
                      struct vigs_fbdev **vigs_fbdev);

void vigs_fbdev_destroy(struct vigs_fbdev *vigs_fbdev);

void vigs_fbdev_output_poll_changed(struct vigs_fbdev *vigs_fbdev);

void vigs_fbdev_restore_mode(struct vigs_fbdev *vigs_fbdev);

#endif
--- /dev/null
+#include "vigs_fence.h"
+#include "vigs_fenceman.h"
+#include "vigs_file.h"
+#include "vigs_device.h"
+#include "vigs_comm.h"
+#include <drm/vigs_drm.h>
+
/* Placeholder: fences currently need no extra cleanup beyond freeing. */
static void vigs_fence_cleanup(struct vigs_fence *fence)
{
}
+
/* 'destroy' callback for plain (kernel-only) fences. */
static void vigs_fence_destroy(struct vigs_fence *fence)
{
    vigs_fence_cleanup(fence);
    kfree(fence);
}
+
/*
 * 'destroy' callback for user-visible fences; frees via the TTM base
 * object helper since the fence is embedded in a vigs_user_fence.
 */
static void vigs_user_fence_destroy(struct vigs_fence *fence)
{
    struct vigs_user_fence *user_fence = vigs_fence_to_vigs_user_fence(fence);

    vigs_fence_cleanup(&user_fence->fence);
    ttm_base_object_kfree(user_fence, base);
}
+
/*
 * kref release callback. Must be called with the fence manager lock
 * held (see vigs_fence_unref) — it unlinks the fence from the pending
 * list before destroying it.
 */
static void vigs_fence_release_locked(struct kref *kref)
{
    struct vigs_fence *fence = kref_to_vigs_fence(kref);

    DRM_DEBUG_DRIVER("Fence destroyed (seq = %u, signaled = %u)\n",
                     fence->seq,
                     fence->signaled);

    list_del_init(&fence->list);
    fence->destroy(fence);
}
+
/*
 * TTM base object refcount release: drops the fence reference taken in
 * vigs_user_fence_create on behalf of the base object.
 */
static void vigs_user_fence_refcount_release(struct ttm_base_object **base)
{
    struct ttm_base_object *tmp = *base;
    struct vigs_user_fence *user_fence = base_to_vigs_user_fence(tmp);

    vigs_fence_unref(&user_fence->fence);
    *base = NULL;
}
+
/*
 * Common fence initialization: sets up refcount/waitqueue, assigns the
 * next sequence number under the fence manager lock and queues the
 * fence on the pending list.
 */
static void vigs_fence_init(struct vigs_fence *fence,
                            struct vigs_fenceman *fenceman,
                            void (*destroy)(struct vigs_fence*))
{
    unsigned long flags;

    kref_init(&fence->kref);
    INIT_LIST_HEAD(&fence->list);
    fence->fenceman = fenceman;
    fence->signaled = false;
    init_waitqueue_head(&fence->wait);
    fence->destroy = destroy;

    spin_lock_irqsave(&fenceman->lock, flags);

    /* seq numbers are never 0; vigs_fence_seq_next skips it. */
    fence->seq = vigs_fence_seq_next(fenceman->seq);
    fenceman->seq = fence->seq;

    list_add_tail(&fence->list, &fenceman->fence_list);

    spin_unlock_irqrestore(&fenceman->lock, flags);

    DRM_DEBUG_DRIVER("Fence created (seq = %u)\n", fence->seq);
}
+
+int vigs_fence_create(struct vigs_fenceman *fenceman,
+ struct vigs_fence **fence)
+{
+ int ret = 0;
+
+ *fence = kzalloc(sizeof(**fence), GFP_KERNEL);
+
+ if (!*fence) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ vigs_fence_init(*fence, fenceman, &vigs_fence_destroy);
+
+ return 0;
+
+fail1:
+ *fence = NULL;
+
+ return ret;
+}
+
/*
 * Allocates a user-visible fence and registers it as a TTM base object
 * so user space can reference it via 'handle'. On success the caller
 * holds one fence reference and the base object holds another (taken
 * below, dropped in vigs_user_fence_refcount_release). On failure
 * '*user_fence' is NULL.
 */
int vigs_user_fence_create(struct vigs_fenceman *fenceman,
                           struct drm_file *file_priv,
                           struct vigs_user_fence **user_fence,
                           uint32_t *handle)
{
    struct vigs_file *vigs_file = file_priv->driver_priv;
    int ret = 0;

    *user_fence = kzalloc(sizeof(**user_fence), GFP_KERNEL);

    if (!*user_fence) {
        ret = -ENOMEM;
        goto fail1;
    }

    vigs_fence_init(&(*user_fence)->fence, fenceman, &vigs_user_fence_destroy);

    ret = ttm_base_object_init(vigs_file->obj_file,
                               &(*user_fence)->base, false,
                               VIGS_FENCE_TYPE,
                               &vigs_user_fence_refcount_release,
                               NULL);

    if (ret != 0) {
        goto fail2;
    }

    /*
     * For ttm_base_object.
     */
    vigs_fence_ref(&(*user_fence)->fence);

    *handle = (*user_fence)->base.hash.key;

    return 0;

fail2:
    /* Fence was never exposed; free directly, bypassing the kref. */
    vigs_fence_cleanup(&(*user_fence)->fence);
    kfree(*user_fence);
fail1:
    *user_fence = NULL;

    return ret;
}
+
/*
 * Blocks until 'fence' is signaled. With 'interruptible' set, a signal
 * aborts the wait and -ERESTARTSYS (from wait_event_interruptible) is
 * returned; otherwise the wait is uninterruptible and returns 0.
 */
int vigs_fence_wait(struct vigs_fence *fence, bool interruptible)
{
    long ret = 0;

    /* Fast path: already signaled, no need to sleep. */
    if (vigs_fence_signaled(fence)) {
        DRM_DEBUG_DRIVER("Fence wait (seq = %u, signaled = %u)\n",
                         fence->seq,
                         fence->signaled);
        return 0;
    }

    DRM_DEBUG_DRIVER("Fence wait (seq = %u)\n", fence->seq);

    if (interruptible) {
        ret = wait_event_interruptible(fence->wait, vigs_fence_signaled(fence));
    } else {
        wait_event(fence->wait, vigs_fence_signaled(fence));
    }

    if (ret != 0) {
        DRM_INFO("Fence wait interrupted (seq = %u) = %ld\n", fence->seq, ret);
    } else {
        DRM_DEBUG_DRIVER("Fence wait done (seq = %u)\n", fence->seq);
    }

    return ret;
}
+
+bool vigs_fence_signaled(struct vigs_fence *fence)
+{
+ unsigned long flags;
+ bool signaled;
+
+ spin_lock_irqsave(&fence->fenceman->lock, flags);
+
+ signaled = fence->signaled;
+
+ spin_unlock_irqrestore(&fence->fenceman->lock, flags);
+
+ return signaled;
+}
+
+void vigs_fence_ref(struct vigs_fence *fence)
+{
+ if (unlikely(!fence)) {
+ return;
+ }
+
+ kref_get(&fence->kref);
+}
+
/*
 * Drops a fence reference; a NULL fence is tolerated. The fence manager
 * lock is held across kref_put so that vigs_fence_release_locked can
 * safely unlink the fence from the pending list.
 */
void vigs_fence_unref(struct vigs_fence *fence)
{
    struct vigs_fenceman *fenceman;

    if (unlikely(!fence)) {
        return;
    }

    /* Cache: 'fence' may be freed inside kref_put. */
    fenceman = fence->fenceman;

    spin_lock_irq(&fenceman->lock);
    BUG_ON(atomic_read(&fence->kref.refcount) == 0);
    kref_put(&fence->kref, vigs_fence_release_locked);
    spin_unlock_irq(&fenceman->lock);
}
+
+int vigs_fence_create_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct vigs_file *vigs_file = file_priv->driver_priv;
+ struct drm_vigs_create_fence *args = data;
+ struct vigs_user_fence *user_fence;
+ uint32_t handle;
+ int ret;
+
+ ret = vigs_user_fence_create(vigs_dev->fenceman,
+ file_priv,
+ &user_fence,
+ &handle);
+
+ if (ret != 0) {
+ goto out;
+ }
+
+ if (args->send) {
+ ret = vigs_comm_fence(vigs_dev->comm, &user_fence->fence);
+
+ if (ret != 0) {
+ ttm_ref_object_base_unref(vigs_file->obj_file,
+ handle,
+ TTM_REF_USAGE);
+ goto out;
+ }
+ }
+
+ args->handle = handle;
+ args->seq = user_fence->fence.seq;
+
+out:
+ vigs_fence_unref(&user_fence->fence);
+
+ return ret;
+}
+
+int vigs_fence_wait_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_file *vigs_file = file_priv->driver_priv;
+ struct drm_vigs_fence_wait *args = data;
+ struct ttm_base_object *base;
+ struct vigs_user_fence *user_fence;
+ int ret;
+
+ base = ttm_base_object_lookup(vigs_file->obj_file, args->handle);
+
+ if (!base) {
+ return -ENOENT;
+ }
+
+ user_fence = base_to_vigs_user_fence(base);
+
+ ret = vigs_fence_wait(&user_fence->fence, true);
+
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
+
+int vigs_fence_signaled_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_file *vigs_file = file_priv->driver_priv;
+ struct drm_vigs_fence_signaled *args = data;
+ struct ttm_base_object *base;
+ struct vigs_user_fence *user_fence;
+
+ base = ttm_base_object_lookup(vigs_file->obj_file, args->handle);
+
+ if (!base) {
+ return -ENOENT;
+ }
+
+ user_fence = base_to_vigs_user_fence(base);
+
+ args->signaled = vigs_fence_signaled(&user_fence->fence);
+
+ ttm_base_object_unref(&base);
+
+ return 0;
+}
+
+int vigs_fence_unref_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_file *vigs_file = file_priv->driver_priv;
+ struct drm_vigs_fence_unref *args = data;
+
+ return ttm_ref_object_base_unref(vigs_file->obj_file,
+ args->handle,
+ TTM_REF_USAGE);
+}
--- /dev/null
#ifndef _VIGS_FENCE_H_
#define _VIGS_FENCE_H_

#include "drmP.h"
#include <ttm/ttm_object.h>

#define VIGS_FENCE_TYPE ttm_driver_type2

struct vigs_fenceman;

/* A host-side completion fence, tracked by the fence manager. */
struct vigs_fence
{
    struct kref kref;

    /* Entry in vigs_fenceman::fence_list while pending. */
    struct list_head list;

    struct vigs_fenceman *fenceman;

    /* Sequence number; never 0 (0 means "no fencing" to the GPU). */
    uint32_t seq;

    /* Guarded by vigs_fenceman::lock. */
    bool signaled;

    /* Woken by vigs_fenceman_ack when the fence signals. */
    wait_queue_head_t wait;

    /* Type-specific destructor, invoked on last kref_put. */
    void (*destroy)(struct vigs_fence *fence);
};

/*
 * Users can access fences via TTM base object mechanism,
 * thus, we need to wrap vigs_fence into vigs_user_fence because
 * not every fence object needs to be referenced from user space.
 * So no point in always having struct ttm_base_object inside vigs_fence.
 */

struct vigs_user_fence
{
    struct ttm_base_object base;

    struct vigs_fence fence;
};

static inline struct vigs_fence *kref_to_vigs_fence(struct kref *kref)
{
    return container_of(kref, struct vigs_fence, kref);
}

static inline struct vigs_user_fence *vigs_fence_to_vigs_user_fence(struct vigs_fence *fence)
{
    return container_of(fence, struct vigs_user_fence, fence);
}

static inline struct vigs_user_fence *base_to_vigs_user_fence(struct ttm_base_object *base)
{
    return container_of(base, struct vigs_user_fence, base);
}
/* Next sequence number after 'seq', wrapping around and skipping 0. */
static inline uint32_t vigs_fence_seq_next(uint32_t seq)
{
    seq += 1;
    return (seq == 0) ? 1 : seq;
}
+
/* Wrap-safe sequence-number comparisons (same trick as time_after). */
#define vigs_fence_seq_num_after(a, b) \
    (typecheck(u32, a) && typecheck(u32, b) && ((s32)(b) - (s32)(a) < 0))

#define vigs_fence_seq_num_before(a, b) vigs_fence_seq_num_after(b, a)

#define vigs_fence_seq_num_after_eq(a, b) \
    ( typecheck(u32, a) && typecheck(u32, b) && \
      ((s32)(a) - (s32)(b) >= 0) )

#define vigs_fence_seq_num_before_eq(a, b) vigs_fence_seq_num_after_eq(b, a)

/* Creates a kernel-only fence; '*fence' is NULL on failure. */
int vigs_fence_create(struct vigs_fenceman *fenceman,
                      struct vigs_fence **fence);

/* Creates a user-visible fence exposed via TTM handle '*handle'. */
int vigs_user_fence_create(struct vigs_fenceman *fenceman,
                           struct drm_file *file_priv,
                           struct vigs_user_fence **user_fence,
                           uint32_t *handle);

int vigs_fence_wait(struct vigs_fence *fence, bool interruptible);

bool vigs_fence_signaled(struct vigs_fence *fence);

/*
 * Passing NULL won't hurt, this is for convenience.
 */
void vigs_fence_ref(struct vigs_fence *fence);

/*
 * Passing NULL won't hurt, this is for convenience.
 */
void vigs_fence_unref(struct vigs_fence *fence);

/*
 * IOCTLs
 * @{
 */

int vigs_fence_create_ioctl(struct drm_device *drm_dev,
                            void *data,
                            struct drm_file *file_priv);

int vigs_fence_wait_ioctl(struct drm_device *drm_dev,
                          void *data,
                          struct drm_file *file_priv);

int vigs_fence_signaled_ioctl(struct drm_device *drm_dev,
                              void *data,
                              struct drm_file *file_priv);

int vigs_fence_unref_ioctl(struct drm_device *drm_dev,
                           void *data,
                           struct drm_file *file_priv);

/*
 * @}
 */

#endif
--- /dev/null
+#include "vigs_fenceman.h"
+#include "vigs_fence.h"
+
+int vigs_fenceman_create(struct vigs_fenceman **fenceman)
+{
+ int ret = 0;
+
+ DRM_DEBUG_DRIVER("enter\n");
+
+ *fenceman = kzalloc(sizeof(**fenceman), GFP_KERNEL);
+
+ if (!*fenceman) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ spin_lock_init(&(*fenceman)->lock);
+ INIT_LIST_HEAD(&(*fenceman)->fence_list);
+ (*fenceman)->seq = UINT_MAX;
+
+ return 0;
+
+fail1:
+ *fenceman = NULL;
+
+ return ret;
+}
+
/*
 * Frees the fence manager. All fences must already be gone; a
 * non-empty pending list indicates a refcounting bug, hence BUG_ON.
 */
void vigs_fenceman_destroy(struct vigs_fenceman *fenceman)
{
    unsigned long flags;
    bool fence_list_empty;

    DRM_DEBUG_DRIVER("enter\n");

    spin_lock_irqsave(&fenceman->lock, flags);
    fence_list_empty = list_empty(&fenceman->fence_list);
    spin_unlock_irqrestore(&fenceman->lock, flags);

    BUG_ON(!fence_list_empty);

    kfree(fenceman);
}
+
/*
 * Signals every pending fence whose sequence number lies in
 * [lower, upper] (wrap-safe comparison). Safe to call from the IRQ
 * handler; waiters are woken while the manager lock is held.
 */
void vigs_fenceman_ack(struct vigs_fenceman *fenceman,
                       uint32_t lower, uint32_t upper)
{
    unsigned long flags;
    struct vigs_fence *fence, *tmp;

    spin_lock_irqsave(&fenceman->lock, flags);

    list_for_each_entry_safe(fence, tmp, &fenceman->fence_list, list) {
        if (vigs_fence_seq_num_after_eq(fence->seq, lower) &&
            vigs_fence_seq_num_before_eq(fence->seq, upper)) {
            DRM_DEBUG_DRIVER("Fence signaled (seq = %u)\n",
                             fence->seq);
            list_del_init(&fence->list);
            fence->signaled = true;
            wake_up_all(&fence->wait);
        }
    }

    spin_unlock_irqrestore(&fenceman->lock, flags);
}
--- /dev/null
#ifndef _VIGS_FENCEMAN_H_
#define _VIGS_FENCEMAN_H_

#include "drmP.h"

/*
 * This is fence manager for VIGS. It's responsible for the following:
 * + Fence bookkeeping.
 * + Fence sequence number management and IRQ processing.
 */

struct vigs_fenceman
{
    /*
     * Lock that's used to guard all data inside
     * fence manager and fence objects. Don't confuse it
     * with struct ttm_bo_device::fence_lock, that lock
     * is used to work with TTM sync objects, i.e. it's more
     * "high level".
     */
    spinlock_t lock;

    /*
     * List of currently pending fences.
     */
    struct list_head fence_list;

    /*
     * Current sequence number, new fence should be
     * assigned (seq + 1).
     * Note! Sequence numbers are always non-0, 0 is
     * a special value that tells GPU not to fence things.
     */
    uint32_t seq;
};

/* Allocates a fence manager; '*fenceman' is NULL on failure. */
int vigs_fenceman_create(struct vigs_fenceman **fenceman);

/* Frees the manager; BUGs if fences are still pending. */
void vigs_fenceman_destroy(struct vigs_fenceman *fenceman);

/*
 * Can be called from IRQ handler.
 */
void vigs_fenceman_ack(struct vigs_fenceman *fenceman,
                       uint32_t lower, uint32_t upper);

#endif
--- /dev/null
+#include "vigs_file.h"
+#include "vigs_device.h"
+
+int vigs_file_create(struct vigs_device *vigs_dev,
+ struct vigs_file **vigs_file)
+{
+ int ret = 0;
+
+ *vigs_file = kzalloc(sizeof(**vigs_file), GFP_KERNEL);
+
+ if (!*vigs_file) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ (*vigs_file)->obj_file = ttm_object_file_init(vigs_dev->obj_dev, 10);
+
+ if (!(*vigs_file)->obj_file) {
+ ret = -ENOMEM;
+ goto fail2;
+ }
+
+ return 0;
+
+fail2:
+ kfree(*vigs_file);
+fail1:
+ *vigs_file = NULL;
+
+ return ret;
+}
+
/* Releases the TTM object file and frees the per-file state. */
void vigs_file_destroy(struct vigs_file *vigs_file)
{
    ttm_object_file_release(&vigs_file->obj_file);
    kfree(vigs_file);
}
--- /dev/null
#ifndef _VIGS_FILE_H_
#define _VIGS_FILE_H_

#include "drmP.h"
#include <ttm/ttm_object.h>

struct vigs_device;

/* Per-open-file driver state (drm_file::driver_priv). */
struct vigs_file
{
    /* TTM object namespace for this client's handles. */
    struct ttm_object_file *obj_file;
};

int vigs_file_create(struct vigs_device *vigs_dev,
                     struct vigs_file **vigs_file);

void vigs_file_destroy(struct vigs_file *vigs_file);

#endif
--- /dev/null
+#include "vigs_framebuffer.h"
+#include "vigs_device.h"
+#include "vigs_surface.h"
+#include "vigs_fbdev.h"
+#include "vigs_comm.h"
+#include "drm_crtc_helper.h"
+#include <drm/vigs_drm.h>
+
/* drm_framebuffer_funcs::destroy — cleanup first, then drop surfaces. */
static void vigs_framebuffer_destroy(struct drm_framebuffer *fb)
{
    struct vigs_framebuffer *vigs_fb = fb_to_vigs_fb(fb);
    int i;

    DRM_DEBUG_KMS("enter\n");

    /*
     * First, we need to call 'drm_framebuffer_cleanup', this'll
     * automatically call 'vigs_crtc_disable' if needed, thus, notifying
     * the host that root surface is gone.
     */

    drm_framebuffer_cleanup(fb);

    /*
     * And we can finally free the GEMs.
     */

    /* 4 == ARRAY_SIZE(vigs_fb->surfaces); unused planes are NULL. */
    for (i = 0; i < 4; ++i) {
        if (vigs_fb->surfaces[i]) {
            drm_gem_object_unreference_unlocked(&vigs_fb->surfaces[i]->gem.base);
        }
    }
    kfree(vigs_fb);
}
+
+static int vigs_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct vigs_framebuffer *vigs_fb = fb_to_vigs_fb(fb);
+
+ DRM_DEBUG_KMS("enter\n");
+
+ return drm_gem_handle_create(file_priv, &vigs_fb->surfaces[0]->gem.base, handle);
+}
+
/*
 * drm_framebuffer_funcs::dirty — intentionally a no-op; dirty-region
 * handling is not needed for this device.
 */
static int vigs_framebuffer_dirty(struct drm_framebuffer *fb,
                                  struct drm_file *file_priv,
                                  unsigned flags, unsigned color,
                                  struct drm_clip_rect *clips,
                                  unsigned num_clips)
{
    DRM_DEBUG_KMS("enter\n");
    return 0;
}
+
/* Framebuffer vtable shared by fbdev and user-created framebuffers. */
static struct drm_framebuffer_funcs vigs_framebuffer_funcs =
{
    .destroy = vigs_framebuffer_destroy,
    .create_handle = vigs_framebuffer_create_handle,
    .dirty = vigs_framebuffer_dirty,
};
+
/*
 * mode_config::fb_create — builds a vigs_framebuffer from the per-plane
 * GEM handles in 'mode_cmd'. Each looked-up GEM reference is owned by
 * the framebuffer on success; on failure the 'fail' loop unwinds the
 * first 'i' references acquired so far.
 */
static struct drm_framebuffer *vigs_fb_create(struct drm_device *drm_dev,
                                              struct drm_file *file_priv,
                                              struct drm_mode_fb_cmd2 *mode_cmd)
{
    struct vigs_device *vigs_dev = drm_dev->dev_private;
    struct vigs_surface *surfaces[4];
    int ret, i;
    int num_planes = drm_format_num_planes(mode_cmd->pixel_format);
    struct vigs_framebuffer *vigs_fb;

    DRM_DEBUG_KMS("enter\n");

    for (i = 0; i < num_planes; ++i) {
        struct drm_gem_object *gem;
        struct vigs_gem_object *vigs_gem;

        gem = drm_gem_object_lookup(drm_dev, file_priv, mode_cmd->handles[i]);

        if (!gem) {
            DRM_ERROR("GEM lookup failed, handle = %u\n", mode_cmd->handles[i]);
            ret = -ENOENT;
            goto fail;
        }

        vigs_gem = gem_to_vigs_gem(gem);

        /* Only surface GEMs can be scanned out. */
        if (vigs_gem->type != VIGS_GEM_TYPE_SURFACE) {
            DRM_ERROR("GEM is not a surface, handle = %u\n", mode_cmd->handles[i]);
            drm_gem_object_unreference_unlocked(gem);
            ret = -ENOENT;
            goto fail;
        }

        surfaces[i] = vigs_gem_to_vigs_surface(vigs_gem);
    }

    vigs_fb = kzalloc(sizeof(*vigs_fb), GFP_KERNEL);

    if (!vigs_fb) {
        ret = -ENOMEM;
        goto fail;
    }

    vigs_fb->comm = vigs_dev->comm;

    for (i = 0; i < num_planes; ++i) {
        vigs_fb->surfaces[i] = surfaces[i];
    }

    ret = drm_framebuffer_init(vigs_dev->drm_dev,
                               &vigs_fb->base,
                               &vigs_framebuffer_funcs);

    if (ret != 0) {
        DRM_ERROR("unable to create the framebuffer: %d\n", ret);
        kfree(vigs_fb);
        goto fail;
    }

    drm_helper_mode_fill_fb_struct(&vigs_fb->base, mode_cmd);

    return &vigs_fb->base;

fail:
    /* 'i' is the count of successfully acquired references. */
    for (i--; i >= 0; i--) {
        drm_gem_object_unreference_unlocked(&surfaces[i]->gem.base);
    }

    return ERR_PTR(ret);
}
+
+static void vigs_output_poll_changed(struct drm_device *drm_dev)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ if (vigs_dev->fbdev) {
+ vigs_fbdev_output_poll_changed(vigs_dev->fbdev);
+ }
+}
+
/* Mode config callbacks installed by vigs_framebuffer_config_init. */
static struct drm_mode_config_funcs vigs_mode_config_funcs =
{
    .fb_create = vigs_fb_create,
    .output_poll_changed = vigs_output_poll_changed
};
+
+void vigs_framebuffer_config_init(struct vigs_device *vigs_dev)
+{
+ DRM_DEBUG_KMS("enter\n");
+
+ vigs_dev->drm_dev->mode_config.min_width = 0;
+ vigs_dev->drm_dev->mode_config.min_height = 0;
+
+ vigs_dev->drm_dev->mode_config.max_width = 4096;
+ vigs_dev->drm_dev->mode_config.max_height = 4096;
+
+ vigs_dev->drm_dev->mode_config.funcs = &vigs_mode_config_funcs;
+}
+
+int vigs_framebuffer_create(struct vigs_device *vigs_dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct vigs_surface *fb_sfc,
+ struct vigs_framebuffer **vigs_fb)
+{
+ int ret = 0;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ *vigs_fb = kzalloc(sizeof(**vigs_fb), GFP_KERNEL);
+
+ if (!*vigs_fb) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ if ((fb_sfc->width != mode_cmd->width) ||
+ (fb_sfc->height != mode_cmd->height) ||
+ (fb_sfc->stride != mode_cmd->pitches[0])) {
+ DRM_DEBUG_KMS("surface format mismatch, sfc - (%u,%u,%u), mode - (%u,%u,%u)\n",
+ fb_sfc->width, fb_sfc->height, fb_sfc->stride,
+ mode_cmd->width, mode_cmd->height, mode_cmd->pitches[0]);
+ ret = -EINVAL;
+ goto fail2;
+ }
+
+ (*vigs_fb)->comm = vigs_dev->comm;
+ (*vigs_fb)->surfaces[0] = fb_sfc;
+
+ ret = drm_framebuffer_init(vigs_dev->drm_dev,
+ &(*vigs_fb)->base,
+ &vigs_framebuffer_funcs);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ drm_helper_mode_fill_fb_struct(&(*vigs_fb)->base, mode_cmd);
+
+ drm_gem_object_reference(&fb_sfc->gem.base);
+
+ return 0;
+
+fail2:
+ kfree(*vigs_fb);
+fail1:
+ *vigs_fb = NULL;
+
+ return ret;
+}
+
+int vigs_framebuffer_pin(struct vigs_framebuffer *vigs_fb)
+{
+ int ret;
+
+ vigs_gem_reserve(&vigs_fb->surfaces[0]->gem);
+
+ ret = vigs_gem_pin(&vigs_fb->surfaces[0]->gem);
+
+ vigs_gem_unreserve(&vigs_fb->surfaces[0]->gem);
+
+ return ret;
+}
+
+void vigs_framebuffer_unpin(struct vigs_framebuffer *vigs_fb)
+{
+ vigs_gem_reserve(&vigs_fb->surfaces[0]->gem);
+
+ vigs_gem_unpin(&vigs_fb->surfaces[0]->gem);
+
+ vigs_gem_unreserve(&vigs_fb->surfaces[0]->gem);
+}
--- /dev/null
#ifndef _VIGS_FRAMEBUFFER_H_
#define _VIGS_FRAMEBUFFER_H_

#include "drmP.h"
#include "vigs_protocol.h"

struct vigs_device;
struct vigs_comm;
struct vigs_surface;

/* A DRM framebuffer backed by up to 4 per-plane VIGS surfaces. */
struct vigs_framebuffer
{
    struct drm_framebuffer base;

    /*
     * Cached from 'vigs_device' for speed.
     */
    struct vigs_comm *comm;

    /* Per-plane surfaces; unused slots stay NULL. */
    struct vigs_surface *surfaces[4];
};

static inline struct vigs_framebuffer *fb_to_vigs_fb(struct drm_framebuffer *fb)
{
    return container_of(fb, struct vigs_framebuffer, base);
}

void vigs_framebuffer_config_init(struct vigs_device *vigs_dev);

/*
 * Creates a framebuffer object.
 * Note that it also gets a reference to 'fb_gem' (in case of success), so
 * don't forget to unreference it in the calling code.
 */
int vigs_framebuffer_create(struct vigs_device *vigs_dev,
                            struct drm_mode_fb_cmd2 *mode_cmd,
                            struct vigs_surface *fb_sfc,
                            struct vigs_framebuffer **vigs_fb);

int vigs_framebuffer_pin(struct vigs_framebuffer *vigs_fb);
void vigs_framebuffer_unpin(struct vigs_framebuffer *vigs_fb);

#endif
--- /dev/null
+#include "vigs_gem.h"
+#include "vigs_device.h"
+#include "vigs_mman.h"
+#include "vigs_surface.h"
+#include "vigs_dp.h"
+#include <drm/vigs_drm.h>
+#include <ttm/ttm_placement.h>
+
/*
 * TTM buffer-object destructor: runs the type-specific destroy hook,
 * releases the DRM GEM part and frees the object.
 */
static void vigs_gem_bo_destroy(struct ttm_buffer_object *bo)
{
    struct vigs_gem_object *vigs_gem = bo_to_vigs_gem(bo);

    if (vigs_gem->destroy) {
        vigs_gem->destroy(vigs_gem);
    }

    drm_gem_object_release(&vigs_gem->base);
    kfree(vigs_gem);
}
+
/*
 * Initializes a GEM object of the given type. 'size' is rounded up to
 * page size. On failure 'vigs_gem' is freed (directly on the early
 * paths; via vigs_gem_bo_destroy once ttm_bo_init has been entered,
 * since TTM invokes the destroy callback on its own failure —
 * NOTE(review): relies on that TTM contract, confirm for this kernel).
 */
int vigs_gem_init(struct vigs_gem_object *vigs_gem,
                  struct vigs_device *vigs_dev,
                  enum ttm_object_type type,
                  unsigned long size,
                  bool kernel,
                  vigs_gem_destroy_func destroy)
{
    u32 placements[1];
    struct ttm_placement placement;
    enum ttm_bo_type bo_type;
    int ret = 0;

    size = roundup(size, PAGE_SIZE);

    if (size == 0) {
        kfree(vigs_gem);
        return -EINVAL;
    }

    /* Surfaces start in system RAM (TT); execbuffers in PRIV0. */
    if (type == VIGS_GEM_TYPE_SURFACE) {
        placements[0] =
            TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT | TTM_PL_FLAG_NO_EVICT;
    } else if (type == VIGS_GEM_TYPE_EXECBUFFER) {
        placements[0] =
            TTM_PL_FLAG_WC | TTM_PL_FLAG_PRIV0 | TTM_PL_FLAG_NO_EVICT;
    } else {
        kfree(vigs_gem);
        return -EINVAL;
    }

    memset(&placement, 0, sizeof(placement));

    placement.placement = placements;
    placement.busy_placement = placements;
    placement.num_placement = 1;
    placement.num_busy_placement = 1;

    if (kernel) {
        bo_type = ttm_bo_type_kernel;
    } else {
        bo_type = ttm_bo_type_device;
    }

    /* Lazily wire TTM's mapping to the DRM device's address space. */
    if (unlikely(vigs_dev->mman->bo_dev.dev_mapping == NULL)) {
        vigs_dev->mman->bo_dev.dev_mapping = vigs_dev->drm_dev->dev_mapping;
    }

    ret = drm_gem_object_init(vigs_dev->drm_dev, &vigs_gem->base, size);

    if (ret != 0) {
        kfree(vigs_gem);
        return ret;
    }

    ret = ttm_bo_init(&vigs_dev->mman->bo_dev, &vigs_gem->bo, size, bo_type,
                      &placement, 0,
                      false, NULL, 0, NULL,
                      &vigs_gem_bo_destroy);

    if (ret != 0) {
        return ret;
    }

    vigs_gem->type = type;
    vigs_gem->pin_count = 0;
    vigs_gem->destroy = destroy;

    DRM_DEBUG_DRIVER("GEM created (type = %u, off = 0x%llX, sz = %lu)\n",
                     type,
                     vigs_gem_mmap_offset(vigs_gem),
                     vigs_gem_size(vigs_gem));

    return 0;
}
+
/*
 * Drops the TTM reference; actual destruction happens in
 * vigs_gem_bo_destroy when the last reference goes away.
 */
void vigs_gem_cleanup(struct vigs_gem_object *vigs_gem)
{
    struct ttm_buffer_object *bo = &vigs_gem->bo;

    ttm_bo_unref(&bo);
}
+
/*
 * Pins the GEM, for surfaces by validating it into VRAM. Nested pins
 * just bump the counter; execbuffers are never moved so pinning them
 * is only bookkeeping. Caller must hold the reservation
 * (vigs_gem_reserve).
 */
int vigs_gem_pin(struct vigs_gem_object *vigs_gem)
{
    u32 placements[1];
    struct ttm_placement placement;
    int ret;

    if (vigs_gem->pin_count) {
        ++vigs_gem->pin_count;
        return 0;
    }

    if (vigs_gem->type == VIGS_GEM_TYPE_EXECBUFFER) {
        vigs_gem->pin_count = 1;

        return 0;
    }

    placements[0] =
        TTM_PL_FLAG_WC | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_NO_EVICT;

    memset(&placement, 0, sizeof(placement));

    placement.placement = placements;
    placement.busy_placement = placements;
    placement.num_placement = 1;
    placement.num_busy_placement = 1;

    ret = ttm_bo_validate(&vigs_gem->bo, &placement, false, false);

    if (ret != 0) {
        DRM_ERROR("GEM pin failed (type = %u, off = 0x%llX, sz = %lu)\n",
                  vigs_gem->type,
                  vigs_gem_mmap_offset(vigs_gem),
                  vigs_gem_size(vigs_gem));
        return ret;
    }

    vigs_gem->pin_count = 1;

    DRM_DEBUG_DRIVER("GEM pinned (type = %u, off = 0x%llX, sz = %lu)\n",
                     vigs_gem->type,
                     vigs_gem_mmap_offset(vigs_gem),
                     vigs_gem_size(vigs_gem));

    return 0;
}
+
/*
 * Drops one pin. When the last pin goes away a surface is re-validated
 * into VRAM-or-TT so TTM may evict it again; any kernel mapping is torn
 * down first since the backing pages may move. Caller must hold the
 * reservation.
 */
void vigs_gem_unpin(struct vigs_gem_object *vigs_gem)
{
    u32 placements[2];
    struct ttm_placement placement;
    int ret;

    BUG_ON(vigs_gem->pin_count == 0);

    if (--vigs_gem->pin_count > 0) {
        return;
    }

    if (vigs_gem->type == VIGS_GEM_TYPE_EXECBUFFER) {
        return;
    }

    /* Mapping may dangle if TTM moves the buffer — drop it now. */
    vigs_gem_kunmap(vigs_gem);

    placements[0] =
        TTM_PL_FLAG_WC | TTM_PL_FLAG_VRAM;
    placements[1] =
        TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT | TTM_PL_FLAG_NO_EVICT;

    memset(&placement, 0, sizeof(placement));

    placement.placement = placements;
    placement.busy_placement = placements;
    placement.num_placement = 2;
    placement.num_busy_placement = 2;

    ret = ttm_bo_validate(&vigs_gem->bo, &placement, false, false);

    if (ret != 0) {
        DRM_ERROR("GEM unpin failed (type = %u, off = 0x%llX, sz = %lu)\n",
                  vigs_gem->type,
                  vigs_gem_mmap_offset(vigs_gem),
                  vigs_gem_size(vigs_gem));
    } else {
        DRM_DEBUG_DRIVER("GEM unpinned (type = %u, off = 0x%llX, sz = %lu)\n",
                         vigs_gem->type,
                         vigs_gem_mmap_offset(vigs_gem),
                         vigs_gem_size(vigs_gem));
    }
}
+
/*
 * Maps the whole buffer into kernel address space; idempotent — a
 * second call reuses the cached 'kptr'. Surfaces must be pinned first
 * (their pages could otherwise move). Caller must hold the reservation.
 */
int vigs_gem_kmap(struct vigs_gem_object *vigs_gem)
{
    bool is_iomem;
    int ret;

    BUG_ON((vigs_gem->type == VIGS_GEM_TYPE_SURFACE) &&
           (vigs_gem->pin_count == 0));

    if (vigs_gem->kptr) {
        return 0;
    }

    ret = ttm_bo_kmap(&vigs_gem->bo,
                      0,
                      vigs_gem->bo.num_pages,
                      &vigs_gem->kmap);

    if (ret != 0) {
        return ret;
    }

    vigs_gem->kptr = ttm_kmap_obj_virtual(&vigs_gem->kmap, &is_iomem);

    DRM_DEBUG_DRIVER("GEM (type = %u, off = 0x%llX, sz = %lu) mapped to 0x%p\n",
                     vigs_gem->type,
                     vigs_gem_mmap_offset(vigs_gem),
                     vigs_gem_size(vigs_gem),
                     vigs_gem->kptr);

    return 0;
}
+
+void vigs_gem_kunmap(struct vigs_gem_object *vigs_gem)
+{
+ if (vigs_gem->kptr == NULL) {
+ return;
+ }
+
+ vigs_gem->kptr = NULL;
+
+ ttm_bo_kunmap(&vigs_gem->kmap);
+
+ DRM_DEBUG_DRIVER("GEM (type = %u, off = 0x%llX, sz = %lu) unmapped\n",
+ vigs_gem->type,
+ vigs_gem_mmap_offset(vigs_gem),
+ vigs_gem_size(vigs_gem));
+}
+
+int vigs_gem_in_vram(struct vigs_gem_object *vigs_gem)
+{
+ return vigs_gem->bo.mem.mem_type == TTM_PL_VRAM;
+}
+
/*
 * Interruptibly waits for outstanding GPU work on this buffer, under
 * TTM's fence_lock as ttm_bo_wait requires.
 */
int vigs_gem_wait(struct vigs_gem_object *vigs_gem)
{
    int ret;

    spin_lock(&vigs_gem->bo.bdev->fence_lock);

    ret = ttm_bo_wait(&vigs_gem->bo, true, false, false);

    spin_unlock(&vigs_gem->bo.bdev->fence_lock);

    return ret;
}
+
/*
 * drm_driver::gem_free_object — detaches surfaces from the display,
 * drops any kernel mapping, flags the object as freed and releases the
 * TTM reference (final destruction happens in vigs_gem_bo_destroy).
 */
void vigs_gem_free_object(struct drm_gem_object *gem)
{
    struct vigs_gem_object *vigs_gem = gem_to_vigs_gem(gem);

    if (vigs_gem->type == VIGS_GEM_TYPE_SURFACE) {
        struct vigs_device *vigs_dev = gem->dev->dev_private;

        vigs_dp_remove_surface(vigs_dev->dp,
                               vigs_gem_to_vigs_surface(vigs_gem));
    }

    vigs_gem_reserve(vigs_gem);

    vigs_gem_kunmap(vigs_gem);

    vigs_gem_unreserve(vigs_gem);

    vigs_gem->freed = true;

    DRM_DEBUG_DRIVER("GEM free (type = %u, off = 0x%llX, sz = %lu)\n",
                     vigs_gem->type,
                     vigs_gem_mmap_offset(vigs_gem),
                     vigs_gem_size(vigs_gem));

    vigs_gem_cleanup(vigs_gem);
}
+
/* drm_driver::gem_open_object — no per-open state needed. */
int vigs_gem_open_object(struct drm_gem_object *gem,
                         struct drm_file *file_priv)
{
    return 0;
}
+
/* drm_driver::gem_close_object — nothing to tear down per open. */
void vigs_gem_close_object(struct drm_gem_object *gem,
                           struct drm_file *file_priv)
{
}
+
/*
 * DRM_VIGS_GEM_MAP ioctl: maps a GEM into the calling process, doing
 * the mmap on the caller's behalf because the TTM offset is 64-bit.
 * NOTE(review): 'track_gem_access' is toggled on the shared device
 * while mmap_sem is held for write — presumably serialized by that;
 * verify no other path reads it without the lock.
 */
int vigs_gem_map_ioctl(struct drm_device *drm_dev,
                       void *data,
                       struct drm_file *file_priv)
{
    struct vigs_device *vigs_dev = drm_dev->dev_private;
    struct drm_vigs_gem_map *args = data;
    struct drm_gem_object *gem;
    struct vigs_gem_object *vigs_gem;
    struct mm_struct *mm = current->mm;
    unsigned long address, unused;

    gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);

    if (gem == NULL) {
        return -ENOENT;
    }

    vigs_gem = gem_to_vigs_gem(gem);

    down_write(&mm->mmap_sem);

    /*
     * We can't use 'do_mmap' here (like in i915, exynos and others) because
     * 'do_mmap' takes an offset in bytes and our
     * offset is 64-bit (since it's TTM offset) and it can't fit into 32-bit
     * variable.
     */
    vigs_dev->track_gem_access = args->track_access;
    address = do_mmap_pgoff(file_priv->filp, 0, vigs_gem_size(vigs_gem),
                            PROT_READ | PROT_WRITE,
                            MAP_SHARED,
                            vigs_gem_mmap_offset(vigs_gem) >> PAGE_SHIFT,
                            &unused);
    vigs_dev->track_gem_access = false;

    up_write(&mm->mmap_sem);

    drm_gem_object_unreference_unlocked(gem);

    if (IS_ERR((void*)address)) {
        return PTR_ERR((void*)address);
    }

    args->address = address;

    return 0;
}
+
+int vigs_gem_wait_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vigs_gem_wait *args = data;
+ struct drm_gem_object *gem;
+ struct vigs_gem_object *vigs_gem;
+ int ret;
+
+ gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
+
+ if (gem == NULL) {
+ return -ENOENT;
+ }
+
+ vigs_gem = gem_to_vigs_gem(gem);
+
+ vigs_gem_reserve(vigs_gem);
+
+ ret = vigs_gem_wait(vigs_gem);
+
+ vigs_gem_unreserve(vigs_gem);
+
+ drm_gem_object_unreference_unlocked(gem);
+
+ return ret;
+}
+
+int vigs_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm_dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct vigs_surface *sfc = NULL;
+ uint32_t handle;
+ int ret;
+
+ if (args->bpp != 32) {
+ DRM_ERROR("Only 32 bpp surfaces are supported for now\n");
+ return -EINVAL;
+ }
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+
+ ret = vigs_surface_create(vigs_dev,
+ args->width,
+ args->height,
+ args->pitch,
+ vigsp_surface_bgrx8888,
+ true,
+ &sfc);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ args->size = vigs_gem_size(&sfc->gem);
+
+ ret = drm_gem_handle_create(file_priv,
+ &sfc->gem.base,
+ &handle);
+
+ drm_gem_object_unreference_unlocked(&sfc->gem.base);
+
+ if (ret == 0) {
+ args->handle = handle;
+ }
+
+ return 0;
+}
+
+int vigs_gem_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *drm_dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
+
+/*
+ * drm_driver::dumb_map_offset hook: report the fake mmap offset
+ * (relative to DRM_FILE_OFFSET) that userspace should pass to mmap()
+ * in order to map the dumb buffer.
+ *
+ * Returns 0 on success or -ENOENT for an unknown handle.
+ */
+int vigs_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *drm_dev,
+ uint32_t handle, uint64_t *offset_p)
+{
+ struct drm_gem_object *gem;
+ struct vigs_gem_object *vigs_gem;
+
+ BUG_ON(!offset_p);
+
+ /* Takes a reference on success; dropped below. */
+ gem = drm_gem_object_lookup(drm_dev, file_priv, handle);
+
+ if (gem == NULL) {
+ return -ENOENT;
+ }
+
+ vigs_gem = gem_to_vigs_gem(gem);
+
+ *offset_p = vigs_gem_mmap_offset(vigs_gem);
+
+ drm_gem_object_unreference_unlocked(gem);
+
+ return 0;
+}
--- /dev/null
+#ifndef _VIGS_GEM_H_
+#define _VIGS_GEM_H_
+
+#include "drmP.h"
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_object.h>
+
+#define VIGS_GEM_TYPE_SURFACE ttm_driver_type0
+#define VIGS_GEM_TYPE_EXECBUFFER ttm_driver_type1
+
+struct vigs_device;
+struct vigs_gem_object;
+
+typedef void (*vigs_gem_destroy_func)(struct vigs_gem_object *vigs_gem);
+
+/*
+ * VIGS GEM object: a DRM GEM object backed by a TTM buffer object.
+ * Both 'base' and 'bo' are embedded, so container_of conversions
+ * (gem_to_vigs_gem / bo_to_vigs_gem) are used throughout.
+ */
+struct vigs_gem_object
+{
+ struct drm_gem_object base;
+
+ struct ttm_buffer_object bo;
+
+ /*
+ * Indicates that drm_driver::gem_free_object was called.
+ */
+ bool freed;
+
+ /* VIGS_GEM_TYPE_SURFACE or VIGS_GEM_TYPE_EXECBUFFER. */
+ enum ttm_object_type type;
+
+ /*
+ * Valid only after successful call to 'vigs_gem_kmap'.
+ * @{
+ */
+
+ struct ttm_bo_kmap_obj kmap;
+ void *kptr; /* Kernel pointer to buffer data. */
+
+ /*
+ * @}
+ */
+
+ /* Nesting pin counter; see vigs_gem_pin/vigs_gem_unpin. */
+ volatile unsigned pin_count;
+
+ /* Type-specific destructor invoked on final cleanup; may be NULL. */
+ vigs_gem_destroy_func destroy;
+};
+
+static inline struct vigs_gem_object *gem_to_vigs_gem(struct drm_gem_object *gem)
+{
+ return container_of(gem, struct vigs_gem_object, base);
+}
+
+static inline struct vigs_gem_object *bo_to_vigs_gem(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vigs_gem_object, bo);
+}
+
+/*
+ * Must be called with drm_device::struct_mutex held.
+ * @{
+ */
+
+static inline bool vigs_gem_freed(struct vigs_gem_object *vigs_gem)
+{
+ return vigs_gem->freed;
+}
+
+/*
+ * @}
+ */
+
+/*
+ * Initializes a gem object. 'size' is automatically rounded up to page size.
+ * 'vigs_gem' is kfree'd on failure.
+ */
+int vigs_gem_init(struct vigs_gem_object *vigs_gem,
+ struct vigs_device *vigs_dev,
+ enum ttm_object_type type,
+ unsigned long size,
+ bool kernel,
+ vigs_gem_destroy_func destroy);
+
+void vigs_gem_cleanup(struct vigs_gem_object *vigs_gem);
+
+/*
+ * Buffer size.
+ */
+static inline unsigned long vigs_gem_size(struct vigs_gem_object *vigs_gem)
+{
+ return vigs_gem->bo.num_pages << PAGE_SHIFT;
+}
+
+/*
+ * GEM offset in a placement. In case of execbuffer always the same.
+ * In case of surface only valid when GEM is in VRAM.
+ */
+static inline unsigned long vigs_gem_offset(struct vigs_gem_object *vigs_gem)
+{
+ return vigs_gem->bo.offset;
+}
+
+/*
+ * GEM offset relative to DRM_FILE_OFFSET. For kernel buffers it's always 0.
+ */
+static inline u64 vigs_gem_mmap_offset(struct vigs_gem_object *vigs_gem)
+{
+ return drm_vma_node_offset_addr(&vigs_gem->bo.vma_node);
+}
+
+/*
+ * Reserve the underlying TTM BO (non-interruptible, blocking).
+ * Reservation cannot fail in this mode, hence the BUG_ON.
+ */
+static inline void vigs_gem_reserve(struct vigs_gem_object *vigs_gem)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&vigs_gem->bo, false, false, false, 0);
+
+ BUG_ON(ret != 0);
+}
+
+static inline void vigs_gem_unreserve(struct vigs_gem_object *vigs_gem)
+{
+ ttm_bo_unreserve(&vigs_gem->bo);
+}
+
+/*
+ * Functions below MUST be called between
+ * vigs_gem_reserve/vigs_gem_unreserve.
+ * @{
+ */
+
+/*
+ * Pin/unpin GEM. For execbuffers this is a no-op, since they're always
+ * in RAM placement. For surfaces this pins the GEM into VRAM. The
+ * operation can fail if there's no room in VRAM and all GEMs currently
+ * in VRAM are pinned.
+ * @{
+ */
+int vigs_gem_pin(struct vigs_gem_object *vigs_gem);
+void vigs_gem_unpin(struct vigs_gem_object *vigs_gem);
+/*
+ * @}
+ */
+
+/*
+ * Surface GEMs must be pinned before calling these.
+ * @{
+ */
+int vigs_gem_kmap(struct vigs_gem_object *vigs_gem);
+void vigs_gem_kunmap(struct vigs_gem_object *vigs_gem);
+/*
+ * @}
+ */
+
+/*
+ * true if GEM is currently in VRAM. Note that this doesn't
+ * necessarily mean that it's pinned.
+ */
+int vigs_gem_in_vram(struct vigs_gem_object *vigs_gem);
+
+int vigs_gem_wait(struct vigs_gem_object *vigs_gem);
+
+/*
+ * @}
+ */
+
+/*
+ * Driver hooks.
+ * @{
+ */
+
+void vigs_gem_free_object(struct drm_gem_object *gem);
+
+int vigs_gem_open_object(struct drm_gem_object *gem,
+ struct drm_file *file_priv);
+
+void vigs_gem_close_object(struct drm_gem_object *gem,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+/*
+ * IOCTLs
+ * @{
+ */
+
+int vigs_gem_map_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_gem_wait_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+/*
+ * Dumb
+ * @{
+ */
+
+int vigs_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm_dev,
+ struct drm_mode_create_dumb *args);
+
+int vigs_gem_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *drm_dev,
+ uint32_t handle);
+
+int vigs_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *drm_dev,
+ uint32_t handle, uint64_t *offset_p);
+
+/*
+ * @}
+ */
+
+#endif
--- /dev/null
+#include "vigs_irq.h"
+#include "vigs_device.h"
+#include "vigs_regs.h"
+#include "vigs_fenceman.h"
+
+/*
+ * Complete all pending page-flip events for CRTC 0: timestamp each
+ * event, hand it to its owner's DRM event queue and wake any readers.
+ * Called from the vblank interrupt path.
+ */
+static void vigs_finish_pageflips(struct vigs_device *vigs_dev)
+{
+ struct drm_pending_vblank_event *event, *tmp;
+ struct timeval now;
+ unsigned long flags;
+ bool is_checked = false;
+
+ /* event_lock protects both our list and the per-file event lists. */
+ spin_lock_irqsave(&vigs_dev->drm_dev->event_lock, flags);
+
+ list_for_each_entry_safe(event, tmp,
+ &vigs_dev->pageflip_event_list,
+ base.link) {
+ /* Only CRTC 0 exists on this device. */
+ if (event->pipe != 0) {
+ continue;
+ }
+
+ is_checked = true;
+
+ do_gettimeofday(&now);
+ event->event.sequence = 0;
+ event->event.tv_sec = now.tv_sec;
+ event->event.tv_usec = now.tv_usec;
+
+ /* Deliver the event to the file that queued the flip. */
+ list_move_tail(&event->base.link, &event->base.file_priv->event_list);
+ wake_up_interruptible(&event->base.file_priv->event_wait);
+ }
+
+ if (is_checked) {
+ /*
+ * Call 'drm_vblank_put' only in case that 'drm_vblank_get' was
+ * called.
+ */
+ if (atomic_read(&vigs_dev->drm_dev->vblank[0].refcount) > 0) {
+ drm_vblank_put(vigs_dev->drm_dev, 0);
+ }
+ }
+
+ spin_unlock_irqrestore(&vigs_dev->drm_dev->event_lock, flags);
+}
+
+/*
+ * drm_driver::enable_vblank hook. Only CRTC 0 exists on this device;
+ * enables vblank interrupt generation via the CON register.
+ */
+int vigs_enable_vblank(struct drm_device *drm_dev, int crtc)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ u32 value;
+
+ DRM_DEBUG_KMS("enter: crtc = %d\n", crtc);
+
+ if (crtc != 0) {
+ /* Fix: terminate the kernel log message with a newline. */
+ DRM_ERROR("bad crtc = %d\n", crtc);
+ return -EINVAL;
+ }
+
+ value = VIGS_REG_CON_VBLANK_ENABLE;
+
+ /*
+ * NOTE(review): this writes the whole CON register, not just the
+ * vblank bit -- confirm no other CON bits are ever in use.
+ */
+ writel(value, vigs_dev->io_map->handle + VIGS_REG_CON);
+
+ return 0;
+}
+
+/*
+ * drm_driver::disable_vblank hook. Clears the CON register to stop
+ * vblank interrupt generation. Void return, so a bad crtc is only
+ * logged.
+ */
+void vigs_disable_vblank(struct drm_device *drm_dev, int crtc)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ u32 value;
+
+ DRM_DEBUG_KMS("enter: crtc = %d\n", crtc);
+
+ if (crtc != 0) {
+ /* Fix: terminate the kernel log message with a newline. */
+ DRM_ERROR("bad crtc = %d\n", crtc);
+ }
+
+ value = 0;
+
+ writel(value, vigs_dev->io_map->handle + VIGS_REG_CON);
+}
+
+/*
+ * Top-level interrupt handler. Reads the INT status register, acks
+ * pending vblank/fence interrupts by writing the status back, then
+ * drains fence acks and dispatches vblank handling.
+ */
+irqreturn_t vigs_irq_handler(int irq, void *arg)
+{
+ struct drm_device *drm_dev = (struct drm_device*)arg;
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ u32 int_value;
+ irqreturn_t ret = IRQ_NONE;
+
+ int_value = readl(vigs_dev->io_map->handle + VIGS_REG_INT);
+
+ if ((int_value & (VIGS_REG_INT_VBLANK_PENDING | VIGS_REG_INT_FENCE_ACK_PENDING)) != 0) {
+ /*
+ * Clear the interrupt first in order
+ * not to stall the hardware.
+ */
+
+ writel(int_value, vigs_dev->io_map->handle + VIGS_REG_INT);
+
+ ret = IRQ_HANDLED;
+ }
+
+ if ((int_value & VIGS_REG_INT_FENCE_ACK_PENDING) != 0) {
+ u32 lower, upper;
+
+ /*
+ * Drain the fence ack FIFO: each read under irq_lock yields a
+ * (lower, upper) sequence range; a zero 'lower' means empty.
+ */
+ while (1) {
+ spin_lock(&vigs_dev->irq_lock);
+
+ lower = readl(vigs_dev->io_map->handle + VIGS_REG_FENCE_LOWER);
+ upper = readl(vigs_dev->io_map->handle + VIGS_REG_FENCE_UPPER);
+
+ spin_unlock(&vigs_dev->irq_lock);
+
+ if (lower) {
+ vigs_fenceman_ack(vigs_dev->fenceman, lower, upper);
+ } else {
+ break;
+ }
+ }
+ }
+
+ if ((int_value & VIGS_REG_INT_VBLANK_PENDING) != 0) {
+ drm_handle_vblank(drm_dev, 0);
+
+ vigs_finish_pageflips(vigs_dev);
+ }
+
+ return ret;
+}
--- /dev/null
+#ifndef _VIGS_IRQ_H_
+#define _VIGS_IRQ_H_
+
+#include "drmP.h"
+
+int vigs_enable_vblank(struct drm_device *drm_dev, int crtc);
+
+void vigs_disable_vblank(struct drm_device *drm_dev, int crtc);
+
+irqreturn_t vigs_irq_handler(int irq, void *arg);
+
+#endif
--- /dev/null
+#include "vigs_mman.h"
+#include "vigs_fence.h"
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_page_alloc.h>
+
+/*
+ * This is TTM-based memory manager for VIGS, it supports 4 memory placements:
+ * CPU - This is for target-only memory, not shared with host, forced by TTM,
+ * not used.
+ * GPU - This is host-only memory, not shared with target.
+ * VRAM - This gets allocated on "VRAM" PCI BAR, shared with host, used
+ * for surface placement.
+ * RAM - This gets allocated on "RAM" PCI BAR, shared with host, used for
+ * execbuffer placement.
+ *
+ * Eviction is supported, so buffers can be moved between some placements.
+ * Allowed movements:
+ * VRAM -> GPU
+ * GPU -> VRAM
+ */
+
+/*
+ * Offsets for mmap will start at DRM_FILE_OFFSET
+ */
+#define DRM_FILE_OFFSET 0x100000000ULL
+#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
+
+/*
+ * DRM_GLOBAL_TTM_MEM init/release thunks
+ * @{
+ */
+
+static int vigs_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void vigs_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+/*
+ * @}
+ */
+
+/*
+ * Here we initialize mman::bo_global_ref and mman::mem_global_ref.
+ * This is required in order to bring up TTM bo subsystem and TTM memory
+ * subsystem if they aren't already up. The first one who
+ * calls 'drm_global_item_ref' automatically initializes the specified
+ * subsystem and the last one who calls 'drm_global_item_unref' automatically
+ * brings down the specified subsystem.
+ * @{
+ */
+
+/*
+ * Take references on the global TTM memory and BO subsystems
+ * (initializing them on first use). On failure everything acquired
+ * so far is released. Paired with vigs_mman_global_cleanup().
+ */
+static int vigs_mman_global_init(struct vigs_mman *mman)
+{
+ struct drm_global_reference *global_ref = NULL;
+ int ret = 0;
+
+ global_ref = &mman->mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &vigs_ttm_mem_global_init;
+ global_ref->release = &vigs_ttm_mem_global_release;
+
+ ret = drm_global_item_ref(global_ref);
+
+ if (ret != 0) {
+ DRM_ERROR("failed setting up TTM memory subsystem: %d\n", ret);
+ return ret;
+ }
+
+ /* The BO subsystem needs the memory global we just acquired. */
+ mman->bo_global_ref.mem_glob = mman->mem_global_ref.object;
+ global_ref = &mman->bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+
+ ret = drm_global_item_ref(global_ref);
+
+ if (ret != 0) {
+ DRM_ERROR("failed setting up TTM bo subsystem: %d\n", ret);
+ drm_global_item_unref(&mman->mem_global_ref);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vigs_mman_global_cleanup(struct vigs_mman *mman)
+{
+ drm_global_item_unref(&mman->bo_global_ref.ref);
+ drm_global_item_unref(&mman->mem_global_ref);
+}
+
+/*
+ * @}
+ */
+
+/*
+ * TTM backend functions.
+ * @{
+ */
+
+static int vigs_ttm_backend_bind(struct ttm_tt *tt,
+ struct ttm_mem_reg *bo_mem)
+{
+ return 0;
+}
+
+static int vigs_ttm_backend_unbind(struct ttm_tt *tt)
+{
+ return 0;
+}
+
+/*
+ * ttm_backend_func::destroy: the ttm_tt is the first member of the
+ * ttm_dma_tt allocated in vigs_ttm_tt_create, so the cast recovers
+ * the full allocation.
+ */
+static void vigs_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ struct ttm_dma_tt *dma_tt = (void*)tt;
+
+ ttm_dma_tt_fini(dma_tt);
+ kfree(dma_tt);
+}
+
+static struct ttm_backend_func vigs_ttm_backend_func = {
+ .bind = &vigs_ttm_backend_bind,
+ .unbind = &vigs_ttm_backend_unbind,
+ .destroy = &vigs_ttm_backend_destroy,
+};
+
+/*
+ * ttm_bo_driver::ttm_tt_create: allocate a DMA-capable TTM for a BO.
+ * Returns NULL on allocation or init failure (TTM treats NULL as
+ * -ENOMEM).
+ */
+static struct ttm_tt *vigs_ttm_tt_create(struct ttm_bo_device *bo_dev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_dma_tt *dma_tt;
+ int ret;
+
+ dma_tt = kzalloc(sizeof(struct ttm_dma_tt), GFP_KERNEL);
+
+ if (dma_tt == NULL) {
+ DRM_ERROR("cannot allocate ttm_dma_tt: OOM\n");
+ return NULL;
+ }
+
+ /* Must be set before init so TTM can call back into us. */
+ dma_tt->ttm.func = &vigs_ttm_backend_func;
+
+ ret = ttm_dma_tt_init(dma_tt, bo_dev, size, page_flags,
+ dummy_read_page);
+
+ if (ret != 0) {
+ DRM_ERROR("ttm_dma_tt_init failed: %d\n", ret);
+ kfree(dma_tt);
+ return NULL;
+ }
+
+ return &dma_tt->ttm;
+}
+
+/*
+ * @}
+ */
+
+static int vigs_ttm_invalidate_caches(struct ttm_bo_device *bo_dev,
+ uint32_t flags)
+{
+ return 0;
+}
+
+/*
+ * ttm_bo_driver::init_mem_type: describe each memory placement.
+ * SYSTEM = CPU pages, TT = "GPU" (host-only), VRAM and PRIV0 ("RAM")
+ * are fixed, mappable PCI BAR apertures (see file header comment).
+ */
+static int vigs_ttm_init_mem_type(struct ttm_bo_device *bo_dev,
+ uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_TT:
+ man->func = &ttm_bo_manager_func;
+ man->gpu_offset = 0;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+ TTM_MEMTYPE_FLAG_CMA;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ case TTM_PL_PRIV0:
+ /* Fixed apertures: write-combined by default. */
+ man->func = &ttm_bo_manager_func;
+ man->gpu_offset = 0;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("unsupported memory type: %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const u32 evict_placements[1] =
+{
+ TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT | TTM_PL_FLAG_NO_EVICT
+};
+
+static const struct ttm_placement evict_placement =
+{
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = ARRAY_SIZE(evict_placements),
+ .placement = evict_placements,
+ .num_busy_placement = ARRAY_SIZE(evict_placements),
+ .busy_placement = evict_placements
+};
+
+static void vigs_ttm_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ BUG_ON(bo->mem.mem_type != TTM_PL_VRAM);
+
+ *placement = evict_placement;
+}
+
+/*
+ * ttm_bo_driver::move: VRAM<->GPU transitions are delegated to the
+ * host via the mman ops (no data copy in the kernel); anything else
+ * falls back to TTM's memcpy move.
+ */
+static int vigs_ttm_move(struct ttm_buffer_object *bo,
+ bool evict,
+ bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ struct vigs_mman *mman = bo_dev_to_vigs_mman(bo->bdev);
+ struct ttm_mem_reg *old_mem = &bo->mem;
+
+ if ((old_mem->mem_type == TTM_PL_VRAM) &&
+ (new_mem->mem_type == TTM_PL_TT)) {
+ mman->ops->vram_to_gpu(mman->user_data, bo);
+
+ /* Release the old placement node and adopt the new one. */
+ ttm_bo_mem_put(bo, old_mem);
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+
+ return 0;
+ } else if ((old_mem->mem_type == TTM_PL_TT) &&
+ (new_mem->mem_type == TTM_PL_VRAM)) {
+ /* Pass the absolute byte offset of the new VRAM location. */
+ mman->ops->gpu_to_vram(mman->user_data, bo,
+ (new_mem->start << PAGE_SHIFT) +
+ bo->bdev->man[new_mem->mem_type].gpu_offset);
+
+ ttm_bo_mem_put(bo, old_mem);
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+
+ return 0;
+ } else {
+ return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ }
+}
+
+static int vigs_ttm_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ return 0;
+}
+
+static bool vigs_ttm_sync_obj_signaled(void *sync_obj)
+{
+ return vigs_fence_signaled((struct vigs_fence*)sync_obj);
+}
+
+static int vigs_ttm_sync_obj_wait(void *sync_obj,
+ bool lazy,
+ bool interruptible)
+{
+ return vigs_fence_wait((struct vigs_fence*)sync_obj, interruptible);
+}
+
+static int vigs_ttm_sync_obj_flush(void *sync_obj)
+{
+ return 0;
+}
+
+static void vigs_ttm_sync_obj_unref(void **sync_obj)
+{
+ struct vigs_fence* fence = *sync_obj;
+ vigs_fence_unref(fence);
+ *sync_obj = NULL;
+}
+
+static void *vigs_ttm_sync_obj_ref(void *sync_obj)
+{
+ vigs_fence_ref((struct vigs_fence*)sync_obj);
+ return sync_obj;
+}
+
+/*
+ * ttm_bo_driver::fault_reserve_notify: called (with the BO reserved)
+ * before a CPU page fault is served. GPU-placed buffers are not CPU
+ * accessible, so migrate them into VRAM first.
+ */
+static int vigs_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+ u32 placements[1];
+ struct ttm_placement placement;
+ int ret;
+
+ if (bo->mem.mem_type != TTM_PL_TT) {
+ /*
+ * We're only interested in GPU memory page faults.
+ */
+
+ return 0;
+ }
+
+ /*
+ * It's GPU memory page fault. Move this buffer into VRAM.
+ */
+
+ placements[0] = TTM_PL_FLAG_WC | TTM_PL_FLAG_VRAM;
+
+ memset(&placement, 0, sizeof(placement));
+
+ placement.placement = placements;
+ placement.busy_placement = placements;
+ placement.num_placement = 1;
+ placement.num_busy_placement = 1;
+
+ /* Triggers vigs_ttm_move (gpu_to_vram) if a move is needed. */
+ ret = ttm_bo_validate(bo, &placement, false, false);
+
+ if (ret != 0) {
+ DRM_ERROR("movement failed for 0x%llX\n",
+ drm_vma_node_offset_addr(&bo->vma_node));
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * ttm_bo_driver::io_mem_reserve: describe how a memory region is
+ * reached by the CPU. VRAM and PRIV0 ("RAM") are iomem windows at
+ * their respective PCI BAR bases; SYSTEM/TT are ordinary pages.
+ */
+static int vigs_ttm_io_mem_reserve(struct ttm_bo_device *bo_dev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bo_dev->man[mem->mem_type];
+ struct vigs_mman *mman = bo_dev_to_vigs_mman(bo_dev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) {
+ return -EINVAL;
+ }
+
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ case TTM_PL_TT:
+ /* Nothing to do: backed by regular pages. */
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.is_iomem = true;
+ mem->bus.base = mman->vram_base;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ break;
+ case TTM_PL_PRIV0:
+ mem->bus.is_iomem = true;
+ mem->bus.base = mman->ram_base;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vigs_ttm_io_mem_free(struct ttm_bo_device *bo_dev,
+ struct ttm_mem_reg *mem)
+{
+}
+
+static struct ttm_bo_driver vigs_ttm_bo_driver =
+{
+ .ttm_tt_create = &vigs_ttm_tt_create, /* Needed for ttm_bo_type_kernel and TTM_PL_TT */
+ .ttm_tt_populate = &ttm_pool_populate, /* Needed for TTM_PL_TT */
+ .ttm_tt_unpopulate = &ttm_pool_unpopulate, /* Needed for TTM_PL_TT */
+ .invalidate_caches = &vigs_ttm_invalidate_caches,
+ .init_mem_type = &vigs_ttm_init_mem_type,
+ .evict_flags = &vigs_ttm_evict_flags,
+ .move = &vigs_ttm_move,
+ .verify_access = &vigs_ttm_verify_access,
+ .sync_obj_signaled = vigs_ttm_sync_obj_signaled,
+ .sync_obj_wait = vigs_ttm_sync_obj_wait,
+ .sync_obj_flush = vigs_ttm_sync_obj_flush,
+ .sync_obj_unref = vigs_ttm_sync_obj_unref,
+ .sync_obj_ref = vigs_ttm_sync_obj_ref,
+ .fault_reserve_notify = &vigs_ttm_fault_reserve_notify,
+ .io_mem_reserve = &vigs_ttm_io_mem_reserve,
+ .io_mem_free = &vigs_ttm_io_mem_free,
+};
+
+/*
+ * VMA related.
+ * @{
+ */
+
+static u32 vigs_vma_cache_index = 0;
+static struct vm_operations_struct vigs_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops = NULL;
+
+/*
+ * Represents per-VMA data.
+ *
+ * Since TTM already uses struct vm_area_struct::vm_private_data
+ * we're forced to use some other way to add our own data
+ * to VMA. Currently we use struct vm_area_struct::vm_ops for this.
+ * Generally, TTM should be refactored to not use
+ * struct vm_area_struct directly, but provide helper functions
+ * instead so that user could store whatever he wants into
+ * struct vm_area_struct::vm_private_data.
+ */
+struct vigs_mman_vma
+{
+ struct vm_operations_struct vm_ops;
+ struct ttm_buffer_object *bo;
+ struct kref kref;
+ u8 data[1];
+};
+
+/*
+ * kref release callback for vigs_mman_vma: runs the user's per-VMA
+ * cleanup hook, then returns the structure to the slab cache.
+ */
+static void vigs_mman_vma_release(struct kref *kref)
+{
+ struct vigs_mman_vma *vigs_vma =
+ container_of(kref, struct vigs_mman_vma, kref);
+ struct ttm_buffer_object *bo = vigs_vma->bo;
+ struct vigs_mman *mman = bo_dev_to_vigs_mman(bo->bdev);
+
+ mman->ops->cleanup_vma(mman->user_data, &vigs_vma->data[0]);
+
+ kmem_cache_free(mman->vma_cache, vigs_vma);
+}
+
+/*
+ * @}
+ */
+
+/*
+ * Create and initialize the VIGS memory manager: a per-instance slab
+ * cache for per-VMA data, the TTM globals, the BO device, and the
+ * three managed placements (GPU/TT, VRAM, RAM/PRIV0).
+ *
+ * On success *mman holds the new manager; on failure *mman is NULL
+ * and a negative errno is returned. Teardown order in the fail path
+ * mirrors the init order exactly.
+ */
+int vigs_mman_create(resource_size_t vram_base,
+ resource_size_t vram_size,
+ resource_size_t ram_base,
+ resource_size_t ram_size,
+ uint32_t vma_data_size,
+ struct vigs_mman_ops *ops,
+ void *user_data,
+ struct vigs_mman **mman)
+{
+ int ret = 0;
+ char vma_cache_name[100];
+ unsigned long num_pages = 0;
+
+ DRM_DEBUG_DRIVER("enter\n");
+
+ /* vma_data_size is unsigned, so this only rejects 0. */
+ BUG_ON(vma_data_size <= 0);
+
+ *mman = kzalloc(sizeof(**mman), GFP_KERNEL);
+
+ if (!*mman) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ /* Slab cache names must be unique across instances. */
+ sprintf(vma_cache_name, "vigs_vma_cache%u", vigs_vma_cache_index++);
+
+ /* struct vigs_mman_vma already contains data[1], hence the -1. */
+ (*mman)->vma_cache = kmem_cache_create(vma_cache_name,
+ sizeof(struct vigs_mman_vma) +
+ vma_data_size - 1,
+ 0, 0, NULL);
+
+ if (!(*mman)->vma_cache) {
+ ret = -ENOMEM;
+ goto fail2;
+ }
+
+ ret = vigs_mman_global_init(*mman);
+
+ if (ret != 0) {
+ goto fail3;
+ }
+
+ (*mman)->vram_base = vram_base;
+ (*mman)->ram_base = ram_base;
+ (*mman)->ops = ops;
+ (*mman)->user_data = user_data;
+
+ ret = ttm_bo_device_init(&(*mman)->bo_dev,
+ (*mman)->bo_global_ref.ref.object,
+ &vigs_ttm_bo_driver,
+ DRM_FILE_PAGE_OFFSET,
+ 0);
+ if (ret != 0) {
+ DRM_ERROR("failed initializing bo driver: %d\n", ret);
+ goto fail4;
+ }
+
+ /*
+ * Init GPU
+ * @{
+ */
+
+ /*
+ * For GPU we're only limited by host resources, let the target create
+ * as many buffers as it likes.
+ */
+ ret = ttm_bo_init_mm(&(*mman)->bo_dev,
+ TTM_PL_TT,
+ (0xFFFFFFFFUL / PAGE_SIZE));
+ if (ret != 0) {
+ DRM_ERROR("failed initializing GPU mm\n");
+ goto fail5;
+ }
+
+ /*
+ * @}
+ */
+
+ /*
+ * Init VRAM
+ * @{
+ */
+
+ num_pages = vram_size / PAGE_SIZE;
+
+ ret = ttm_bo_init_mm(&(*mman)->bo_dev,
+ TTM_PL_VRAM,
+ num_pages);
+ if (ret != 0) {
+ DRM_ERROR("failed initializing VRAM mm\n");
+ goto fail6;
+ }
+
+ /*
+ * @}
+ */
+
+ /*
+ * Init RAM
+ * @{
+ */
+
+ num_pages = ram_size / PAGE_SIZE;
+
+ ret = ttm_bo_init_mm(&(*mman)->bo_dev,
+ TTM_PL_PRIV0,
+ num_pages);
+ if (ret != 0) {
+ DRM_ERROR("failed initializing RAM mm\n");
+ goto fail7;
+ }
+
+ /*
+ * @}
+ */
+
+ return 0;
+
+fail7:
+ ttm_bo_clean_mm(&(*mman)->bo_dev, TTM_PL_VRAM);
+fail6:
+ ttm_bo_clean_mm(&(*mman)->bo_dev, TTM_PL_TT);
+fail5:
+ ttm_bo_device_release(&(*mman)->bo_dev);
+fail4:
+ vigs_mman_global_cleanup(*mman);
+fail3:
+ kmem_cache_destroy((*mman)->vma_cache);
+fail2:
+ kfree(*mman);
+fail1:
+ *mman = NULL;
+
+ return ret;
+}
+
+/*
+ * Tear down the memory manager in the reverse order of
+ * vigs_mman_create(): placements, BO device, TTM globals, slab cache.
+ */
+void vigs_mman_destroy(struct vigs_mman *mman)
+{
+ DRM_DEBUG_DRIVER("enter\n");
+
+ ttm_bo_clean_mm(&mman->bo_dev, TTM_PL_PRIV0);
+ ttm_bo_clean_mm(&mman->bo_dev, TTM_PL_VRAM);
+ ttm_bo_clean_mm(&mman->bo_dev, TTM_PL_TT);
+ ttm_bo_device_release(&mman->bo_dev);
+ vigs_mman_global_cleanup(mman);
+ kmem_cache_destroy(mman->vma_cache);
+
+ kfree(mman);
+}
+
+/*
+ * vm_operations::open for split/copied VMAs. vma->vm_ops points at
+ * the vm_ops embedded at the start of our vigs_mman_vma, so the cast
+ * recovers the per-VMA data; just take another reference on it.
+ */
+static void vigs_ttm_open(struct vm_area_struct *vma)
+{
+ struct vigs_mman_vma *vigs_vma = (struct vigs_mman_vma*)vma->vm_ops;
+
+ BUG_ON(vigs_vma->bo != (struct ttm_buffer_object*)vma->vm_private_data);
+
+ ttm_vm_ops->open(vma);
+ kref_get(&vigs_vma->kref);
+}
+
+/*
+ * vm_operations::close. Point the VMA back at the shared static
+ * vm_ops template before dropping our reference: the kref_put may
+ * free the vigs_mman_vma that vma->vm_ops currently points into.
+ */
+static void vigs_ttm_close(struct vm_area_struct *vma)
+{
+ struct vigs_mman_vma *vigs_vma = (struct vigs_mman_vma*)vma->vm_ops;
+
+ BUG_ON(vigs_vma->bo != (struct ttm_buffer_object*)vma->vm_private_data);
+
+ vma->vm_ops = &vigs_ttm_vm_ops;
+
+ kref_put(&vigs_vma->kref, &vigs_mman_vma_release);
+ ttm_vm_ops->close(vma);
+}
+
+/*
+ * vm_operations::fault wrapper: guard against a VMA whose BO is gone,
+ * then delegate to TTM's own fault handler.
+ */
+static int vigs_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+
+ if (bo == NULL) {
+ return VM_FAULT_NOPAGE;
+ }
+
+ return ttm_vm_ops->fault(vma, vmf);
+}
+
+/*
+ * mmap entry point. Offsets below DRM_FILE_PAGE_OFFSET belong to
+ * legacy DRM maps; everything else is a TTM BO mapping, for which we
+ * attach a reference-counted vigs_mman_vma (see the comment above
+ * struct vigs_mman_vma for why vma->vm_ops is abused as the anchor).
+ */
+int vigs_mman_mmap(struct vigs_mman *mman,
+ struct file *filp,
+ struct vm_area_struct *vma,
+ bool track_access)
+{
+ struct vigs_mman_vma *vigs_vma;
+ int ret;
+ struct ttm_buffer_object *bo;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+ return drm_mmap(filp, vma);
+ }
+
+ vigs_vma = kmem_cache_alloc(mman->vma_cache, GFP_KERNEL);
+
+ if (!vigs_vma) {
+ return -ENOMEM;
+ }
+
+ /* Installs TTM's vm_ops and sets vm_private_data to the BO. */
+ ret = ttm_bo_mmap(filp, vma, &mman->bo_dev);
+
+ if (unlikely(ret != 0)) {
+ kmem_cache_free(mman->vma_cache, vigs_vma);
+ return ret;
+ }
+
+ /*
+ * First mmap: capture TTM's vm_ops template once and build our
+ * patched copy with the fault handler overridden.
+ */
+ if (unlikely(ttm_vm_ops == NULL)) {
+ ttm_vm_ops = vma->vm_ops;
+ vigs_ttm_vm_ops = *ttm_vm_ops;
+ vigs_ttm_vm_ops.fault = &vigs_ttm_fault;
+ }
+
+ bo = vma->vm_private_data;
+
+ /* Per-VMA copy of the ops, with open/close overridden as well. */
+ vigs_vma->vm_ops = vigs_ttm_vm_ops;
+ vigs_vma->bo = bo;
+ vigs_vma->vm_ops.open = &vigs_ttm_open;
+ vigs_vma->vm_ops.close = &vigs_ttm_close;
+ kref_init(&vigs_vma->kref);
+ mman->ops->init_vma(mman->user_data,
+ &vigs_vma->data[0],
+ bo,
+ track_access);
+
+ vma->vm_ops = &vigs_vma->vm_ops;
+
+ return 0;
+}
+
+/*
+ * Look up the VMA covering 'address' in the current process, verify
+ * it is one of ours (by its fault handler and owning BO device), and
+ * invoke 'func' on its per-VMA data while holding mmap_sem for read.
+ *
+ * Returns -ENOENT when the address is not a VIGS mapping, otherwise
+ * whatever 'func' returns.
+ */
+int vigs_mman_access_vma(struct vigs_mman *mman,
+ unsigned long address,
+ vigs_mman_access_vma_func func,
+ void *user_data)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int ret;
+ struct ttm_buffer_object *bo;
+ struct vigs_mman_vma *vigs_vma;
+
+ down_read(&mm->mmap_sem);
+
+ vma = find_vma(mm, address);
+
+ /* The overridden fault handler is our fingerprint. */
+ if (!vma ||
+ !vma->vm_ops ||
+ (vma->vm_ops->fault != &vigs_ttm_fault)) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ bo = vma->vm_private_data;
+
+ BUG_ON(!bo);
+
+ /* Could be another vigs_mman instance's BO; reject it. */
+ if (bo->bdev != &mman->bo_dev) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ /* vm_ops points into the vigs_mman_vma (see vigs_mman_mmap). */
+ vigs_vma = (struct vigs_mman_vma*)vma->vm_ops;
+
+ ret = func(user_data, &vigs_vma->data[0]);
+
+out:
+ up_read(&mm->mmap_sem);
+
+ return ret;
+}
--- /dev/null
+#ifndef _VIGS_MMAN_H_
+#define _VIGS_MMAN_H_
+
+#include "drmP.h"
+#include <linux/slab.h>
+#include <ttm/ttm_bo_driver.h>
+
+struct vigs_mman_ops
+{
+ /*
+ * 'bo' is reserved while calling these.
+ * @{
+ */
+
+ void (*vram_to_gpu)(void *user_data, struct ttm_buffer_object *bo);
+ void (*gpu_to_vram)(void *user_data, struct ttm_buffer_object *bo,
+ unsigned long new_offset);
+ /*
+ * @}
+ */
+
+ /*
+ * Per-VMA data init/cleanup. VMA may be opened/closed many times
+ * as the result of split/copy, but the init/cleanup handlers are called
+ * only once, i.e. vigs_mman is handling the reference counts.
+ *
+ * current's 'mmap_sem' is locked while calling this.
+ * @{
+ */
+
+ void (*init_vma)(void *user_data,
+ void *vma_data,
+ struct ttm_buffer_object *bo,
+ bool track_access);
+
+ /*
+ * current's 'mmap_sem' is locked while calling this.
+ */
+ void (*cleanup_vma)(void *user_data, void *vma_data);
+
+ /*
+ * @}
+ */
+};
+
+typedef int (*vigs_mman_access_vma_func)(void *user_data, void *vma_data);
+
+struct vigs_mman
+{
+ struct kmem_cache *vma_cache;
+
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bo_dev;
+
+ resource_size_t vram_base;
+ resource_size_t ram_base;
+
+ struct vigs_mman_ops *ops;
+ void *user_data;
+};
+
+static inline struct vigs_mman *bo_dev_to_vigs_mman(struct ttm_bo_device *bo_dev)
+{
+ return container_of(bo_dev, struct vigs_mman, bo_dev);
+}
+
+int vigs_mman_create(resource_size_t vram_base,
+ resource_size_t vram_size,
+ resource_size_t ram_base,
+ resource_size_t ram_size,
+ uint32_t vma_data_size,
+ struct vigs_mman_ops *ops,
+ void *user_data,
+ struct vigs_mman **mman);
+
+void vigs_mman_destroy(struct vigs_mman *mman);
+
+int vigs_mman_mmap(struct vigs_mman *mman,
+ struct file *filp,
+ struct vm_area_struct *vma,
+ bool track_access);
+
+/*
+ * current's 'mmap_sem' is locked while calling 'func'.
+ */
+int vigs_mman_access_vma(struct vigs_mman *mman,
+ unsigned long address,
+ vigs_mman_access_vma_func func,
+ void *user_data);
+
+#endif
--- /dev/null
+#include "vigs_output.h"
+#include "vigs_device.h"
+#include "drm_crtc_helper.h"
+#include <linux/init.h>
+
+#define DPI_DEF_VALUE 316
+#define DPI_MIN_VALUE 100
+#define DPI_MAX_VALUE 600
+
+#ifndef MODULE
+/*
+ * Parse the leading decimal digits of 'str' into an int.
+ * Stops at the first non-digit character; returns 0 if the string
+ * does not start with a digit.
+ */
+static int vigs_atoi(const char *str)
+{
+ int result = 0;
+
+ while ((*str >= '0') && (*str <= '9')) {
+ result = (result * 10) + (*str - '0');
+ ++str;
+ }
+
+ return result;
+}
+#endif
+
+struct vigs_output
+{
+ /*
+ * 'connector' is the owner of the 'vigs_output', i.e.
+ * when 'connector' is destroyed whole structure is destroyed.
+ */
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+};
+
+static inline struct vigs_output *connector_to_vigs_output(struct drm_connector *connector)
+{
+ return container_of(connector, struct vigs_output, connector);
+}
+
+static inline struct vigs_output *encoder_to_vigs_output(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct vigs_output, encoder);
+}
+
+static void vigs_connector_save(struct drm_connector *connector)
+{
+ DRM_DEBUG_KMS("enter\n");
+}
+
+static void vigs_connector_restore(struct drm_connector *connector)
+{
+ DRM_DEBUG_KMS("enter\n");
+}
+
+static enum drm_connector_status vigs_connector_detect(
+ struct drm_connector *connector,
+ bool force)
+{
+ DRM_DEBUG_KMS("enter: force = %d\n", force);
+
+ return connector_status_connected;
+}
+
+static int vigs_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ DRM_DEBUG_KMS("enter: %s = %llu\n", property->name, value);
+
+ return 0;
+}
+
+static void vigs_connector_destroy(struct drm_connector *connector)
+{
+ struct vigs_output *vigs_output = connector_to_vigs_output(connector);
+
+ DRM_DEBUG_KMS("enter\n");
+
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+
+ kfree(vigs_output);
+}
+
+/*
+ * drm_connector_helper_funcs::get_modes: build the mode list from the
+ * "video=" kernel command line option for this connector.
+ *
+ * Returns the number of modes added (1 or 0).
+ */
+static int vigs_connector_get_modes(struct drm_connector *connector)
+{
+ struct vigs_output *vigs_output = connector_to_vigs_output(connector);
+ struct drm_device *drm_dev = vigs_output->connector.dev;
+ char *option = NULL;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ if (fb_get_options(drm_get_connector_name(connector), &option) == 0) {
+ struct drm_cmdline_mode cmdline_mode;
+
+ if (drm_mode_parse_command_line_for_connector(option,
+ connector,
+ &cmdline_mode)) {
+ struct drm_display_mode *preferred_mode =
+ drm_mode_create_from_cmdline_mode(drm_dev,
+ &cmdline_mode);
+
+ /*
+ * Fix: drm_mode_create_from_cmdline_mode() allocates the
+ * mode and can return NULL; the original code dereferenced
+ * the result unconditionally and would oops on allocation
+ * failure.
+ */
+ if (preferred_mode == NULL) {
+ return 0;
+ }
+
+ /* qHD workaround (540x960) */
+ if (cmdline_mode.xres == 540 && cmdline_mode.yres == 960) {
+ preferred_mode->hdisplay = cmdline_mode.xres;
+ preferred_mode->hsync_start = preferred_mode->hsync_start - 1;
+ preferred_mode->hsync_end = preferred_mode->hsync_end - 1;
+ }
+
+ preferred_mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_crtcinfo(preferred_mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_probed_add(connector, preferred_mode);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int vigs_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS("enter\n");
+
+ return MODE_OK;
+}
+
+struct drm_encoder *vigs_connector_best_encoder(struct drm_connector *connector)
+{
+ struct vigs_output *vigs_output = connector_to_vigs_output(connector);
+
+ DRM_DEBUG_KMS("enter\n");
+
+ return &vigs_output->encoder;
+}
+
+static void vigs_encoder_destroy(struct drm_encoder *encoder)
+{
+ DRM_DEBUG_KMS("enter\n");
+
+ drm_encoder_cleanup(encoder);
+}
+
+static void vigs_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ DRM_DEBUG_KMS("enter: mode = %d\n", mode);
+}
+
+static bool vigs_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ DRM_DEBUG_KMS("enter\n");
+
+ return true;
+}
+
+static void vigs_encoder_prepare(struct drm_encoder *encoder)
+{
+ DRM_DEBUG_KMS("enter\n");
+}
+
+static void vigs_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ DRM_DEBUG_KMS("enter\n");
+}
+
+static void vigs_encoder_commit(struct drm_encoder *encoder)
+{
+ DRM_DEBUG_KMS("enter\n");
+}
+
+static const struct drm_connector_funcs vigs_connector_funcs =
+{
+ .dpms = drm_helper_connector_dpms,
+ .save = vigs_connector_save,
+ .restore = vigs_connector_restore,
+ .detect = vigs_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = vigs_connector_set_property,
+ .destroy = vigs_connector_destroy,
+};
+
+static const struct drm_connector_helper_funcs vigs_connector_helper_funcs =
+{
+ .get_modes = vigs_connector_get_modes,
+ .mode_valid = vigs_connector_mode_valid,
+ .best_encoder = vigs_connector_best_encoder,
+};
+
+static const struct drm_encoder_funcs vigs_encoder_funcs =
+{
+ .destroy = vigs_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs vigs_encoder_helper_funcs =
+{
+ .dpms = vigs_encoder_dpms,
+ .mode_fixup = vigs_encoder_mode_fixup,
+ .prepare = vigs_encoder_prepare,
+ .mode_set = vigs_encoder_mode_set,
+ .commit = vigs_encoder_commit,
+};
+
+/*
+ * Create the single LVDS connector + encoder pair for the device.
+ *
+ * Ownership note: 'connector' owns the vigs_output allocation -- once
+ * drm_connector_init() succeeds, vigs_connector_destroy() (invoked by
+ * KMS teardown) is responsible for kfree'ing it, which is why the
+ * later error paths below do not free it explicitly.
+ */
+int vigs_output_init(struct vigs_device *vigs_dev)
+{
+ struct vigs_output *vigs_output;
+ int ret;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ vigs_output = kzalloc(sizeof(*vigs_output), GFP_KERNEL);
+
+ if (!vigs_output) {
+ return -ENOMEM;
+ }
+
+ ret = drm_connector_init(vigs_dev->drm_dev,
+ &vigs_output->connector,
+ &vigs_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ if (ret != 0) {
+ /* Connector was never registered; free manually. */
+ kfree(vigs_output);
+ return ret;
+ }
+
+ ret = drm_encoder_init(vigs_dev->drm_dev,
+ &vigs_output->encoder,
+ &vigs_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+ if (ret != 0) {
+ /*
+ * KMS subsystem will delete 'vigs_output'
+ */
+
+ return ret;
+ }
+
+ /*
+ * We only have a single CRTC.
+ */
+ vigs_output->encoder.possible_crtcs = (1 << 0);
+
+ ret = drm_mode_connector_attach_encoder(&vigs_output->connector,
+ &vigs_output->encoder);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ drm_encoder_helper_add(&vigs_output->encoder, &vigs_encoder_helper_funcs);
+
+ drm_connector_helper_add(&vigs_output->connector, &vigs_connector_helper_funcs);
+
+ ret = drm_sysfs_connector_add(&vigs_output->connector);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Return the display DPI, taken from the "dpi=" kernel command line
+ * option when built-in (out-of-range values are ignored), otherwise
+ * the compile-time default.
+ */
+int vigs_output_get_dpi(void)
+{
+ int dpi = DPI_DEF_VALUE;
+#ifndef MODULE
+ char *opt = strstr(saved_command_line, "dpi=");
+
+ if (opt != NULL) {
+ int parsed = vigs_atoi(opt + 4);
+
+ if ((parsed >= DPI_MIN_VALUE) && (parsed <= DPI_MAX_VALUE)) {
+ dpi = parsed;
+ }
+ }
+#endif
+ return dpi;
+}
+
+/*
+ * Convert a width in pixels to millimeters for the given DPI
+ * (25.4 mm per inch), rounding to the nearest millimeter.
+ */
+int vigs_output_get_phys_width(int dpi, u32 width)
+{
+ u32 tenths_of_mm = width * 254 / dpi;
+
+ return (tenths_of_mm + 5) / 10;
+}
+
+/*
+ * Convert a height in pixels to millimeters for the given DPI
+ * (25.4 mm per inch), rounding to the nearest millimeter.
+ */
+int vigs_output_get_phys_height(int dpi, u32 height)
+{
+ u32 tenths_of_mm = height * 254 / dpi;
+
+ return (tenths_of_mm + 5) / 10;
+}
--- /dev/null
+#ifndef _VIGS_OUTPUT_H_
+#define _VIGS_OUTPUT_H_
+
+#include "drmP.h"
+
+struct vigs_device;
+
+/* Create and register the connector/encoder pair for 'vigs_dev'. */
+int vigs_output_init(struct vigs_device *vigs_dev);
+
+/* Display DPI: "dpi=" kernel option if present and valid, else default. */
+int vigs_output_get_dpi(void);
+
+/* Pixel-to-millimeter conversions, rounded to the nearest mm. */
+int vigs_output_get_phys_width(int dpi, u32 width);
+
+int vigs_output_get_phys_height(int dpi, u32 height);
+
+#endif
--- /dev/null
+#include "vigs_plane.h"
+#include "vigs_device.h"
+#include "vigs_framebuffer.h"
+#include "vigs_surface.h"
+#include "vigs_comm.h"
+#include <drm/vigs_drm.h>
+
+/*
+ * Pixel formats accepted by the overlay plane. NV42 has no DRM_FORMAT_*
+ * constant in this kernel, hence the explicit fourcc_code().
+ */
+static const uint32_t formats[] =
+{
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_NV21,
+ fourcc_code('N', 'V', '4', '2'),
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_YUV420
+};
+
+/*
+ * drm_plane_funcs.update_plane implementation: push the framebuffer's
+ * surfaces to the host plane via a set_plane command, then cache the
+ * geometry on success so later disable/destroy know the plane state.
+ */
+static int vigs_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w,
+ unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct vigs_plane *vigs_plane = plane_to_vigs_plane(plane);
+ struct vigs_device *vigs_dev = plane->dev->dev_private;
+ struct vigs_framebuffer *vigs_fb = fb_to_vigs_fb(fb);
+ int ret, i;
+ /* src_* come in 16.16 fixed point; keep the integer part only. */
+ uint32_t src_x_whole = src_x >> 16;
+ uint32_t src_y_whole = src_y >> 16;
+ uint32_t src_w_whole = src_w >> 16;
+ uint32_t src_h_whole = src_h >> 16;
+ vigsp_surface_id surface_ids[4] = { 0, 0, 0, 0 };
+ vigsp_plane_format format;
+
+ DRM_DEBUG_KMS("enter: crtc_x = %d, crtc_y = %d, crtc_w = %u, crtc_h = %u, src_x = %u, src_y = %u, src_w = %u, src_h = %u\n",
+ crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h);
+
+ /*
+ * For a scanout surface that lives in VRAM and has pending CPU
+ * writes, flush it to the GPU first so the host composes current
+ * data.
+ */
+ if (vigs_fb->surfaces[0]->scanout) {
+ vigs_gem_reserve(&vigs_fb->surfaces[0]->gem);
+
+ if (vigs_gem_in_vram(&vigs_fb->surfaces[0]->gem) &&
+ vigs_surface_need_gpu_update(vigs_fb->surfaces[0])) {
+ vigs_comm_update_gpu(vigs_dev->comm,
+ vigs_fb->surfaces[0]->id,
+ vigs_fb->surfaces[0]->width,
+ vigs_fb->surfaces[0]->height,
+ vigs_gem_offset(&vigs_fb->surfaces[0]->gem));
+ }
+
+ vigs_gem_unreserve(&vigs_fb->surfaces[0]->gem);
+ }
+
+ /* Collect up to 4 per-plane surface ids (extra YUV planes). */
+ for (i = 0; i < 4; ++i) {
+ if (vigs_fb->surfaces[i]) {
+ surface_ids[i] = vigs_fb->surfaces[i]->id;
+ }
+ }
+
+ /* Map the DRM fourcc to the VIGS protocol plane format. */
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XRGB8888:
+ format = vigsp_plane_bgrx8888;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ format = vigsp_plane_bgra8888;
+ break;
+ case DRM_FORMAT_NV21:
+ format = vigsp_plane_nv21;
+ break;
+ case fourcc_code('N', 'V', '4', '2'):
+ format = vigsp_plane_nv42;
+ break;
+ case DRM_FORMAT_NV61:
+ format = vigsp_plane_nv61;
+ break;
+ case DRM_FORMAT_YUV420:
+ format = vigsp_plane_yuv420;
+ break;
+ default:
+ /*
+ * Unreachable: the core rejects formats not listed in
+ * 'formats'. The assignment only silences the compiler.
+ */
+ BUG();
+ format = vigsp_plane_bgrx8888;
+ break;
+ }
+
+ ret = vigs_comm_set_plane(vigs_dev->comm,
+ vigs_plane->index,
+ fb->width,
+ fb->height,
+ format,
+ surface_ids,
+ src_x_whole,
+ src_y_whole,
+ src_w_whole,
+ src_h_whole,
+ crtc_x,
+ crtc_y,
+ crtc_w,
+ crtc_h,
+ vigs_plane->z_pos,
+ vigs_plane->hflip,
+ vigs_plane->vflip,
+ vigs_plane->rotation);
+
+ /* Cache the applied geometry only when the host accepted it. */
+ if (ret == 0) {
+ vigs_plane->src_x = src_x;
+ vigs_plane->src_y = src_y;
+ vigs_plane->src_w = src_w;
+ vigs_plane->src_h = src_h;
+
+ vigs_plane->crtc_x = crtc_x;
+ vigs_plane->crtc_y = crtc_y;
+ vigs_plane->crtc_w = crtc_w;
+ vigs_plane->crtc_h = crtc_h;
+
+ vigs_plane->enabled = true;
+ }
+
+ return ret;
+}
+
+/*
+ * drm_plane_funcs.disable_plane implementation: an all-zero set_plane
+ * command (surfaces[0] == 0) tells the host to turn the plane off.
+ * No-op if the plane was never enabled.
+ */
+static int vigs_plane_disable(struct drm_plane *plane)
+{
+ struct vigs_plane *vigs_plane = plane_to_vigs_plane(plane);
+ struct vigs_device *vigs_dev = plane->dev->dev_private;
+ int ret;
+ vigsp_surface_id surface_ids[4] = { 0, 0, 0, 0 };
+
+ DRM_DEBUG_KMS("enter\n");
+
+ if (!vigs_plane->enabled) {
+ return 0;
+ }
+
+ ret = vigs_comm_set_plane(vigs_dev->comm,
+ vigs_plane->index,
+ 0,
+ 0,
+ 0,
+ surface_ids,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0);
+
+ /* Reset the cached geometry only when the host accepted it. */
+ if (ret == 0) {
+ vigs_plane->src_x = 0;
+ vigs_plane->src_y = 0;
+ vigs_plane->src_w = 0;
+ vigs_plane->src_h = 0;
+
+ vigs_plane->crtc_x = 0;
+ vigs_plane->crtc_y = 0;
+ vigs_plane->crtc_w = 0;
+ vigs_plane->crtc_h = 0;
+
+ vigs_plane->enabled = false;
+ }
+
+ return ret;
+}
+
+/*
+ * drm_plane_funcs.destroy implementation: disable the plane on the
+ * host, unregister it from KMS, then free the wrapper allocated in
+ * vigs_plane_init().
+ */
+static void vigs_plane_destroy(struct drm_plane *plane)
+{
+ struct vigs_plane *vigs_plane = plane_to_vigs_plane(plane);
+
+ DRM_DEBUG_KMS("enter\n");
+
+ vigs_plane_disable(plane);
+ drm_plane_cleanup(plane);
+ kfree(vigs_plane);
+}
+
+/* Plane ops registered with drm_plane_init(). */
+static const struct drm_plane_funcs vigs_plane_funcs =
+{
+ .update_plane = vigs_plane_update,
+ .disable_plane = vigs_plane_disable,
+ .destroy = vigs_plane_destroy,
+};
+
+/*
+ * Allocate and register overlay plane number 'index' with KMS.
+ * The plane is bound to the only CRTC (bit 0) and advertises the
+ * formats[] list. Returns 0 on success, negative errno on failure.
+ * On success the allocation is owned by KMS and freed through
+ * vigs_plane_destroy().
+ */
+int vigs_plane_init(struct vigs_device *vigs_dev, u32 index)
+{
+ struct vigs_plane *vigs_plane;
+ int ret;
+
+ DRM_DEBUG_KMS("enter\n");
+
+ vigs_plane = kzalloc(sizeof(*vigs_plane), GFP_KERNEL);
+
+ if (!vigs_plane) {
+ return -ENOMEM;
+ }
+
+ vigs_plane->index = index;
+
+ ret = drm_plane_init(vigs_dev->drm_dev,
+ &vigs_plane->base,
+ (1 << 0),
+ &vigs_plane_funcs,
+ formats,
+ ARRAY_SIZE(formats),
+ false);
+
+ if (ret != 0) {
+ /*
+ * drm_plane_init() failed, so the destroy callback will never
+ * run; free the wrapper here to avoid leaking it.
+ */
+ kfree(vigs_plane);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * DRM_VIGS_PLANE_SET_ZPOS ioctl: store the requested z-order on the
+ * plane identified by args->plane_id. Returns -EINVAL when no such
+ * plane object exists.
+ */
+int vigs_plane_set_zpos_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vigs_plane_set_zpos *args = data;
+ struct drm_mode_object *mode_obj;
+ int ret = -EINVAL;
+
+ drm_modeset_lock_all(drm_dev);
+
+ mode_obj = drm_mode_object_find(drm_dev,
+ args->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+
+ if (mode_obj) {
+ struct vigs_plane *vigs_plane =
+ plane_to_vigs_plane(obj_to_plane(mode_obj));
+
+ vigs_plane->z_pos = args->zpos;
+ ret = 0;
+ }
+
+ drm_modeset_unlock_all(drm_dev);
+
+ return ret;
+}
+
+/*
+ * DRM_VIGS_PLANE_SET_TRANSFORM ioctl: store the requested flip and
+ * rotation state on the plane identified by args->plane_id. Returns
+ * -EINVAL when no such plane object exists.
+ */
+int vigs_plane_set_transform_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vigs_plane_set_transform *args = data;
+ struct drm_mode_object *mode_obj;
+ int ret = -EINVAL;
+
+ drm_modeset_lock_all(drm_dev);
+
+ mode_obj = drm_mode_object_find(drm_dev,
+ args->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+
+ if (mode_obj) {
+ struct vigs_plane *vigs_plane =
+ plane_to_vigs_plane(obj_to_plane(mode_obj));
+
+ vigs_plane->hflip = args->hflip;
+ vigs_plane->vflip = args->vflip;
+ vigs_plane->rotation = args->rotation;
+ ret = 0;
+ }
+
+ drm_modeset_unlock_all(drm_dev);
+
+ return ret;
+}
--- /dev/null
+#ifndef _VIGS_PLANE_H_
+#define _VIGS_PLANE_H_
+
+#include "drmP.h"
+
+struct vigs_device;
+
+/*
+ * Per-overlay-plane state wrapping the DRM plane object.
+ */
+struct vigs_plane
+{
+ struct drm_plane base;
+
+ /* Host-side plane number, passed with every set_plane command. */
+ u32 index;
+
+ /* Last applied source rectangle (16.16 fixed point, as received). */
+ unsigned int src_x;
+ unsigned int src_y;
+ unsigned int src_w;
+ unsigned int src_h;
+
+ /* Last applied destination rectangle on the CRTC. */
+ int crtc_x;
+ int crtc_y;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+
+ /* Transform state set via the VIGS-specific ioctls below. */
+ int z_pos;
+ int hflip;
+ int vflip;
+ int rotation;
+
+ /* True after a successful update; cleared on disable. */
+ bool enabled;
+};
+
+/* Upcast from the embedded DRM plane to the VIGS wrapper. */
+static inline struct vigs_plane *plane_to_vigs_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct vigs_plane, base);
+}
+
+int vigs_plane_init(struct vigs_device *vigs_dev, u32 index);
+
+/*
+ * IOCTLs
+ * @{
+ */
+
+int vigs_plane_set_zpos_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_plane_set_transform_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+#endif
--- /dev/null
+#ifndef _VIGS_PROTOCOL_H_
+#define _VIGS_PROTOCOL_H_
+
+/*
+ * VIGS protocol is a multiple request-no response protocol.
+ */
+
+/*
+ * Bump this whenever protocol changes.
+ */
+#define VIGS_PROTOCOL_VERSION 21
+
+#define VIGS_MAX_PLANES 3
+
+/*
+ * Fixed-width scalar types used on the wire, so target and host agree
+ * on sizes regardless of architecture.
+ */
+typedef signed char vigsp_s8;
+typedef signed short vigsp_s16;
+typedef signed int vigsp_s32;
+typedef signed long long vigsp_s64;
+typedef unsigned char vigsp_u8;
+typedef unsigned short vigsp_u16;
+typedef unsigned int vigsp_u32;
+typedef unsigned long long vigsp_u64;
+
+/* Semantic aliases; all are 32-bit on the wire. */
+typedef vigsp_u32 vigsp_bool;
+typedef vigsp_u32 vigsp_surface_id;
+typedef vigsp_u32 vigsp_offset;
+typedef vigsp_u32 vigsp_color;
+typedef vigsp_u32 vigsp_fence_seq;
+
+/*
+ * Command opcodes carried in vigsp_cmd_request_header.cmd.
+ */
+typedef enum
+{
+ /*
+ * These command are guaranteed to sync on host, i.e.
+ * no fence is required.
+ * @{
+ */
+ vigsp_cmd_init = 0x0,
+ vigsp_cmd_reset = 0x1,
+ vigsp_cmd_exit = 0x2,
+ vigsp_cmd_set_root_surface = 0x3,
+ /*
+ * @}
+ */
+ /*
+ * These commands are executed asynchronously.
+ * @{
+ */
+ vigsp_cmd_create_surface = 0x4,
+ vigsp_cmd_destroy_surface = 0x5,
+ vigsp_cmd_update_vram = 0x6,
+ vigsp_cmd_update_gpu = 0x7,
+ vigsp_cmd_copy = 0x8,
+ vigsp_cmd_solid_fill = 0x9,
+ vigsp_cmd_set_plane = 0xA,
+ vigsp_cmd_ga_copy = 0xB
+ /*
+ * @}
+ */
+} vigsp_cmd;
+
+/* Pixel formats for surfaces (little-endian byte order in the name). */
+typedef enum
+{
+ vigsp_surface_bgrx8888 = 0x0,
+ vigsp_surface_bgra8888 = 0x1,
+} vigsp_surface_format;
+
+/* Pixel formats accepted by set_plane; superset of surface formats. */
+typedef enum
+{
+ vigsp_plane_bgrx8888 = 0x0,
+ vigsp_plane_bgra8888 = 0x1,
+ vigsp_plane_nv21 = 0x2,
+ vigsp_plane_nv42 = 0x3,
+ vigsp_plane_nv61 = 0x4,
+ vigsp_plane_yuv420 = 0x5
+} vigsp_plane_format;
+
+/* Plane rotation, counted in 90-degree steps. */
+typedef enum
+{
+ vigsp_rotation0 = 0x0,
+ vigsp_rotation90 = 0x1,
+ vigsp_rotation180 = 0x2,
+ vigsp_rotation270 = 0x3
+} vigsp_rotation;
+
+/* All wire structures are packed: no padding may differ across ABIs. */
+#pragma pack(1)
+
+struct vigsp_point
+{
+ vigsp_u32 x;
+ vigsp_u32 y;
+};
+
+struct vigsp_size
+{
+ vigsp_u32 w;
+ vigsp_u32 h;
+};
+
+struct vigsp_rect
+{
+ struct vigsp_point pos;
+ struct vigsp_size size;
+};
+
+/* Source/destination pair for copy commands. */
+struct vigsp_copy
+{
+ struct vigsp_point from;
+ struct vigsp_point to;
+ struct vigsp_size size;
+};
+
+/*
+ * Header preceding a batch of requests sent to the host.
+ */
+struct vigsp_cmd_batch_header
+{
+ /*
+ * Fence sequence requested by this batch.
+ * 0 for none.
+ */
+ vigsp_fence_seq fence_seq;
+
+ /*
+ * Batch size starting from batch header.
+ * Can be 0.
+ */
+ vigsp_u32 size;
+};
+
+/*
+ * Header preceding each individual request inside a batch.
+ */
+struct vigsp_cmd_request_header
+{
+ vigsp_cmd cmd;
+
+ /*
+ * Request size starting from request header.
+ */
+ vigsp_u32 size;
+};
+
+/*
+ * cmd_init
+ *
+ * First command to be sent, client passes its protocol version
+ * and receives server's in response. If 'client_version' doesn't match
+ * 'server_version' then initialization is considered failed. This
+ * is typically called on target's DRM driver load.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_init_request
+{
+ vigsp_u32 client_version;
+ vigsp_u32 server_version;
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_reset
+ *
+ * Destroys all surfaces but the root surface; this typically
+ * happens on DRM's lastclose.
+ *
+ * @{
+ * @}
+ */
+
+/*
+ * cmd_exit
+ *
+ * Destroys all surfaces and transitions into uninitialized state, this
+ * typically happens when target's DRM driver gets unloaded.
+ *
+ * @{
+ * @}
+ */
+
+/*
+ * cmd_create_surface
+ *
+ * Called for each surface created. Client passes 'id' of the surface,
+ * all further operations must be carried out using this id. 'id' is
+ * unique across the whole target system.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_create_surface_request
+{
+ vigsp_u32 width;
+ vigsp_u32 height;
+ /* Bytes per scanline. */
+ vigsp_u32 stride;
+ vigsp_surface_format format;
+ /* Client-chosen id; used by all subsequent commands. */
+ vigsp_surface_id id;
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_destroy_surface
+ *
+ * Destroys the surface identified by 'id'. Surface 'id' may not be used
+ * after this call and its id can be assigned to some other surface right
+ * after this call.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_destroy_surface_request
+{
+ vigsp_surface_id id;
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_set_root_surface
+ *
+ * Sets surface identified by 'id' as new root surface. Root surface is the
+ * one that's displayed on screen. Root surface resides in VRAM
+ * all the time if 'scanout' is true.
+ *
+ * Pass 0 as id in order to reset the root surface.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_set_root_surface_request
+{
+ vigsp_surface_id id;
+ vigsp_bool scanout;
+ vigsp_offset offset;
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_update_vram
+ *
+ * Updates 'sfc_id' in vram.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_update_vram_request
+{
+ vigsp_surface_id sfc_id;
+ vigsp_offset offset;
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_update_gpu
+ *
+ * Updates 'sfc_id' in GPU.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_update_gpu_request
+{
+ vigsp_surface_id sfc_id;
+ vigsp_offset offset;
+ vigsp_u32 num_entries;
+ struct vigsp_rect entries[0];
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_copy
+ *
+ * Copies parts of surface 'src_id' to
+ * surface 'dst_id'.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_copy_request
+{
+ vigsp_surface_id src_id;
+ vigsp_surface_id dst_id;
+ vigsp_u32 num_entries;
+ struct vigsp_copy entries[0];
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_solid_fill
+ *
+ * Fills surface 'sfc_id' with color 'color' at 'entries'.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_solid_fill_request
+{
+ vigsp_surface_id sfc_id;
+ vigsp_color color;
+ vigsp_u32 num_entries;
+ struct vigsp_rect entries[0];
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_set_plane
+ *
+ * Assigns surfaces 'surfaces' to plane identified by 'plane'.
+ *
+ * Pass 0 as surfaces[0] in order to disable the plane.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_set_plane_request
+{
+ /* Plane number (0 .. VIGS_MAX_PLANES - 1). */
+ vigsp_u32 plane;
+ /* Framebuffer dimensions in pixels. */
+ vigsp_u32 width;
+ vigsp_u32 height;
+ vigsp_plane_format format;
+ /*
+ * One id per color plane of the framebuffer; unused slots are 0.
+ * NOTE(review): 4 appears to cover multi-plane YUV layouts — confirm
+ * against the host-side handler.
+ */
+ vigsp_surface_id surfaces[4];
+ struct vigsp_rect src_rect;
+ vigsp_s32 dst_x;
+ vigsp_s32 dst_y;
+ struct vigsp_size dst_size;
+ vigsp_s32 z_pos;
+ vigsp_bool hflip;
+ vigsp_bool vflip;
+ vigsp_rotation rotation;
+};
+
+/*
+ * @}
+ */
+
+/*
+ * cmd_ga_copy
+ *
+ * Copies part of surface 'src_id' to
+ * surface 'dst_id' given surface
+ * sizes.
+ *
+ * @{
+ */
+
+struct vigsp_cmd_ga_copy_request
+{
+ vigsp_surface_id src_id;
+ vigsp_bool src_scanout;
+ vigsp_offset src_offset;
+ vigsp_u32 src_stride;
+ vigsp_surface_id dst_id;
+ vigsp_u32 dst_stride;
+ struct vigsp_copy entry;
+};
+
+/*
+ * @}
+ */
+
+#pragma pack()
+
+#endif
--- /dev/null
+#ifndef _VIGS_REGS_H_
+#define _VIGS_REGS_H_
+
+/* Byte offsets of the VIGS MMIO registers. */
+#define VIGS_REG_EXEC 0
+#define VIGS_REG_CON 8
+#define VIGS_REG_INT 16
+#define VIGS_REG_FENCE_LOWER 24
+#define VIGS_REG_FENCE_UPPER 32
+
+/* VIGS_REG_CON bits. */
+#define VIGS_REG_CON_VBLANK_ENABLE 1
+
+/* VIGS_REG_INT status bits. */
+#define VIGS_REG_INT_VBLANK_PENDING 1
+#define VIGS_REG_INT_FENCE_ACK_PENDING 2
+
+#endif
--- /dev/null
+#include "vigs_surface.h"
+#include "vigs_device.h"
+#include "vigs_comm.h"
+#include "vigs_mman.h"
+#include <drm/vigs_drm.h>
+
+/*
+ * Functions below MUST be accessed between
+ * vigs_gem_reserve/vigs_gem_unreserve.
+ * @{
+ */
+
+/*
+ * Compute the surface access flags (SAF) from the current reader and
+ * writer counts. Caller must hold the GEM reservation.
+ */
+static u32 vigs_surface_saf(struct vigs_surface *sfc)
+{
+ u32 flags = (sfc->num_readers > 0) ? DRM_VIGS_SAF_READ : 0;
+
+ if (sfc->num_writers > 0) {
+ flags |= DRM_VIGS_SAF_WRITE;
+ }
+
+ return flags;
+}
+
+/*
+ * React to a change of the surface access flags. Caller must hold
+ * the GEM reservation and pass the flags as they were before the
+ * change; the new flags are recomputed from the counters.
+ */
+static void vigs_surface_saf_changed(struct vigs_surface *sfc,
+ u32 old_saf)
+{
+ u32 new_saf = vigs_surface_saf(sfc);
+
+ if (old_saf == new_saf) {
+ return;
+ }
+
+ /*
+ * If we're in GPU and access is write-only then we can
+ * obviously skip first VRAM update, since there's nothing
+ * to read back yet. After first VRAM update, however, we must
+ * read back every time since the clients must see their
+ * changes.
+ */
+
+ sfc->skip_vram_update = !vigs_gem_in_vram(&sfc->gem) &&
+ (new_saf == DRM_VIGS_SAF_WRITE) &&
+ !(old_saf & DRM_VIGS_SAF_WRITE);
+}
+
+/*
+ * End a VMA's access period on its surface, updating the reader and
+ * writer counts. With 'sync' the surface is flushed to the GPU
+ * immediately (when it is in VRAM); without it the write is left
+ * pending until the next GPU update. Caller must hold the GEM
+ * reservation.
+ */
+static void vigs_vma_data_end_access(struct vigs_vma_data *vma_data, bool sync)
+{
+ struct vigs_surface *sfc = vma_data->sfc;
+ struct vigs_device *vigs_dev = sfc->gem.base.dev->dev_private;
+ u32 old_saf = vigs_surface_saf(sfc);
+
+ if (vma_data->saf & DRM_VIGS_SAF_READ) {
+ --sfc->num_readers;
+ }
+
+ /* Readers-only VMA: nothing further to account for. */
+ if ((vma_data->saf & DRM_VIGS_SAF_WRITE) == 0) {
+ goto out;
+ }
+
+ if (sync) {
+ /*
+ * We have a sync, drop all pending
+ * writers.
+ */
+ sfc->num_writers -= sfc->num_pending_writers;
+ sfc->num_pending_writers = 0;
+ }
+
+ /* Not in VRAM: no flush possible, just drop this writer. */
+ if (!vigs_gem_in_vram(&sfc->gem)) {
+ --sfc->num_writers;
+ goto out;
+ }
+
+ if (sync) {
+ --sfc->num_writers;
+ vigs_comm_update_gpu(vigs_dev->comm,
+ sfc->id,
+ sfc->width,
+ sfc->height,
+ vigs_gem_offset(&sfc->gem));
+ sfc->is_gpu_dirty = false;
+ } else {
+ /* Still counted in num_writers until the next GPU update. */
+ ++sfc->num_pending_writers;
+ }
+
+out:
+ vma_data->saf = 0;
+
+ vigs_surface_saf_changed(sfc, old_saf);
+}
+
+/*
+ * @}
+ */
+
+/*
+ * Initialize per-VMA tracking state for 'sfc'. With access tracking
+ * the counters are driven by the start/end access ioctls; without it
+ * the VMA is registered as a permanent reader and writer right away,
+ * reading back dirty GPU contents if needed.
+ */
+void vigs_vma_data_init(struct vigs_vma_data *vma_data,
+ struct vigs_surface *sfc,
+ bool track_access)
+{
+ struct vigs_device *vigs_dev = sfc->gem.base.dev->dev_private;
+ u32 old_saf;
+
+ vma_data->sfc = sfc;
+ vma_data->saf = 0;
+ vma_data->track_access = track_access;
+
+ if (track_access) {
+ return;
+ }
+
+ /*
+ * If we don't want to track access for this VMA
+ * then register as both reader and writer.
+ */
+
+ vigs_gem_reserve(&sfc->gem);
+
+ old_saf = vigs_surface_saf(sfc);
+
+ ++sfc->num_writers;
+ ++sfc->num_readers;
+
+ /* New reader: pull dirty GPU contents into VRAM first. */
+ if (vigs_gem_in_vram(&sfc->gem) && sfc->is_gpu_dirty) {
+ vigs_comm_update_vram(vigs_dev->comm,
+ sfc->id,
+ vigs_gem_offset(&sfc->gem));
+ sfc->is_gpu_dirty = false;
+ }
+
+ vma_data->saf = DRM_VIGS_SAF_READ | DRM_VIGS_SAF_WRITE;
+
+ vigs_surface_saf_changed(sfc, old_saf);
+
+ vigs_gem_unreserve(&sfc->gem);
+}
+
+/*
+ * Tear down per-VMA tracking state on unmap, dropping any access the
+ * VMA still holds on its surface.
+ */
+void vigs_vma_data_cleanup(struct vigs_vma_data *vma_data)
+{
+ struct vigs_gem_object *gem = &vma_data->sfc->gem;
+
+ vigs_gem_reserve(gem);
+
+ /*
+ * On unmap we sync only when access tracking is enabled.
+ * Otherwise, we pretend we're going to sync
+ * some time later, but we never will.
+ */
+ vigs_vma_data_end_access(vma_data, vma_data->track_access);
+
+ vigs_gem_unreserve(gem);
+}
+
+/*
+ * GEM destroy callback: tell the host to drop the surface and remove
+ * it from the device's id table. id == 0 means the surface was never
+ * fully registered (see the vigs_surface_create() error path).
+ */
+static void vigs_surface_destroy(struct vigs_gem_object *gem)
+{
+ struct vigs_surface *sfc = vigs_gem_to_vigs_surface(gem);
+ struct vigs_device *vigs_dev = gem->base.dev->dev_private;
+
+ if (sfc->id) {
+ vigs_comm_destroy_surface(vigs_dev->comm, sfc->id);
+
+ vigs_device_remove_surface(vigs_dev, sfc->id);
+
+ DRM_DEBUG_DRIVER("Surface destroyed (id = %u)\n", sfc->id);
+ }
+}
+
+/*
+ * Allocate a surface, back it with a GEM object of stride * height
+ * bytes, register it in the device id table and announce it to the
+ * host. On failure *sfc is NULL and a negative errno is returned.
+ */
+int vigs_surface_create(struct vigs_device *vigs_dev,
+ u32 width,
+ u32 height,
+ u32 stride,
+ vigsp_surface_format format,
+ bool scanout,
+ struct vigs_surface **sfc)
+{
+ int ret = 0;
+
+ *sfc = kzalloc(sizeof(**sfc), GFP_KERNEL);
+
+ if (!*sfc) {
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ (*sfc)->width = width;
+ (*sfc)->height = height;
+ (*sfc)->stride = stride;
+ (*sfc)->format = format;
+ (*sfc)->scanout = scanout;
+
+ ret = vigs_gem_init(&(*sfc)->gem,
+ vigs_dev,
+ VIGS_GEM_TYPE_SURFACE,
+ stride * height,
+ false,
+ &vigs_surface_destroy);
+
+ /*
+ * NOTE(review): this path frees nothing itself — presumably
+ * vigs_gem_init() releases the object on failure; confirm, otherwise
+ * the kzalloc above leaks here.
+ */
+ if (ret != 0) {
+ goto fail1;
+ }
+
+ ret = vigs_device_add_surface_unlocked(vigs_dev, *sfc, &(*sfc)->id);
+
+ if (ret != 0) {
+ goto fail2;
+ }
+
+ ret = vigs_comm_create_surface(vigs_dev->comm,
+ width,
+ height,
+ stride,
+ format,
+ (*sfc)->id);
+
+ if (ret != 0) {
+ goto fail3;
+ }
+
+ return 0;
+
+fail3:
+ vigs_device_remove_surface_unlocked(vigs_dev, (*sfc)->id);
+fail2:
+ /* Zero the id so vigs_surface_destroy() won't notify the host. */
+ (*sfc)->id = 0;
+ vigs_gem_cleanup(&(*sfc)->gem);
+fail1:
+ *sfc = NULL;
+
+ return ret;
+}
+
+/*
+ * Decide whether VRAM must be refreshed for this surface: yes when it
+ * is currently accessed at all, unless a one-shot skip was armed by
+ * vigs_surface_saf_changed(). The skip flag is consumed either way.
+ * Caller must hold the GEM reservation.
+ */
+bool vigs_surface_need_vram_update(struct vigs_surface *sfc)
+{
+ bool skip = sfc->skip_vram_update;
+
+ sfc->skip_vram_update = false;
+
+ return !skip && (vigs_surface_saf(sfc) != 0);
+}
+
+/*
+ * Decide whether the GPU copy must be refreshed: true when there were
+ * writers before this call. Pending (asynchronously ended) writers are
+ * retired here, since a GPU update is about to happen. Caller must
+ * hold the GEM reservation.
+ */
+bool vigs_surface_need_gpu_update(struct vigs_surface *sfc)
+{
+ u32 old_saf = vigs_surface_saf(sfc);
+
+ sfc->num_writers -= sfc->num_pending_writers;
+ sfc->num_pending_writers = 0;
+
+ vigs_surface_saf_changed(sfc, old_saf);
+
+ return old_saf & DRM_VIGS_SAF_WRITE;
+}
+
+/*
+ * DRM_VIGS_CREATE_SURFACE ioctl: create a surface and hand the caller
+ * a GEM handle plus the surface size and id.
+ */
+int vigs_surface_create_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ struct drm_vigs_create_surface *args = data;
+ struct vigs_surface *sfc = NULL;
+ uint32_t handle;
+ int ret;
+
+ ret = vigs_surface_create(vigs_dev,
+ args->width,
+ args->height,
+ args->stride,
+ args->format,
+ args->scanout,
+ &sfc);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ ret = drm_gem_handle_create(file_priv,
+ &sfc->gem.base,
+ &handle);
+
+ /*
+ * Drop the creation reference; on success the handle keeps the
+ * object alive, on failure this destroys it.
+ */
+ drm_gem_object_unreference_unlocked(&sfc->gem.base);
+
+ if (ret == 0) {
+ args->handle = handle;
+ args->size = vigs_gem_size(&sfc->gem);
+ args->id = sfc->id;
+ }
+
+ return ret;
+}
+
+/*
+ * DRM_VIGS_SURFACE_INFO ioctl: report geometry, format and id of the
+ * surface behind a GEM handle. Returns -ENOENT for an unknown handle
+ * or a GEM object that is not a surface.
+ */
+int vigs_surface_info_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vigs_surface_info *args = data;
+ struct drm_gem_object *gem;
+ struct vigs_gem_object *vigs_gem;
+ int ret = -ENOENT;
+
+ gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
+
+ if (gem == NULL) {
+ return -ENOENT;
+ }
+
+ vigs_gem = gem_to_vigs_gem(gem);
+
+ if (vigs_gem->type == VIGS_GEM_TYPE_SURFACE) {
+ struct vigs_surface *sfc = vigs_gem_to_vigs_surface(vigs_gem);
+
+ args->width = sfc->width;
+ args->height = sfc->height;
+ args->stride = sfc->stride;
+ args->format = sfc->format;
+ args->scanout = sfc->scanout;
+ args->size = vigs_gem_size(vigs_gem);
+ args->id = sfc->id;
+
+ ret = 0;
+ }
+
+ drm_gem_object_unreference_unlocked(gem);
+
+ return ret;
+}
+
+/*
+ * DRM_VIGS_SURFACE_SET_GPU_DIRTY ioctl: mark the surface behind a GEM
+ * handle as modified on the GPU so the next reader triggers a VRAM
+ * read-back. Only meaningful while the surface resides in VRAM.
+ */
+int vigs_surface_set_gpu_dirty_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vigs_surface_set_gpu_dirty *args = data;
+ struct drm_gem_object *gem;
+ struct vigs_gem_object *vigs_gem;
+ struct vigs_surface *sfc;
+
+ gem = drm_gem_object_lookup(drm_dev, file_priv, args->handle);
+
+ if (gem == NULL) {
+ return -ENOENT;
+ }
+
+ vigs_gem = gem_to_vigs_gem(gem);
+
+ if (vigs_gem->type != VIGS_GEM_TYPE_SURFACE) {
+ drm_gem_object_unreference_unlocked(gem);
+ return -ENOENT;
+ }
+
+ sfc = vigs_gem_to_vigs_surface(vigs_gem);
+
+ vigs_gem_reserve(&sfc->gem);
+
+ if (vigs_gem_in_vram(&sfc->gem)) {
+ sfc->is_gpu_dirty = true;
+ }
+
+ vigs_gem_unreserve(&sfc->gem);
+
+ drm_gem_object_unreference_unlocked(gem);
+
+ return 0;
+}
+
+/*
+ * vigs_mman_access_vma() callback for the start-access ioctl: replace
+ * the VMA's current access flags with args->saf, adjusting the
+ * surface's reader/writer counts and reading back dirty GPU contents
+ * when read access is gained. No-op for VMAs without access tracking.
+ */
+static int vigs_surface_start_access(void *user_data, void *vma_data_opaque)
+{
+ struct drm_vigs_surface_start_access *args = user_data;
+ struct vigs_vma_data *vma_data = vma_data_opaque;
+ struct vigs_surface *sfc = vma_data->sfc;
+ struct vigs_device *vigs_dev;
+ u32 old_saf;
+
+ if (!sfc) {
+ return -ENOENT;
+ }
+
+ if (!vma_data->track_access) {
+ return 0;
+ }
+
+ vigs_dev = sfc->gem.base.dev->dev_private;
+
+ /* Reject flags outside DRM_VIGS_SAF_MASK. */
+ if ((args->saf & ~DRM_VIGS_SAF_MASK) != 0) {
+ return -EINVAL;
+ }
+
+ vigs_gem_reserve(&sfc->gem);
+
+ old_saf = vigs_surface_saf(sfc);
+
+ /* Retire the VMA's previous access... */
+ if (vma_data->saf & DRM_VIGS_SAF_READ) {
+ --sfc->num_readers;
+ }
+
+ if (vma_data->saf & DRM_VIGS_SAF_WRITE) {
+ --sfc->num_writers;
+ }
+
+ /* ...and account the newly requested one. */
+ if (args->saf & DRM_VIGS_SAF_WRITE) {
+ ++sfc->num_writers;
+ }
+
+ if (args->saf & DRM_VIGS_SAF_READ) {
+ ++sfc->num_readers;
+
+ /* New reader: pull dirty GPU contents into VRAM first. */
+ if (vigs_gem_in_vram(&sfc->gem) && sfc->is_gpu_dirty) {
+ vigs_comm_update_vram(vigs_dev->comm,
+ sfc->id,
+ vigs_gem_offset(&sfc->gem));
+ sfc->is_gpu_dirty = false;
+ }
+ }
+
+ vma_data->saf = args->saf;
+
+ vigs_surface_saf_changed(sfc, old_saf);
+
+ vigs_gem_unreserve(&sfc->gem);
+
+ return 0;
+}
+
+/*
+ * DRM_VIGS_SURFACE_START_ACCESS ioctl: resolve the user address to a
+ * VMA and delegate to vigs_surface_start_access().
+ */
+int vigs_surface_start_access_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vigs_surface_start_access *args = data;
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ unsigned long address = args->address;
+
+ return vigs_mman_access_vma(vigs_dev->mman,
+ address,
+ &vigs_surface_start_access,
+ args);
+}
+
+/*
+ * vigs_mman_access_vma() callback for the end-access ioctl: drop the
+ * VMA's access, flushing to the GPU when args->sync is set. No-op for
+ * VMAs without access tracking.
+ */
+static int vigs_surface_end_access(void *user_data, void *vma_data_opaque)
+{
+ struct drm_vigs_surface_end_access *args = user_data;
+ struct vigs_vma_data *vma_data = vma_data_opaque;
+ struct vigs_surface *sfc = vma_data->sfc;
+
+ if (!sfc) {
+ return -ENOENT;
+ }
+
+ if (!vma_data->track_access) {
+ return 0;
+ }
+
+ vigs_gem_reserve(&sfc->gem);
+
+ vigs_vma_data_end_access(vma_data, args->sync);
+
+ vigs_gem_unreserve(&sfc->gem);
+
+ return 0;
+}
+
+/*
+ * DRM_VIGS_SURFACE_END_ACCESS ioctl: resolve the user address to a
+ * VMA and delegate to vigs_surface_end_access().
+ */
+int vigs_surface_end_access_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vigs_surface_end_access *args = data;
+ struct vigs_device *vigs_dev = drm_dev->dev_private;
+ unsigned long address = args->address;
+
+ return vigs_mman_access_vma(vigs_dev->mman,
+ address,
+ &vigs_surface_end_access,
+ args);
+}
--- /dev/null
+#ifndef _VIGS_SURFACE_H_
+#define _VIGS_SURFACE_H_
+
+#include "drmP.h"
+#include "vigs_protocol.h"
+#include "vigs_gem.h"
+
+struct vigs_surface
+{
+ /*
+ * Must be first member!
+ */
+ struct vigs_gem_object gem;
+
+ /* Geometry and format, fixed at creation time. */
+ u32 width;
+ u32 height;
+ u32 stride;
+ vigsp_surface_format format;
+ /* True when this surface can be displayed directly. */
+ bool scanout;
+ /* Host-visible id; 0 means not registered with the host. */
+ vigsp_surface_id id;
+
+ /*
+ * Members below MUST be accessed between
+ * vigs_gem_reserve/vigs_gem_unreserve.
+ * @{
+ */
+
+ /* GPU copy modified; next reader must trigger a VRAM read-back. */
+ bool is_gpu_dirty;
+
+ /*
+ * Number of mmap areas (vmas) that accessed this surface for
+ * read/write.
+ * @{
+ */
+ u32 num_readers;
+ u32 num_writers;
+ /*
+ * @}
+ */
+
+ /*
+ * Number of mmap area writers that ended access asynchronously, i.e.
+ * they still account for in 'num_writers', but as soon as first GPU
+ * update operation takes place they'll be gone.
+ */
+ u32 num_pending_writers;
+
+ /*
+ * Specifies that we should not update VRAM on next 'update_vram'
+ * request. Lasts for one request.
+ */
+ bool skip_vram_update;
+
+ /*
+ * @}
+ */
+};
+
+/*
+ * Per-mmap-area state: which surface the VMA maps, the access flags
+ * (DRM_VIGS_SAF_*) it currently holds, and whether access is tracked
+ * via the start/end access ioctls.
+ */
+struct vigs_vma_data
+{
+ struct vigs_surface *sfc;
+ u32 saf;
+ bool track_access;
+};
+
+void vigs_vma_data_init(struct vigs_vma_data *vma_data,
+ struct vigs_surface *sfc,
+ bool track_access);
+
+void vigs_vma_data_cleanup(struct vigs_vma_data *vma_data);
+
+/* Upcast from the embedded GEM object to the owning surface. */
+static inline struct vigs_surface *vigs_gem_to_vigs_surface(struct vigs_gem_object *vigs_gem)
+{
+ return container_of(vigs_gem, struct vigs_surface, gem);
+}
+
+int vigs_surface_create(struct vigs_device *vigs_dev,
+ u32 width,
+ u32 height,
+ u32 stride,
+ vigsp_surface_format format,
+ bool scanout,
+ struct vigs_surface **sfc);
+
+/*
+ * Functions below MUST be accessed between
+ * vigs_gem_reserve/vigs_gem_unreserve.
+ * @{
+ */
+
+bool vigs_surface_need_vram_update(struct vigs_surface *sfc);
+
+bool vigs_surface_need_gpu_update(struct vigs_surface *sfc);
+
+/*
+ * @}
+ */
+
+/*
+ * IOCTLs
+ * @{
+ */
+
+int vigs_surface_create_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_surface_info_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_surface_set_gpu_dirty_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_surface_start_access_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+int vigs_surface_end_access_ioctl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv);
+
+/*
+ * @}
+ */
+
+#endif
--- /dev/null
+#
+# YaGL configuration
+#
+
+config YAGL
+ tristate "YaGL passthrough driver"
+ depends on (DRM || FB)
+ default n
+ help
+ This module enables YaGL passthrough from emulated system
+ to hypervisor (for example, QEMU). Must be used together with fake
+ (hypervisor-aware) OpenGL ES libraries.
+
+config YAGL_DEBUG
+ bool "YaGL calls debug messages"
+ depends on YAGL
+ default n
+ help
+ Enable YaGL debug messages.
--- /dev/null
+#
+# Makefile for the YaGL passthrough driver.
+#
+
+ccflags-y := -Idrivers/gpu/yagl -Werror
+
+yagl-y := main.o yagl_driver.o
+
+obj-$(CONFIG_YAGL) += yagl.o
--- /dev/null
+#ifndef _YAGL_DEBUG_H_
+#define _YAGL_DEBUG_H_
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include "yagl.h"
+
+/*
+ * dprintk() - debug trace compiled in only with CONFIG_YAGL_DEBUG;
+ * prefixes every message with "yagl::<function>: ". Uses standard
+ * __func__ rather than the deprecated GCC __FUNCTION__ extension.
+ */
+#ifdef CONFIG_YAGL_DEBUG
+# define dprintk(fmt, args...) printk(KERN_DEBUG YAGL_NAME "::%s: " fmt, __func__, ## args)
+#else
+# define dprintk(fmt, args...)
+#endif
+
+#endif
--- /dev/null
+#include <linux/module.h>
+#include <linux/init.h>
+#include "print.h"
+#include "yagl_driver.h"
+
+MODULE_AUTHOR("Stanislav Vorobiov");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/*
+ * Module entry point: register the YaGL driver and announce success.
+ * Returns the registration status.
+ */
+int yagl_init(void)
+{
+ int status = yagl_driver_register();
+
+ if (status == 0)
+ {
+ print_info("module loaded\n");
+ }
+
+ return status;
+}
+
+/*
+ * Module exit point: unregister the YaGL driver.
+ */
+void yagl_cleanup(void)
+{
+ yagl_driver_unregister();
+
+ print_info("module unloaded\n");
+}
+
+module_init(yagl_init);
+module_exit(yagl_cleanup);
--- /dev/null
+#ifndef _YAGL_PRINT_H_
+#define _YAGL_PRINT_H_
+
+#include <linux/kernel.h>
+#include "yagl.h"
+
+/* Unconditional log helpers, prefixed with the module name. */
+#define print_info(fmt, args...) printk(KERN_INFO YAGL_NAME ": " fmt, ## args)
+
+#define print_error(fmt, args...) printk(KERN_ERR YAGL_NAME ": " fmt, ## args)
+
+#endif
--- /dev/null
+#ifndef _YAGL_H_
+#define _YAGL_H_
+
+/*
+ * This is the module name.
+ */
+#define YAGL_NAME "yagl"
+
+#endif
--- /dev/null
+#include "yagl_driver.h"
+#include "yagl_ioctl.h"
+#include "yagl.h"
+#include "debug.h"
+#include "print.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/pci.h>
+#include <linux/pagemap.h>
+
+/* Per-user register pair within the device's register page. */
+#define YAGL_REG_BUFFPTR 0
+#define YAGL_REG_TRIGGER 4
+#define YAGL_REGS_SIZE 8
+
+/* One register pair per user; the page bounds the user count. */
+#define YAGL_MAX_USERS (PAGE_SIZE / YAGL_REGS_SIZE)
+
+/* Address of user 'index''s register pair within 'regs'. */
+#define YAGL_USER_PTR(regs, index) ((regs) + ((index) * YAGL_REGS_SIZE))
+
+#define PCI_VENDOR_ID_YAGL 0x19B1
+#define PCI_DEVICE_ID_YAGL 0x1010
+
+/* PCI ids of the emulated YaGL device; exported for module autoload. */
+static struct pci_device_id yagl_pci_table[] =
+{
+ {
+ .vendor = PCI_VENDOR_ID_YAGL,
+ .device = PCI_DEVICE_ID_YAGL,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, yagl_pci_table);
+
+struct yagl_device
+{
+ /* PCI device we're managing */
+ struct pci_dev *pci_dev;
+
+ /* Misc device for accessing YaGL memory from user space */
+ struct miscdevice miscdev;
+
+ /* Physical address of YaGL registers. */
+ unsigned long regs_pa;
+
+ /* Memory area which is used for target <-> host communications */
+ void __iomem *regs;
+
+ /* 1 when user is active, 0 when slot can be used */
+ int user_map[YAGL_MAX_USERS];
+
+ /* Mutex used to serialize device operations */
+ struct mutex mutex;
+};
+
+/*
+ * One pinned user memory region, kept on yagl_file.mlock_list and
+ * keyed by its user-space address.
+ */
+struct yagl_mlock
+{
+ struct list_head list;
+
+ unsigned long address;
+
+ struct page **pages;
+ u32 num_pages;
+};
+
+struct yagl_file
+{
+ /* Owning device */
+ struct yagl_device *device;
+
+ /* Index in 'user_map', filled on 'open' */
+ int index;
+
+ /* Pages of a buffer. */
+ struct page **pages;
+ u32 num_pages;
+
+ /* Render type and host OpenGL version for this client, filled on 'open'. */
+ u32 render_type;
+ u32 gl_version;
+
+ /* List of mlock'ed memory regions. */
+ struct list_head mlock_list;
+};
+
+/*
+ * Write a 32-bit value into the marshalling buffer and advance it.
+ * NOTE(review): the cursor advances 8 bytes per value, not 4 —
+ * presumably each marshalling slot is 8 bytes so 32- and 64-bit
+ * targets share one layout; confirm against the host protocol.
+ */
+static __inline void yagl_marshal_put_uint32_t(u8** buff, u32 value)
+{
+ *(u32*)(*buff) = value;
+ *buff += 8;
+}
+
+/*
+ * Read a 32-bit value from the marshalling buffer and advance it by
+ * one 8-byte slot (see yagl_marshal_put_uint32_t).
+ */
+static __inline u32 yagl_marshal_get_uint32_t(u8** buff)
+{
+ u32 tmp = *(u32*)*buff;
+ *buff += 8;
+ return tmp;
+}
+
+/*
+ * Marshal a page list as a count followed by each page's physical
+ * address.
+ * NOTE(review): page_to_phys() is truncated to 32 bits — appears to
+ * assume guest RAM below 4 GiB; verify for larger configurations.
+ */
+static void yagl_marshal_put_page_list(u8 **buff,
+ struct page **pages,
+ u32 count)
+{
+ u32 i;
+
+ yagl_marshal_put_uint32_t(buff, count);
+
+ for (i = 0; i < count; ++i) {
+ yagl_marshal_put_uint32_t(buff, (uint32_t)page_to_phys(pages[i]));
+ }
+}
+
+/*
+ * Point user slot 'index' at the physical address of its
+ * marshalling buffer; writing the BUFFPTR register hands the buffer
+ * to the host.
+ */
+static void yagl_user_activate_update(void __iomem *regs,
+ int index,
+ unsigned long buff_pa)
+{
+ writel(buff_pa, YAGL_USER_PTR(regs, index) + YAGL_REG_BUFFPTR);
+}
+
+/* Clear user slot 'index''s buffer pointer, deactivating it on the host. */
+static void yagl_user_deactivate(void __iomem *regs, int index)
+{
+ writel(0, YAGL_USER_PTR(regs, index) + YAGL_REG_BUFFPTR);
+}
+
+/*
+ * Grow *pages from 'num_present' to 'num_present + num_alloc' entries,
+ * allocating the new pages and preserving the existing pointers. On
+ * failure *pages is left untouched and -ENOMEM is returned; partially
+ * allocated pages are released.
+ */
+static int yagl_alloc_pages(struct page ***pages,
+ u32 num_present,
+ u32 num_alloc)
+{
+ struct page **tmp;
+ int ret = 0, i;
+
+ /*
+ * kcalloc() checks the count * size multiplication for overflow,
+ * unlike open-coded kzalloc(n * size).
+ */
+ tmp = kcalloc(num_present + num_alloc, sizeof(*tmp), GFP_KERNEL);
+
+ if (!tmp) {
+ dprintk("unable to allocate memory\n");
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ for (i = 0; i < (int)num_alloc; ++i) {
+ tmp[num_present + i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (!tmp[num_present + i]) {
+ dprintk("unable to allocate page\n");
+ ret = -ENOMEM;
+ goto fail2;
+ }
+ }
+
+ /* Carry the existing entries over into the new array. */
+ if (num_present > 0) {
+ memcpy(tmp, *pages, num_present * sizeof(*tmp));
+ kfree(*pages);
+ }
+
+ *pages = tmp;
+
+ return 0;
+
+fail2:
+ /* Undo the pages allocated before the failure. */
+ while (--i >= 0) {
+ __free_page(tmp[num_present + i]);
+ }
+ kfree(tmp);
+fail1:
+ return ret;
+}
+
+/*
+ * Free the last 'num_put' pages of an array holding 'num_present'
+ * pages; when everything is released the array itself is freed and
+ * *pages reset to NULL.
+ * NOTE(review): with num_put < num_present the tail slots keep stale
+ * pointers to freed pages — callers appear to shrink num_pages
+ * accordingly; confirm all call sites.
+ */
+static void yagl_put_pages(struct page ***pages, u32 num_present, u32 num_put)
+{
+ u32 i;
+
+ for (i = 1; i <= num_put; ++i) {
+ __free_page((*pages)[num_present - i]);
+ }
+
+ if (num_present == num_put) {
+ kfree(*pages);
+ *pages = NULL;
+ }
+}
+
/*
 * open() handler: allocates per-client state, claims a free user slot,
 * allocates a one-page marshalling buffer and performs the version /
 * pid handshake with the host through that buffer.
 */
static int yagl_misc_open(struct inode *inode, struct file *file)
{
    int ret = 0;
    /*
     * Before we replace it below, ->private_data points at the
     * miscdevice embedded inside our struct yagl_device.
     */
    struct yagl_device *device = container_of(file->private_data,
                                              struct yagl_device,
                                              miscdev);
    struct yagl_file *yfile;
    int i;
    u8 *buff;
    pid_t process_id;
    pid_t thread_id;

    mutex_lock(&device->mutex);

    if (file->f_mode & FMODE_EXEC) {
        ret = -EPERM;
        goto fail1;
    }

    yfile = kzalloc(sizeof(*yfile), GFP_KERNEL);

    if (!yfile) {
        dprintk("unable to allocate memory\n");
        ret = -ENOMEM;
        goto fail1;
    }

    yfile->device = device;
    yfile->index = -1;

    /* Claim the first free slot in the device's user map. */
    for (i = 0; i < YAGL_MAX_USERS; ++i) {
        if (!device->user_map[i]) {
            yfile->index = i;
            device->user_map[i] = 1;
            break;
        }
    }

    if (yfile->index == -1) {
        print_error("no free slots\n");
        ret = -ENOMEM;
        goto fail2;
    }

    /* The initial marshalling buffer is a single page. */
    yfile->num_pages = 1;
    ret = yagl_alloc_pages(&yfile->pages, 0, yfile->num_pages);

    if (ret != 0) {
        goto fail3;
    }

    buff = kmap(yfile->pages[0]);

    memset(buff, 0, PAGE_SIZE);

    process_id = task_tgid_vnr(current);
    thread_id = task_pid_vnr(current);

    /* Handshake payload: driver version, then pid and tid. */
    yagl_marshal_put_uint32_t(&buff, YAGL_VERSION);
    yagl_marshal_put_uint32_t(&buff, process_id);
    yagl_marshal_put_uint32_t(&buff, thread_id);

    yagl_user_activate_update(device->regs,
                              yfile->index,
                              page_to_phys(yfile->pages[0]));

    /*
     * The reply is read right after the register write, so the host
     * presumably processes the buffer synchronously during that write
     * -- confirm against the device model.
     */
    if (yagl_marshal_get_uint32_t(&buff) != 1) {
        ret = -EIO;
        print_error("unable to init YaGL: probably version mismatch\n");
        goto fail4;
    }

    yfile->render_type = yagl_marshal_get_uint32_t(&buff);
    yfile->gl_version = yagl_marshal_get_uint32_t(&buff);

    kunmap(yfile->pages[0]);

    INIT_LIST_HEAD(&yfile->mlock_list);

    file->private_data = yfile;

    mutex_unlock(&device->mutex);

    print_info("%d opened\n", yfile->index);

    return nonseekable_open(inode, file);

fail4:
    kunmap(yfile->pages[0]);
    yagl_put_pages(&yfile->pages, yfile->num_pages, yfile->num_pages);
fail3:
    device->user_map[yfile->index] = 0;
fail2:
    kfree(yfile);
fail1:
    mutex_unlock(&device->mutex);

    return ret;
}
+
/*
 * release() handler: undoes everything set up by yagl_misc_open() and
 * the mlock ioctls for this client.
 */
static int yagl_misc_release(struct inode *inode, struct file *file)
{
    struct yagl_file *yfile = file->private_data;
    struct yagl_mlock *mlock, *tmp;
    u32 i;

    mutex_lock(&yfile->device->mutex);

    /* Tell the device this slot has no buffer before freeing pages. */
    yagl_user_deactivate(yfile->device->regs, yfile->index);

    /* Drop every region pinned via YAGL_IOC_MLOCK but never unlocked. */
    list_for_each_entry_safe(mlock, tmp, &yfile->mlock_list, list) {
        for (i = 0; i < mlock->num_pages; ++i) {
            set_page_dirty_lock(mlock->pages[i]);
            put_page(mlock->pages[i]);
        }
        kfree(mlock->pages);
        list_del(&mlock->list);
        kfree(mlock);
    }

    /* Free the whole marshalling buffer (all pages plus the array). */
    yagl_put_pages(&yfile->pages, yfile->num_pages, yfile->num_pages);

    /* Return the user slot to the pool. */
    yfile->device->user_map[yfile->index] = 0;

    mutex_unlock(&yfile->device->mutex);

    print_info("%d closed\n", yfile->index);

    kfree(file->private_data);
    file->private_data = NULL;

    return 0;
}
+
+static int yagl_misc_mmap_regs(struct yagl_file *yfile,
+ struct vm_area_struct *vma)
+{
+ int ret = 0;
+ u32 num_pages = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
+
+ if (num_pages != 1) {
+ dprintk("%d mmap must be called for 1 page only\n",
+ yfile->index);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ ret = remap_pfn_range(vma,
+ vma->vm_start,
+ (yfile->device->regs_pa >> PAGE_SHIFT),
+ num_pages,
+ vma->vm_page_prot);
+
+ if (ret != 0) {
+ dprintk("%d unable to remap regs memory: %d\n",
+ yfile->index,
+ ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
/*
 * mmap() of the marshalling buffer (pgoff >= 1).
 *
 * The buffer is resized to match the VMA: pages are allocated or freed,
 * the new page list is marshalled into a scratch page and handed to the
 * host, and on success the pages are inserted into the VMA.
 * Called with the device mutex held.
 */
static int yagl_misc_mmap_buffer(struct yagl_file *yfile,
                                 struct vm_area_struct *vma)
{
    int ret = 0;
    u32 i, num_pages = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
    u8 *buff;
    u32 status;
    unsigned long addr;

    if (num_pages == 0) {
        dprintk("%d mmap must be called with one page or more\n",
                yfile->index);
        return -EINVAL;
    }

    /*
     * The page list must fit in one page of 8-byte marshal slots;
     * two slots are reserved (count plus, presumably, the status
     * reply -- confirm against the host protocol).
     * NOTE(review): PAGE_SIZE is unsigned long, so strictly the
     * format specifier below should be %lu, not %ld.
     */
    if (num_pages > ((PAGE_SIZE / 8) - 2)) {
        dprintk("%d mmap must be called with not more than %ld pages\n",
                yfile->index,
                ((PAGE_SIZE / 8) - 2));
        return -EINVAL;
    }

    if (num_pages != yfile->num_pages) {
        if (num_pages > yfile->num_pages) {
            /* Growing: allocate the missing tail pages first. */
            ret = yagl_alloc_pages(&yfile->pages,
                                   yfile->num_pages,
                                   (num_pages - yfile->num_pages));

            if (ret != 0) {
                goto out;
            }

            /*
             * We have at least one new page, use it for page list.
             */

            buff = kmap(yfile->pages[num_pages - 1]);

            memset(buff, 0, PAGE_SIZE);

            yagl_marshal_put_page_list(&buff, yfile->pages, num_pages);

            yagl_user_activate_update(yfile->device->regs,
                                      yfile->index,
                                      page_to_phys(yfile->pages[num_pages - 1]));

            /* Read the host's status at the cursor position. */
            status = yagl_marshal_get_uint32_t(&buff);

            kunmap(yfile->pages[num_pages - 1]);

            if (status != 1) {
                /* Host refused: roll back the pages we just added. */
                yagl_put_pages(&yfile->pages,
                               num_pages,
                               (num_pages - yfile->num_pages));
                ret = -EIO;
                print_error("%d unable to increase YaGL buffer due to host error\n",
                            yfile->index);
                goto out;
            }
        } else {
            /*
             * We're putting at least one page, use it for page list before
             * putting.
             */

            buff = kmap(yfile->pages[yfile->num_pages - 1]);

            memset(buff, 0, PAGE_SIZE);

            yagl_marshal_put_page_list(&buff, yfile->pages, num_pages);

            yagl_user_activate_update(yfile->device->regs,
                                      yfile->index,
                                      page_to_phys(yfile->pages[yfile->num_pages - 1]));

            status = yagl_marshal_get_uint32_t(&buff);

            kunmap(yfile->pages[yfile->num_pages - 1]);

            if (status != 1) {
                ret = -EIO;
                print_error("%d unable to decrease YaGL buffer due to host error\n",
                            yfile->index);
                goto out;
            }

            /* Host accepted the smaller list; free the tail pages. */
            yagl_put_pages(&yfile->pages,
                           yfile->num_pages,
                           (yfile->num_pages - num_pages));
        }
    }

    yfile->num_pages = num_pages;

    /* Buffer pages must not be core-dumped or the VMA grown. */
    vma->vm_flags |= VM_DONTDUMP | VM_DONTEXPAND;

    addr = vma->vm_start;

    for (i = 0; i < num_pages; ++i) {
        ret = vm_insert_page(vma, addr, yfile->pages[i]);
        if (ret != 0) {
            dprintk("%d unable to map buffer: %d\n",
                    yfile->index,
                    ret);
            goto out;
        }

        addr += PAGE_SIZE;
    }

out:
    return ret;
}
+
+static int yagl_misc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct yagl_file *yfile = file->private_data;
+ int ret = 0;
+
+ dprintk("user = %d, pgoff = %lu, size = %lu\n",
+ yfile->index,
+ vma->vm_pgoff,
+ (vma->vm_end - vma->vm_start));
+
+ mutex_lock(&yfile->device->mutex);
+
+ if (vma->vm_pgoff == 0) {
+ /*
+ * First page is 'regs'.
+ */
+
+ ret = yagl_misc_mmap_regs(yfile, vma);
+ } else if (vma->vm_pgoff == 1) {
+ /*
+ * Everything else is buffer.
+ */
+
+ ret = yagl_misc_mmap_buffer(yfile, vma);
+ } else {
+ dprintk("%d mmap must be called with page offset 0 or 1\n",
+ yfile->index);
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&yfile->device->mutex);
+
+ return ret;
+}
+
+static int yagl_misc_mlock(struct yagl_file *yfile,
+ const struct yagl_mlock_arg *arg)
+{
+ int ret, i;
+ unsigned long address = arg->address & PAGE_MASK;
+ struct yagl_mlock *mlock;
+
+ dprintk("user = %d, address = %p, size = %u\n",
+ yfile->index,
+ (void*)arg->address,
+ arg->size);
+
+ if (arg->size == 0) {
+ dprintk("%d unable to mlock 0 bytes\n",
+ yfile->index);
+ return -EFAULT;
+ }
+
+ down_read(¤t->mm->mmap_sem);
+ mutex_lock(&yfile->device->mutex);
+
+ list_for_each_entry(mlock, &yfile->mlock_list, list) {
+ if (mlock->address == address) {
+ dprintk("%d address %p already locked\n",
+ yfile->index,
+ (void*)address);
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+
+ mlock = kzalloc(sizeof(*mlock), GFP_KERNEL);
+
+ if (!mlock) {
+ dprintk("%d unable to allocate memory\n",
+ yfile->index);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mlock->address = address;
+ mlock->num_pages = PAGE_ALIGN((arg->address & ~PAGE_MASK) + arg->size) >> PAGE_SHIFT;
+ mlock->pages = kzalloc(mlock->num_pages * sizeof(*mlock->pages), GFP_KERNEL);
+
+ if (!mlock->pages) {
+ dprintk("%d unable to allocate memory\n",
+ yfile->index);
+ kfree(mlock);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = get_user_pages(current, current->mm, mlock->address,
+ mlock->num_pages, 1, 0, mlock->pages, NULL);
+
+ if (ret < (int)mlock->num_pages) {
+ mutex_unlock(&yfile->device->mutex);
+ up_read(¤t->mm->mmap_sem);
+
+ for (i = 0; i < ret; ++i) {
+ put_page(mlock->pages[i]);
+ }
+ kfree(mlock->pages);
+ kfree(mlock);
+
+ ret = (ret >= 0) ? -EFAULT : ret;
+
+ dprintk("%d unable to get user pages: %d\n",
+ yfile->index,
+ ret);
+
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&mlock->list);
+
+ list_add_tail(&mlock->list, &yfile->mlock_list);
+
+ ret = 0;
+
+out:
+ mutex_unlock(&yfile->device->mutex);
+ up_read(¤t->mm->mmap_sem);
+
+ return ret;
+}
+
+static int yagl_misc_munlock(struct yagl_file *yfile,
+ unsigned long address)
+{
+ u32 i;
+ struct yagl_mlock *mlock;
+
+ dprintk("user = %d, address = %p\n",
+ yfile->index,
+ (void*)address);
+
+ address &= PAGE_MASK;
+
+ mutex_lock(&yfile->device->mutex);
+
+ list_for_each_entry(mlock, &yfile->mlock_list, list) {
+ if (mlock->address == address) {
+ for (i = 0; i < mlock->num_pages; ++i) {
+ set_page_dirty_lock(mlock->pages[i]);
+ put_page(mlock->pages[i]);
+ }
+ kfree(mlock->pages);
+ list_del(&mlock->list);
+ kfree(mlock);
+
+ mutex_unlock(&yfile->device->mutex);
+
+ return 0;
+ }
+ }
+
+ mutex_unlock(&yfile->device->mutex);
+
+ dprintk("%d address %p not locked\n",
+ yfile->index,
+ (void*)address);
+
+ return -ENOENT;
+}
+
/*
 * ioctl() dispatcher for the YaGL misc device.  Validates the command
 * magic and the user buffer, then dispatches to the per-command logic.
 */
static long yagl_misc_ioctl(struct file* file, unsigned int cmd, unsigned long arg)
{
    struct yagl_file *yfile = file->private_data;
    int ret = 0;
    /* Scratch space shared by all commands. */
    union
    {
        unsigned int uint;
        unsigned long ulong;
        struct yagl_user_info user_info;
        struct yagl_mlock_arg mlock_arg;
    } value;

    if (_IOC_TYPE(cmd) != YAGL_IOC_MAGIC) {
        return -ENOTTY;
    }

    /*
     * _IOC_READ means userspace reads (the driver writes), hence
     * VERIFY_WRITE -- and vice versa for _IOC_WRITE.
     */
    if (_IOC_DIR(cmd) & _IOC_READ) {
        ret = !access_ok(VERIFY_WRITE, (void __user*)arg, _IOC_SIZE(cmd));
    }

    if (_IOC_DIR(cmd) & _IOC_WRITE) {
        ret = ret || !access_ok(VERIFY_READ, (void __user*)arg, _IOC_SIZE(cmd));
    }

    if (ret != 0) {
        return -EFAULT;
    }

    ret = 0;

    switch (cmd) {
    case YAGL_IOC_GET_VERSION:
        value.uint = YAGL_VERSION;
        ret = put_user(value.uint, (unsigned int __user*)arg);
        break;
    case YAGL_IOC_GET_USER_INFO:
        /* Snapshot of the state captured during open(). */
        value.user_info.index = yfile->index;
        value.user_info.render_type = yfile->render_type;
        value.user_info.gl_version = yfile->gl_version;
        if (copy_to_user((struct yagl_user_info __user*)arg,
                         &value.user_info,
                         sizeof(value.user_info)) != 0) {
            ret = -EFAULT;
        }
        break;
    case YAGL_IOC_MLOCK:
        if (copy_from_user(&value.mlock_arg,
                           (struct yagl_mlock_arg __user*)arg,
                           sizeof(value.mlock_arg)) == 0) {
            ret = yagl_misc_mlock(yfile, &value.mlock_arg);
        } else {
            ret = -EFAULT;
        }
        break;
    case YAGL_IOC_MUNLOCK:
        ret = get_user(value.ulong, (unsigned long __user*)arg);
        if (ret == 0) {
            ret = yagl_misc_munlock(yfile, value.ulong);
        }
        break;
    default:
        ret = -ENOTTY;
        break;
    }

    return ret;
}
+
+static struct file_operations yagl_misc_fops =
+{
+ .owner = THIS_MODULE,
+ .open = yagl_misc_open,
+ .mmap = yagl_misc_mmap,
+ .release = yagl_misc_release,
+ .unlocked_ioctl = yagl_misc_ioctl,
+};
+
+static int yagl_driver_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
+{
+ int ret = 0;
+ struct yagl_device *device = NULL;
+ u32 mem_size = 0;
+
+ dprintk("probing PCI device \"%s\"\n", dev_name(&pci_dev->dev));
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+
+ if (!device) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = pci_enable_device(pci_dev);
+
+ if (ret != 0) {
+ dprintk("%s: unable to enable PCI device\n", dev_name(&pci_dev->dev));
+
+ goto fail;
+ }
+
+ device->pci_dev = pci_dev;
+
+ pci_set_master(pci_dev);
+
+ if (!pci_resource_start(pci_dev, 0)) {
+ dprintk("%s: bad PCI resource\n", dev_name(&pci_dev->dev));
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ mem_size = pci_resource_len(pci_dev, 0);
+
+ if (mem_size != PAGE_SIZE) {
+ dprintk("%s: mem size must be PAGE_SIZE\n", dev_name(&pci_dev->dev));
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ if (!request_mem_region(pci_resource_start(pci_dev, 0),
+ PAGE_SIZE,
+ dev_name(&pci_dev->dev))) {
+ dprintk("%s: mem size must be PAGE_SIZE\n", dev_name(&pci_dev->dev));
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ device->regs_pa = pci_resource_start(pci_dev, 0);
+
+ device->regs = ioremap(device->regs_pa, mem_size);
+
+ if (!device->regs) {
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ mutex_init(&device->mutex);
+
+ device->miscdev.minor = MISC_DYNAMIC_MINOR;
+ device->miscdev.name = YAGL_NAME;
+ device->miscdev.fops = &yagl_misc_fops;
+
+ ret = misc_register(&device->miscdev);
+
+ if (ret != 0) {
+ dprintk("%s: unable to register misc device\n", dev_name(&pci_dev->dev));
+
+ goto fail;
+ }
+
+ pci_set_drvdata(pci_dev, device);
+
+ print_info("%s: device added\n", dev_name(&pci_dev->dev));
+
+ return 0;
+
+fail:
+ if (device) {
+ if (device->regs) {
+ iounmap(device->regs);
+ }
+ if (device->regs_pa) {
+ release_mem_region(device->regs_pa, mem_size);
+ }
+ if (device->pci_dev) {
+ pci_disable_device(device->pci_dev);
+ }
+ kfree(device);
+ }
+
+ return ret;
+}
+
+static void yagl_driver_remove(struct pci_dev *pci_dev)
+{
+ struct yagl_device* device;
+
+ dprintk("removing driver from \"%s\"\n", dev_name(&pci_dev->dev));
+
+ device = pci_get_drvdata(pci_dev);
+
+ if (device != NULL) {
+ misc_deregister(&device->miscdev);
+
+ pci_set_drvdata(pci_dev, NULL);
+
+ iounmap(device->regs);
+ release_mem_region(device->regs_pa, PAGE_SIZE);
+ pci_disable_device(device->pci_dev);
+ kfree(device);
+ }
+}
+
/* PCI driver glue; yagl_pci_table is the device-id match table
 * (declared elsewhere in this file). */
static struct pci_driver yagl_driver =
{
    .name = YAGL_NAME,
    .id_table = yagl_pci_table,
    .probe = yagl_driver_probe,
    .remove = yagl_driver_remove,
};
+
/* Register the YaGL PCI driver (called from module init code). */
int yagl_driver_register(void)
{
    return pci_register_driver(&yagl_driver);
}
+
/* Unregister the YaGL PCI driver (called from module exit code). */
void yagl_driver_unregister(void)
{
    pci_unregister_driver(&yagl_driver);
}
--- /dev/null
#ifndef _YAGL_DRIVER_H_
#define _YAGL_DRIVER_H_

#include <linux/types.h>

/* Register the YaGL PCI driver with the PCI core. */
int yagl_driver_register(void);

/* Unregister the YaGL PCI driver. */
void yagl_driver_unregister(void);

#endif
--- /dev/null
#ifndef _YAGL_IOCTL_H_
#define _YAGL_IOCTL_H_

#include <linux/ioctl.h>

/*
 * Version number.
 */
#define YAGL_VERSION 24

/*
 * Device control codes magic.
 */
#define YAGL_IOC_MAGIC 'Y'

/*
 * Get version number.
 */
#define YAGL_IOC_GET_VERSION _IOR(YAGL_IOC_MAGIC, 0, unsigned int)

/*
 * Get user info.
 */
struct yagl_user_info
{
    unsigned int index;       /* slot index in the device's user map */
    unsigned int render_type; /* host render backend type */
    unsigned int gl_version;  /* host OpenGL version */
};

#define YAGL_IOC_GET_USER_INFO _IOR(YAGL_IOC_MAGIC, 1, struct yagl_user_info)

/*
 * Locks/unlocks memory. Exists solely
 * for offscreen backend's backing images.
 * @{
 */

struct yagl_mlock_arg
{
    unsigned long address; /* user VA; rounded down to a page boundary */
    unsigned int size;     /* length in bytes; must be non-zero */
};

#define YAGL_IOC_MLOCK _IOW(YAGL_IOC_MAGIC, 2, struct yagl_mlock_arg)

#define YAGL_IOC_MUNLOCK _IOW(YAGL_IOC_MAGIC, 3, unsigned long)

/*
 * @}
 */

#endif
--- /dev/null
#
# MARU virtual device drivers for the emulator.
# Each entry below depends on the top-level MARU switch.
#
menuconfig MARU
	tristate "MARU virtual device drivers for emulator"
	default n

config MARU_VIRTIO_TOUCHSCREEN
	tristate "MARU Virtio Touchscreen Driver"
	depends on MARU != n

config MARU_CAMERA
	tristate "MARU Camera Driver"
	depends on MARU != n && VIDEO_DEV && VIDEO_V4L2
	select VIDEOBUF_VMALLOC

config MARU_BACKLIGHT
	tristate "MARU Backlight Driver"
	depends on MARU && BACKLIGHT_CLASS_DEVICE
	default y
	help
	  Say Y to enable the backlight driver of MARU.

config MARU_JACK
	tristate "MARU Jack Driver"
	depends on MARU != n

# Only built when the kernel's own power-supply class is disabled.
config MARU_POWER_SUPPLY
	tristate "MARU Power supply Driver"
	depends on MARU != n && !POWER_SUPPLY

config MARU_VIRTIO_HWKEY
	tristate "MARU Virtio HW Key Driver"
	depends on MARU != n

config MARU_VIRTIO_TABLET
	tristate "MARU Virtio Tablet Driver"
	depends on MARU != n

config MARU_VIRTIO_KEYBOARD
	tristate "MARU Virtio Keyboard Driver"
	depends on MARU != n

config MARU_VIRTIO_EVDI
	tristate "MARU VirtIO Emulator Virtual Device Interface Driver"
	depends on MARU != n

config MARU_VIRTIO_SENSOR
	tristate "MARU VirtIO Virtual Sensor Device Driver"
	depends on MARU != n

config MARU_VIRTIO_NFC
	tristate "MARU VirtIO Virtual NFC Device Driver"
	depends on MARU != n

config MARU_BRILLCODEC
	tristate "MARU brillcodec driver"
	depends on MARU != n

config MARU_VIRTIO_VMODEM
	tristate "MARU VirtIO Virtual Modem Device Driver"
	depends on MARU != n

config MARU_VIRTIO_ROTARY
	tristate "MARU VirtIO Virtual Rotary Device Driver"
	depends on MARU != n

# An out-of-tree extension source directory built via the path below
# (see the MARU Makefile).
config MARU_EXTENSION_SOURCE
	tristate "MARU Extension source"
	depends on MARU != n

config MARU_EXTENSION_SOURCE_PATH
	string "MARU Extension source path"
	depends on MARU != n && MARU_EXTENSION_SOURCE != n
--- /dev/null
# MARU virtual device drivers. All warnings are treated as errors.
ccflags-y += -Werror
obj-$(CONFIG_MARU_VIRTIO_TOUCHSCREEN) += maru_virtio_touchscreen.o
# The in-tree camera driver is skipped when an extension source tree is
# configured -- presumably the extension provides its own; confirm.
ifndef CONFIG_MARU_EXTENSION_SOURCE
obj-$(CONFIG_MARU_CAMERA) += maru_camera.o
endif
obj-$(CONFIG_MARU_BACKLIGHT) += maru_bl.o
obj-$(CONFIG_MARU_VIRTIO_HWKEY) += maru_virtio_hwkey.o
obj-$(CONFIG_MARU_JACK) += maru_jack.o
obj-$(CONFIG_MARU_POWER_SUPPLY) += maru_power_supply.o
obj-$(CONFIG_MARU_VIRTIO_KEYBOARD) += maru_virtio_keyboard.o
obj-$(CONFIG_MARU_VIRTIO_NFC) += maru_virtio_nfc.o
obj-$(CONFIG_MARU_VIRTIO_EVDI) += maru_virtio_evdi.o
obj-$(CONFIG_MARU_VIRTIO_SENSOR) += sensors/ #maru_virtio_sensor.o
obj-$(CONFIG_MARU_BRILLCODEC) += maru_brillcodec.o
obj-$(CONFIG_MARU_VIRTIO_VMODEM) += maru_virtio_vmodem.o
obj-$(CONFIG_MARU_VIRTIO_ROTARY) += maru_virtio_rotary.o
obj-$(CONFIG_MARU_VIRTIO_TABLET) += maru_virtio_tablet.o
# Descend into the user-configured extension source directory.
obj-$(CONFIG_MARU_EXTENSION_SOURCE) += $(CONFIG_MARU_EXTENSION_SOURCE_PATH)/
--- /dev/null
+/*
+ * MARU Virtual Backlight Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Jo <jinhyung.jo@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ * Dohyung Hong
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/ctype.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/lcd.h>
+
+#include <linux/uaccess.h>
+
#define MARUBL_DRIVER_NAME "maru_backlight"

/* Brightness range exposed through the backlight class device. */
#define MIN_BRIGHTNESS 0
#define MAX_BRIGHTNESS 100

/* PCI match table for the virtual brightness device. */
static struct pci_device_id marubl_pci_table[] = {
    {
        .vendor = PCI_VENDOR_ID_TIZEN,
        .device = PCI_DEVICE_ID_VIRTUAL_BRIGHTNESS,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
    },
    { 0, },
};
MODULE_DEVICE_TABLE(pci, marubl_pci_table);
+
/* MARU virtual brightness(backlight) device structure */
struct marubl {
    struct backlight_device *bl_dev;     /* backlight class device */
    struct lcd_device *lcd_dev;          /* lcd class device */
    unsigned int prev_brightness;        /* level saved when HBM turns on */
    unsigned int brightness;             /* last level written to hw */
    resource_size_t reg_start, reg_size; /* BAR 1 range */
    /* memory mapped registers */
    unsigned char __iomem *marubl_mmreg;
    int power_off;                       /* last off flag written to hw */
    int hbm_on;                          /* High Brightness Mode state */
};
+
+/* ========================================================================== */
+static struct marubl *marubl_device;
+/* ========================================================================== */
+
+static int min_brightness = MIN_BRIGHTNESS;
+static int max_brightness = MAX_BRIGHTNESS;
+
+static int marubl_get_intensity(struct backlight_device *bd)
+{
+ return marubl_device->brightness;
+}
+
+static int marubl_send_intensity(struct backlight_device *bd)
+{
+ int intensity = bd->props.brightness;
+ unsigned int off = 0;
+
+ if (bd->props.power != FB_BLANK_UNBLANK) {
+ intensity = 0;
+ off = 1;
+ }
+ if (bd->props.state & BL_CORE_FBBLANK) {
+ intensity = 0;
+ off = 1;
+ }
+ if (bd->props.state & BL_CORE_SUSPENDED) {
+ intensity = 0;
+ off = 1;
+ }
+ if (marubl_device->hbm_on && !off && intensity != MAX_BRIGHTNESS) {
+ marubl_device->hbm_on = 0;
+ printk(KERN_INFO "HBM is turned off because brightness reduced.\n");
+ }
+
+ writel(intensity, marubl_device->marubl_mmreg);
+ writel(off, marubl_device->marubl_mmreg + 0x04);
+ marubl_device->brightness = intensity;
+ marubl_device->power_off = off ? 1 : 0;
+
+ return 0;
+}
+
/* Backlight class callbacks; the core handles suspend/resume blanking. */
static const struct backlight_ops marubl_ops = {
    .options = BL_CORE_SUSPENDRESUME,
    .get_brightness = marubl_get_intensity,
    .update_status = marubl_send_intensity,
};
+
+int maru_lcd_get_power(struct lcd_device *ld)
+{
+ int ret = 0;
+
+ if (marubl_device->power_off) {
+ ret = FB_BLANK_POWERDOWN;
+ } else {
+ ret = FB_BLANK_UNBLANK;
+ }
+ return ret;
+}
+
/* LCD class callbacks; only power readback is supported. */
static struct lcd_ops maru_lcd_ops = {
    .get_power = maru_lcd_get_power,
};
+
+static ssize_t hbm_show_status(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int rc;
+
+ rc = sprintf(buf, "%s\n", marubl_device->hbm_on ? "on" : "off");
+ printk(KERN_INFO "[%s] get: %d\n", __func__, marubl_device->hbm_on);
+
+ return rc;
+}
+
+static ssize_t hbm_store_status(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret = -1;
+
+ if (strcmp(buf, "on") == 0) {
+ ret = 1;
+ } else if (strcmp(buf, "off") == 0) {
+ ret = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ /* If the same as the previous state, ignore it */
+ if (ret == marubl_device->hbm_on)
+ return count;
+
+ if (ret) {
+ /* Save previous level, set to MAX level */
+ mutex_lock(&marubl_device->bl_dev->ops_lock);
+ marubl_device->prev_brightness =
+ marubl_device->bl_dev->props.brightness;
+ marubl_device->bl_dev->props.brightness = MAX_BRIGHTNESS;
+ marubl_send_intensity(marubl_device->bl_dev);
+ mutex_unlock(&marubl_device->bl_dev->ops_lock);
+ } else {
+ /* Restore previous level */
+ mutex_lock(&marubl_device->bl_dev->ops_lock);
+ marubl_device->bl_dev->props.brightness =
+ marubl_device->prev_brightness;
+ marubl_send_intensity(marubl_device->bl_dev);
+ mutex_unlock(&marubl_device->bl_dev->ops_lock);
+ }
+ marubl_device->hbm_on = ret;
+ printk(KERN_INFO "[%s] hbm = %d\n", __func__, ret);
+
+ return count;
+}
+
/* Sysfs attribute 'hbm': world-readable, owner-writable (0644). */
static struct device_attribute hbm_device_attr =
    __ATTR(hbm, 0644, hbm_show_status, hbm_store_status);
+
/* PCI probe function. */
+static int marubl_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *ent)
+{
+ int ret;
+ struct backlight_device *bd;
+ struct lcd_device *ld;
+ struct backlight_properties props;
+
+ marubl_device = kmalloc(sizeof(struct marubl), GFP_KERNEL);
+ if (marubl_device == NULL) {
+ printk(KERN_ERR "marubl: kmalloc() is failed.\n");
+ return -ENOMEM;
+ }
+
+ memset(marubl_device, 0, sizeof(struct marubl));
+
+ ret = pci_enable_device(pci_dev);
+ if (ret < 0) {
+ printk(KERN_ERR "marubl: pci_enable_device is failed.\n");
+ kfree(marubl_device);
+ marubl_device = NULL;
+ return ret;
+ }
+
+ ret = -EIO;
+
+ /* 1 : IORESOURCE_MEM */
+ marubl_device->reg_start = pci_resource_start(pci_dev, 1);
+ marubl_device->reg_size = pci_resource_len(pci_dev, 1);
+ if (!request_mem_region(marubl_device->reg_start,
+ marubl_device->reg_size,
+ MARUBL_DRIVER_NAME)) {
+ pci_disable_device(pci_dev);
+ kfree(marubl_device);
+ marubl_device = NULL;
+ return ret;
+ }
+
+ /* memory areas mapped kernel space */
+ marubl_device->marubl_mmreg = ioremap(marubl_device->reg_start,
+ marubl_device->reg_size);
+ if (!marubl_device->marubl_mmreg) {
+ release_mem_region(marubl_device->reg_start,
+ marubl_device->reg_size);
+ pci_disable_device(pci_dev);
+ kfree(marubl_device);
+ marubl_device = NULL;
+ return ret;
+ }
+
+ pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, 64);
+ pci_set_master(pci_dev);
+
+ /*
+ * register High Brightness Mode
+ */
+ ret = device_create_file(&pci_dev->dev, &hbm_device_attr);
+ if (ret < 0) {
+ iounmap(marubl_device->marubl_mmreg);
+ release_mem_region(marubl_device->reg_start,
+ marubl_device->reg_size);
+ pci_disable_device(pci_dev);
+ kfree(marubl_device);
+ marubl_device = NULL;
+ return ret;
+ }
+
+ /*
+ * register backlight device
+ */
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.min_brightness = min_brightness;
+ props.max_brightness = max_brightness;
+ props.type = BACKLIGHT_PLATFORM;
+ bd = backlight_device_register("emulator",
+ &pci_dev->dev,
+ NULL,
+ &marubl_ops,
+ &props);
+ if (IS_ERR(bd)) {
+ ret = PTR_ERR(bd);
+ iounmap(marubl_device->marubl_mmreg);
+ release_mem_region(marubl_device->reg_start,
+ marubl_device->reg_size);
+ pci_disable_device(pci_dev);
+ kfree(marubl_device);
+ marubl_device = NULL;
+ return ret;
+ }
+
+ ld = lcd_device_register("emulator", &pci_dev->dev, NULL, &maru_lcd_ops);
+ if (IS_ERR(ld)) {
+ ret = PTR_ERR(ld);
+ iounmap(marubl_device->marubl_mmreg);
+ release_mem_region(marubl_device->reg_start,
+ marubl_device->reg_size);
+ pci_disable_device(pci_dev);
+ kfree(marubl_device);
+ marubl_device = NULL;
+ return ret;
+ }
+
+ bd->props.brightness = (unsigned int)readl(marubl_device->marubl_mmreg);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+
+ marubl_device->bl_dev = bd;
+ marubl_device->lcd_dev = ld;
+
+ printk(KERN_INFO "marubl: MARU Virtual Backlight driver is loaded.\n");
+ return 0;
+}
+
+static void marubl_exit(struct pci_dev *pcidev)
+{
+ /*
+ * Unregister backlight device
+ */
+ struct backlight_device *bd = marubl_device->bl_dev;
+ struct lcd_device *ld = marubl_device->lcd_dev;
+
+ bd->props.power = 0;
+ bd->props.brightness = 0;
+ backlight_update_status(bd);
+
+ lcd_device_unregister(ld);
+ backlight_device_unregister(bd);
+ device_remove_file(&pcidev->dev, &hbm_device_attr);
+
+ /*
+ * Unregister pci device & delete device
+ */
+ iounmap(marubl_device->marubl_mmreg);
+ release_mem_region(marubl_device->reg_start, marubl_device->reg_size);
+ pci_disable_device(pcidev);
+ kfree(marubl_device);
+ marubl_device = NULL;
+}
+
+/*
+ * register pci driver
+ */
+static struct pci_driver marubl_pci_driver = {
+ .name = MARUBL_DRIVER_NAME,
+ .id_table = marubl_pci_table,
+ .probe = marubl_probe,
+ .remove = marubl_exit,
+#ifdef CONFIG_PM
+ /* .suspend = marubl_suspend, */
+ /* .resume = marubl_resume, */
+#endif
+};
+
/* Module entry point: register the PCI driver. */
static int __init marubl_module_init(void)
{
    return pci_register_driver(&marubl_pci_driver);
}

/* Module exit point: unregister the PCI driver. */
static void __exit marubl_module_exit(void)
{
    pci_unregister_driver(&marubl_pci_driver);
}

/*
 * if this is compiled into the kernel, we need to ensure that the
 * class is registered before users of the class try to register lcd's
 */
module_init(marubl_module_init);
module_exit(marubl_module_exit);
+
+MODULE_LICENSE("GPL2");
+MODULE_AUTHOR("Jinhyung Jo <jinhyung.jo@samsung.com>");
+MODULE_DESCRIPTION("MARU Virtual Backlight Driver for x86");
--- /dev/null
+/*
+ * Virtual Codec Device Driver
+ *
+ * Copyright (c) 2013 - 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Kitae Kim <kt920.kim@samsung.com>
+ * SeokYeon Hwang <syeon.hwang@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+MODULE_DESCRIPTION("Virtual Codec Device Driver");
+MODULE_AUTHOR("Kitae KIM <kt920.kim@samsung.com");
+MODULE_LICENSE("GPL2");
+
#define DEVICE_NAME "brillcodec"
#define DRIVER_VERSION 3

// DEBUG
//#define CODEC_DEBUG

#ifdef CODEC_DEBUG
#define DEBUG(fmt, ...) \
    printk(KERN_DEBUG "[%s][DEBUG][%d]: " fmt, DEVICE_NAME, __LINE__, ##__VA_ARGS__)

#define INFO(fmt, ...) \
    printk(KERN_INFO "[%s][INFO][%d]: " fmt, DEVICE_NAME, __LINE__, ##__VA_ARGS__)
#else
/* Compiled out unless CODEC_DEBUG is defined above. */
#define DEBUG(fmt, ...)

#define INFO(fmt, ...)
#endif

#define ERROR(fmt, ...) \
    printk(KERN_ERR "[%s][ERROR][%d]: " fmt, DEVICE_NAME, __LINE__, ##__VA_ARGS__)

// support memory monopolizing
#define SUPPORT_MEMORY_MONOPOLIZING

/* vendor, device value for pci.*/
#define PCI_VENDOR_ID_TIZEN_EMUL 0xC9B5
#define PCI_DEVICE_ID_VIRTUAL_BRILL_CODEC 0x1040

/* interrupt identifier */
#define CODEC_IRQ_TASK 0x1f

// define critical section
DEFINE_SPINLOCK(critical_section);

/*
 * NOTE(review): both macros end with a ';', so call sites produce a
 * double semicolon -- harmless in statement position, but would break
 * inside a braceless if/else.
 */
#define ENTER_CRITICAL_SECTION(flags) spin_lock_irqsave(&critical_section, flags);
#define LEAVE_CRITICAL_SECTION(flags) spin_unlock_irqrestore(&critical_section, flags);
+
/* Register/command indices exchanged between driver and device. */
enum device_cmd { // driver and device
    DEVICE_CMD_API_INDEX = 0,
    DEVICE_CMD_CONTEXT_INDEX,
    DEVICE_CMD_DEVICE_MEM_OFFSET,
    DEVICE_CMD_GET_THREAD_STATE,
    DEVICE_CMD_GET_CTX_FROM_QUEUE,
    DEVICE_CMD_GET_DATA_FROM_QUEUE,
    DEVICE_CMD_RELEASE_CONTEXT,
    DEVICE_CMD_GET_ELEMENT,
    DEVICE_CMD_GET_CONTEXT_INDEX,
    DEVICE_CMD_GET_DEVICE_INFO,
    DEVICE_CMD_GET_PROFILE_STATUS,
};

/* Define i/o and api values. */
enum ioctl_cmd { // plugin and driver
    IOCTL_CMD_GET_VERSION = 0,
    IOCTL_CMD_GET_ELEMENTS_SIZE,
    IOCTL_CMD_GET_ELEMENTS,
    IOCTL_CMD_GET_CONTEXT_INDEX,
    IOCTL_CMD_SECURE_BUFFER,
    IOCTL_CMD_TRY_SECURE_BUFFER,
    IOCTL_CMD_RELEASE_BUFFER,
    IOCTL_CMD_INVOKE_API_AND_GET_DATA,
    IOCTL_CMD_GET_PROFILE_STATUS,
};

/* Codec operations selectable via ioctl_data.api_index. */
enum codec_api_index {
    INIT = 0,
    DECODE_VIDEO,
    ENCODE_VIDEO,
    DECODE_AUDIO,
    ENCODE_AUDIO,
    PICTURE_COPY,
    DEINIT,
    FLUSH_BUFFERS,
    DECODE_VIDEO_AND_PICTURE_COPY, // version 3
};
+
/* Argument block passed between the userspace plugin and the driver. */
struct ioctl_data {
    uint32_t api_index;   /* enum codec_api_index value to invoke */
    uint32_t ctx_index;   /* codec context slot */
    uint32_t mem_offset;  /* offset into the shared device memory */
    int32_t buffer_size;  /* payload size -- semantics set by caller;
                             NOTE(review): confirm sign convention */
} __attribute__((packed));

/* A cached blob of codec element data plus its size. */
struct codec_element {
    void *buf;
    uint32_t buf_size;
};

/* One codec context id owned by a user process. */
struct context_id {
    uint32_t id;

    struct list_head node;
};

/* Per-process bookkeeping: the contexts a user process has open. */
struct user_process_id {
    uintptr_t id;
    struct list_head ctx_id_mgr;

    struct list_head pid_node;
};

/* manage device memory block */
struct device_mem {
    uint32_t ctx_id;      /* context currently owning this unit */
    uint32_t mem_offset;  /* unit's offset in device memory */

    struct list_head entry;
};

/* A pool of equally-sized device memory units (small/medium/large). */
struct memory_block {
    uint32_t unit_size;   /* bytes per unit */
    uint32_t n_units;

    uint32_t start_offset; /* [start_offset, end_offset) in device mem */
    uint32_t end_offset;

    bool last_buf_secured;

    struct device_mem *units;

    struct list_head available; /* free units */
    struct list_head occupied;  /* units handed out to contexts */

    struct semaphore semaphore;          /* counts available units */
    struct semaphore last_buf_semaphore; /* guards the final unit */
    struct mutex access_mutex;           /* protects the two lists */
};
+
/* Driver-wide state for the single virtual codec PCI device. */
struct brillcodec_device {
    struct pci_dev *dev;

    /* I/O and Memory Region */
    unsigned int *ioaddr;   /* mapped register window */

    resource_size_t io_start;
    resource_size_t io_size;
    resource_size_t mem_start;  /* shared device memory BAR */
    resource_size_t mem_size;

    struct list_head user_pid_mgr;  /* list of struct user_process_id */

    /* task queue */
    struct memory_block memory_blocks[3];  /* SMALL / MEDIUM / LARGE */

    spinlock_t lock;

    uint32_t major_version;
    uint8_t minor_version;
    uint16_t memory_monopolizing;
    uint8_t enable_profile;
    bool codec_elem_cached;     /* codec_elem holds valid cached data */
    struct codec_element codec_elem;
};
+
+/* device memory */
+/* Maximum number of codec contexts; valid ids are 1..1023 (0 unused). */
+#define CODEC_CONTEXT_SIZE 1024
+
+/* count x unit size gives each class's share of the region */
+#define CODEC_S_DEVICE_MEM_COUNT 16 // small (256K) 4M
+#define CODEC_M_DEVICE_MEM_COUNT 8 // medium (2M) 16M
+#define CODEC_L_DEVICE_MEM_COUNT 3 // large (4M) 12M
+
+#define CODEC_S_DEVICE_MEM_SIZE 0x40000 // small
+#define CODEC_M_DEVICE_MEM_SIZE 0x200000 // medium
+#define CODEC_L_DEVICE_MEM_SIZE 0x400000 // large
+
+/* index into brillcodec_device.memory_blocks[] */
+enum block_size { SMALL, MEDIUM, LARGE };
+
+static struct brillcodec_device *brillcodec_device;
+/* per-context completion flags, set by bh_func(), consumed under wait_queue */
+static int context_flags[CODEC_CONTEXT_SIZE] = { 0, };
+
+// bottom-half
+static DECLARE_WAIT_QUEUE_HEAD(wait_queue);
+
+static struct workqueue_struct *bh_workqueue;
+static void bh_func(struct work_struct *work);
+static DECLARE_WORK(bh_work, bh_func);
+
+static void context_add(uintptr_t user_pid, uint32_t ctx_id);
+static int invoke_api_and_release_buffer(struct ioctl_data *opaque);
+
+/*
+ * Carve the contiguous device memory region into per-class units.
+ * Walks the three memory blocks in order, assigning each unit an
+ * increasing mem_offset and queueing it on its block's available list;
+ * records each block's [start_offset, end_offset) span as it goes.
+ */
+static void divide_device_memory(void)
+{
+	int blk_idx, unit_idx;
+	int next_offset = 0;
+
+	for (blk_idx = 0; blk_idx < 3; blk_idx++) {
+		struct memory_block *blk =
+			&brillcodec_device->memory_blocks[blk_idx];
+
+		blk->start_offset = next_offset;
+		for (unit_idx = 0; unit_idx < blk->n_units; unit_idx++) {
+			struct device_mem *unit = &blk->units[unit_idx];
+
+			unit->mem_offset = next_offset;
+			list_add_tail(&unit->entry, &blk->available);
+			next_offset += blk->unit_size;
+		}
+		blk->end_offset = next_offset;
+	}
+}
+
+/*
+ * Bottom half for the device interrupt: drain the device's completion
+ * queue and wake threads sleeping in invoke_api_and_release_buffer().
+ * Each non-zero value read from DEVICE_CMD_GET_CTX_FROM_QUEUE is a
+ * finished context id; 0 means the queue is empty.
+ */
+static void bh_func(struct work_struct *work)
+{
+	uint32_t value;
+
+	DEBUG("%s\n", __func__);
+	do {
+		value =
+			readl(brillcodec_device->ioaddr + DEVICE_CMD_GET_CTX_FROM_QUEUE);
+		DEBUG("read a value from device %x.\n", value);
+		if (value >= CODEC_CONTEXT_SIZE) {
+			/* BUGFIX: bound the device-supplied index; an
+			 * out-of-range id would write past context_flags[]. */
+			ERROR("invalid context id from device: %x\n", value);
+			break;
+		}
+		if (value) {
+			context_flags[value] = 1;
+			wake_up_interruptible(&wait_queue);
+		} else {
+			DEBUG("there is no available task\n");
+		}
+	} while (value);
+}
+
+/*
+ * Reserve one device-memory unit large enough for buf_size bytes.
+ *
+ * Returns 0 on success, 1 when the reserved "last" unit was handed out
+ * (the caller should fall back to heap buffering for output), -1 on
+ * failure (no unit free in non-blocking mode, interrupted wait, or an
+ * oversized request).  On success *offset receives the unit's offset
+ * into the shared device memory region.
+ */
+static int secure_device_memory(uint32_t ctx_id, uint32_t buf_size,
+	int non_blocking, uint32_t* offset)
+{
+	int ret = 0;
+	struct device_mem *unit = NULL;
+	enum block_size index = SMALL;
+	struct memory_block* block = NULL;
+
+	/* pick the smallest size class that fits the request */
+	if (buf_size < CODEC_S_DEVICE_MEM_SIZE) {
+		index = SMALL;
+	} else if (buf_size < CODEC_M_DEVICE_MEM_SIZE) {
+		index = MEDIUM;
+	} else if (buf_size < CODEC_L_DEVICE_MEM_SIZE) {
+		index = LARGE;
+	} else {
+		ERROR("invalid buffer size: %x\n", buf_size);
+		return -1;
+	}
+
+	block = &brillcodec_device->memory_blocks[index];
+
+	// decrease buffer_semaphore
+	DEBUG("before down buffer_sema: %d\n", block->semaphore.count);
+
+	if (non_blocking) {
+		if (down_trylock(&block->semaphore)) { // if 1
+			DEBUG("buffer is not available now\n");
+			return -1;
+		}
+	} else {
+		if (down_trylock(&block->semaphore)) { // if 1
+			/* all regular units busy: block on the reserved one */
+			if (down_interruptible(&block->last_buf_semaphore)) { // if -EINTR
+				DEBUG("down_interruptible interrupted\n");
+				return -1;
+			}
+			block->last_buf_secured = 1; // protected under last_buf_semaphore
+			ret = 1;
+			DEBUG("lock last buffer semaphore.\n");
+		}
+	}
+
+	DEBUG("after down buffer_sema: %d\n", block->semaphore.count);
+
+	mutex_lock(&block->access_mutex);
+	/* BUGFIX: list_first_entry() never yields NULL, even on an empty
+	 * list, so the original !unit check could not fire.  Use
+	 * list_first_entry_or_null() so the (theoretically unreachable,
+	 * since counts are guarded by the semaphores) empty case is safe. */
+	unit = list_first_entry_or_null(&block->available, struct device_mem, entry);
+	if (!unit) {
+		ret = -1;
+		if (block->last_buf_secured) {
+			up(&block->last_buf_semaphore);
+		} else {
+			up(&block->semaphore);
+		}
+		ERROR("failed to get memory block.\n");
+	} else {
+		unit->ctx_id = ctx_id;
+		list_move_tail(&unit->entry, &block->occupied);
+		*offset = unit->mem_offset;
+		DEBUG("get available memory region: 0x%x\n", ret);
+	}
+	mutex_unlock(&block->access_mutex);
+
+	return ret;
+}
+
+/*
+ * Return the unit at mem_offset to its block's available list and
+ * release the matching semaphore (the reserved last-buffer semaphore
+ * when that unit had been handed out, the regular one otherwise).
+ *
+ * Returns 0 on success, -1 when no occupied unit matches the offset,
+ * -2 for an out-of-range offset.
+ */
+static int release_device_memory(uint32_t mem_offset)
+{
+	struct device_mem *unit = NULL;
+	enum block_size index = SMALL;
+	struct memory_block *block = NULL;
+	bool found = false;
+	int ret = 0;
+
+	struct list_head *pos, *temp;
+
+	/* locate the size class: blocks are laid out back-to-back, so the
+	 * first end_offset above mem_offset identifies the class */
+	if (mem_offset < brillcodec_device->memory_blocks[0].end_offset) {
+		index = SMALL;
+	} else if (mem_offset < brillcodec_device->memory_blocks[1].end_offset) {
+		index = MEDIUM;
+	} else if (mem_offset < brillcodec_device->memory_blocks[2].end_offset) {
+		index = LARGE;
+	} else {
+		/* BUGFIX: fixed "offsset" typo in the error message */
+		ERROR("invalid memory offset. offset = 0x%x.\n", (uint32_t)mem_offset);
+		return -2;
+	}
+
+	block = &brillcodec_device->memory_blocks[index];
+
+	mutex_lock(&block->access_mutex);
+	if (!list_empty(&block->occupied)) {
+		list_for_each_safe(pos, temp, &block->occupied) {
+			unit = list_entry(pos, struct device_mem, entry);
+			if (unit->mem_offset == (uint32_t)mem_offset) {
+				unit->ctx_id = 0;
+				list_move_tail(&unit->entry, &block->available);
+
+				if (block->last_buf_secured) {
+					block->last_buf_secured = 0;
+					up(&block->last_buf_semaphore);
+					DEBUG("unlock last buffer semaphore.\n");
+				} else {
+					up(&block->semaphore);
+					DEBUG("unlock semaphore: %d.\n", block->semaphore.count);
+				}
+
+				found = true;
+				break;
+			}
+		}
+		if (!found) {
+			// can not enter here...
+			ERROR("cannot find this memory block. offset = 0x%x.\n", (uint32_t)mem_offset);
+			ret = -1;
+		}
+	} else {
+		// can not enter here...
+		ERROR("there is not any using memory block.\n");
+		ret = -1;
+	}
+	mutex_unlock(&block->access_mutex);
+
+	return ret;
+}
+
+/*
+ * Reclaim every occupied unit belonging to context_id across all three
+ * size classes (called on DEINIT and when a context's owner exits).
+ *
+ * NOTE(review): units are moved back to 'available' without up()ing
+ * block->semaphore / last_buf_semaphore, unlike release_device_memory();
+ * verify the semaphore counts cannot drift when contexts are torn down
+ * while still holding buffers.
+ */
+static void dispose_device_memory(uint32_t context_id)
+{
+	struct device_mem *unit = NULL;
+	int index = 0;
+	struct memory_block *block = NULL;
+	struct list_head *pos, *temp;
+
+	for (index = SMALL; index <= LARGE; index++) {
+		block = &brillcodec_device->memory_blocks[index];
+
+		mutex_lock(&block->access_mutex);
+		if (!list_empty(&block->occupied)) {
+			list_for_each_safe(pos, temp, &block->occupied) {
+				unit = list_entry(pos, struct device_mem, entry);
+				if (unit->ctx_id == context_id) {
+					unit->ctx_id = 0;
+					list_move_tail(&unit->entry, &block->available);
+					INFO("dispose memory block: %x", unit->mem_offset);
+				}
+			}
+		}
+		mutex_unlock(&block->access_mutex);
+	}
+}
+
+/* True when the given API index is flagged (via the device-info register,
+ * see get_device_info()) to keep its device-memory slot across calls.
+ * Compiled to a constant false unless SUPPORT_MEMORY_MONOPOLIZING is set. */
+static inline bool is_memory_monopolizing_api(int api_index) {
+#ifdef SUPPORT_MEMORY_MONOPOLIZING
+	uint16_t monopoly_mask = brillcodec_device->memory_monopolizing;
+
+	if (monopoly_mask & (1 << api_index)) {
+		DEBUG("API [%d] monopolize memory slot\n", api_index);
+		return true;
+	}
+#endif
+	return false;
+}
+
+/*
+ * Copy the codec element blob out of device data memory (BAR0) into a
+ * kernel buffer and cache it on brillcodec_device.  The blob is laid
+ * out as a uint32_t length followed by the data.  On any failure the
+ * cache simply stays unpopulated (codec_elem_cached remains false).
+ */
+static void cache_info(void)
+{
+	void __iomem *memaddr = NULL;
+	void *codec_info = NULL;
+	uint32_t codec_info_len = 0;
+
+	memaddr = ioremap(brillcodec_device->mem_start,
+			brillcodec_device->mem_size);
+	if (!memaddr) {
+		ERROR("ioremap failed\n");
+		return;
+	}
+
+	/* NOTE(review): length comes straight from the device; presumably
+	 * trusted, but an upper-bound sanity check may be worthwhile. */
+	codec_info_len = *(uint32_t *)memaddr;
+
+	codec_info =
+		kzalloc(codec_info_len, GFP_KERNEL);
+	if (!codec_info) {
+		ERROR("failed to allocate codec_info memory!\n");
+		iounmap(memaddr);	/* BUGFIX: mapping was leaked on this path */
+		return;
+	}
+
+	memcpy(codec_info, (uint8_t *)memaddr + sizeof(uint32_t), codec_info_len);
+	iounmap(memaddr);
+
+	brillcodec_device->codec_elem.buf = codec_info;
+	brillcodec_device->codec_elem.buf_size = codec_info_len;
+	brillcodec_device->codec_elem_cached = true;
+}
+
+/*
+ * Secure a device-memory unit for a context's output data and tell the
+ * device to pop that data into it.  Returns 0 on success, 1 when the
+ * reserved last unit was used (caller must heap-buffer), or -EINVAL
+ * when no unit could be secured.
+ */
+static long put_data_into_buffer(struct ioctl_data *data) {
+	long value = 0, ret = 0;
+	unsigned long flags;
+
+	DEBUG("read data into buffer\n");
+
+	/* memory-monopolizing APIs keep their existing slot, so no new
+	 * unit is secured and value stays 0 (success path) */
+	if (!is_memory_monopolizing_api(data->api_index)) {
+		value = secure_device_memory(data->ctx_index, data->buffer_size, 0, &data->mem_offset);
+	}
+
+	if (value < 0) {
+		DEBUG("failed to get available memory\n");
+		ret = -EINVAL;
+	} else {
+		DEBUG("send a request to pop data from device. %d\n", data->ctx_index);
+
+		/* the two-register command sequence must not interleave with
+		 * other writers, hence the critical section */
+		ENTER_CRITICAL_SECTION(flags);
+		writel((uint32_t)data->mem_offset,
+			brillcodec_device->ioaddr + DEVICE_CMD_DEVICE_MEM_OFFSET);
+		writel((uint32_t)data->ctx_index,
+			brillcodec_device->ioaddr + DEVICE_CMD_GET_DATA_FROM_QUEUE);
+		LEAVE_CRITICAL_SECTION(flags);
+	}
+
+	/* 1 means that only an available buffer is left at the moment.
+	 * gst-plugins-emulator will allocate heap buffer to store
+	 output buffer of codec.
+	 */
+	if (value == 1) {
+		ret = 1;
+	}
+
+	return ret;
+}
+
+/*
+ * unlocked_ioctl handler.  The command number is taken from _IOC_NR()
+ * of the request and matched against enum ioctl_cmd.  Returns 0 (or a
+ * positive value propagated from put_data_into_buffer) on success,
+ * -EIO on copy_{to,from}_user failure, -EINVAL on device errors or an
+ * unknown command.
+ */
+static long brillcodec_ioctl(struct file *file,
+	unsigned int request,
+	unsigned long arg)
+{
+	long value = 0, ret = 0;
+
+	int cmd = _IOC_NR(request);
+	DEBUG("%s ioctl cmd: %d\n", DEVICE_NAME, cmd);
+
+	switch (cmd) {
+	case IOCTL_CMD_GET_VERSION:
+	{
+		/* return the device major version to user space */
+		DEBUG("%s version: %d\n", DEVICE_NAME, brillcodec_device->major_version);
+
+		if (copy_to_user((void *)arg, &brillcodec_device->major_version, sizeof(int))) {
+			ERROR("ioctl: failed to copy data to user\n");
+			ret = -EIO;
+		}
+		break;
+	}
+	case IOCTL_CMD_GET_ELEMENTS_SIZE:
+	{
+		/* populate the codec element cache on first use and return
+		 * its size so user space can size the GET_ELEMENTS buffer */
+		uint32_t len = 0;
+		unsigned long flags;
+
+		DEBUG("request a device to get codec elements\n");
+
+		/* NOTE(review): cache_info() calls ioremap() and
+		 * kzalloc(GFP_KERNEL) inside this critical section — verify
+		 * ENTER_CRITICAL_SECTION does not hold a spinlock, since
+		 * both of those may sleep. */
+		ENTER_CRITICAL_SECTION(flags);
+		if (!brillcodec_device->codec_elem_cached) {
+			value = readl(brillcodec_device->ioaddr + DEVICE_CMD_GET_ELEMENT);
+			if (value < 0) {
+				ERROR("ioctl: failed to get elements. %d\n", (int)value);
+				ret = -EINVAL;
+			}
+			cache_info();
+		}
+		len = brillcodec_device->codec_elem.buf_size;
+		LEAVE_CRITICAL_SECTION(flags);
+
+		if (copy_to_user((void *)arg, &len, sizeof(uint32_t))) {
+			ERROR("ioctl: failed to copy data to user\n");
+			ret = -EIO;
+		}
+		break;
+	}
+	case IOCTL_CMD_GET_ELEMENTS:
+	{
+		/* copy the cached codec element blob to user space */
+		void *codec_elem = NULL;
+		uint32_t elem_len = brillcodec_device->codec_elem.buf_size;
+
+		DEBUG("request codec elements.\n");
+
+		codec_elem = brillcodec_device->codec_elem.buf;
+		if (!codec_elem) {
+			ERROR("ioctl: codec elements is empty\n");
+			ret = -EIO;
+		} else if (copy_to_user((void *)arg, codec_elem, elem_len)) {
+			ERROR("ioctl: failed to copy data to user\n");
+			ret = -EIO;
+		}
+		break;
+	}
+	case IOCTL_CMD_GET_CONTEXT_INDEX:
+	{
+		/* ask the device for a fresh context id and record it as
+		 * belonging to this open file */
+		DEBUG("request a device to get an index of codec context \n");
+
+		value = readl(brillcodec_device->ioaddr + DEVICE_CMD_GET_CONTEXT_INDEX);
+		if (value < 1 || value > (CODEC_CONTEXT_SIZE - 1)) {
+			ERROR("ioctl: failed to get proper context. %d\n", (int)value);
+			ret = -EINVAL;
+		} else {
+			// task_id & context_id
+			DEBUG("add context. ctx_id: %d\n", (int)value);
+
+			context_add((uintptr_t)file, value);
+			if (copy_to_user((void *)arg, &value, sizeof(uint32_t))) {
+				ERROR("ioctl: failed to copy data to user.\n");
+				ret = -EIO;
+			}
+		}
+		break;
+	}
+	case IOCTL_CMD_SECURE_BUFFER:
+	{
+		/* blocking reservation of a device memory unit */
+		uint32_t offset = 0;
+		struct ioctl_data opaque;
+
+		DEBUG("read data into small buffer\n");
+		if (copy_from_user(&opaque, (void *)arg, sizeof(struct ioctl_data))) {
+			ERROR("ioctl: failed to copy data from user\n");
+			ret = -EIO;
+			break;
+		}
+
+		value = secure_device_memory(opaque.ctx_index, opaque.buffer_size, 0, &offset);
+		if (value < 0) {
+			DEBUG("failed to get available memory\n");
+			ret = -EINVAL;
+		} else {
+			opaque.mem_offset = offset;
+			if (copy_to_user((void *)arg, &opaque, sizeof(struct ioctl_data))) {
+				ERROR("ioctl: failed to copy data to user.\n");
+				ret = -EIO;
+			}
+		}
+		break;
+	}
+	case IOCTL_CMD_TRY_SECURE_BUFFER:
+	{
+		/* non-blocking variant: fails immediately when no unit free */
+		uint32_t offset = 0;
+		struct ioctl_data opaque;
+
+		DEBUG("read data into small buffer\n");
+		if (copy_from_user(&opaque, (void *)arg, sizeof(struct ioctl_data))) {
+			ERROR("ioctl: failed to copy data from user\n");
+			ret = -EIO;
+			break;
+		}
+
+		value = secure_device_memory(opaque.ctx_index, opaque.buffer_size, 1, &offset);
+		if (value < 0) {
+			DEBUG("failed to get available memory\n");
+			ret = -EINVAL;
+		} else {
+			opaque.mem_offset = offset;
+			if (copy_to_user((void *)arg, &opaque, sizeof(struct ioctl_data))) {
+				ERROR("ioctl: failed to copy data to user.\n");
+				ret = -EIO;
+			}
+		}
+		break;
+	}
+	case IOCTL_CMD_RELEASE_BUFFER:
+	{
+		/* hand a previously secured unit back to its pool */
+		uint32_t mem_offset;
+
+		if (copy_from_user(&mem_offset, (void *)arg, sizeof(uint32_t))) {
+			ERROR("ioctl: failed to copy data from user\n");
+			ret = -EIO;
+			break;
+		}
+		ret = release_device_memory(mem_offset);
+		if (ret < 0) {
+			ERROR("failed to release device memory\n");
+		}
+		break;
+	}
+	case IOCTL_CMD_INVOKE_API_AND_GET_DATA:
+	{
+		/* run a codec API on the device, then (unless buffer_size is
+		 * -1) secure a fresh unit and pull the output into it.
+		 * NOTE(review): a put_data_into_buffer() result overwrites
+		 * any error from invoke_api_and_release_buffer(). */
+		struct ioctl_data opaque = { 0, };
+
+		if (copy_from_user(&opaque, (void *)arg, sizeof(struct ioctl_data))) {
+			ERROR("failed to get codec parameter info from user\n");
+			ret = -EIO;
+			break;
+		}
+
+		ret = invoke_api_and_release_buffer(&opaque);
+		if (ret < 0) {
+			ERROR("failed to invoke API : [%d]\n", opaque.api_index);
+		}
+
+		if (opaque.buffer_size != -1) {
+			ret = put_data_into_buffer(&opaque);
+			if (ret < 0) {
+				ret = -EIO;
+				break;
+			}
+
+			if (copy_to_user((void *)arg, &opaque, sizeof(struct ioctl_data))) {
+				ERROR("ioctl: failed to copy data to user.\n");
+				ret = -EIO;
+			}
+		}
+		break;
+	}
+	case IOCTL_CMD_GET_PROFILE_STATUS:
+	{
+		/* report whether device-side profiling is enabled */
+		DEBUG("%s profile status: %d\n", DEVICE_NAME, brillcodec_device->enable_profile);
+
+		if (copy_to_user((void *)arg, &brillcodec_device->enable_profile, sizeof(uint8_t))) {
+			ERROR("ioctl: failed to copy data to user\n");
+			ret = -EIO;
+		}
+		break;
+	}
+	default:
+		DEBUG("no available command.");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Program the device registers to run one codec API, release the input
+ * buffer for data-carrying APIs, then sleep until bh_func() flags the
+ * context as complete.  Returns 0 on success, -1 for an unknown API
+ * index, or a negative release_device_memory() error.
+ *
+ * NOTE(review): the wait_event_interruptible() return value is ignored;
+ * a signal lets this return before the device finishes, and the
+ * completion flag may then fire for a later call — verify.
+ */
+static int invoke_api_and_release_buffer(struct ioctl_data *data)
+{
+	int api_index, ctx_index;
+	unsigned long flags;
+	int ret = 0;
+
+	DEBUG("enter %s\n", __func__);
+
+	api_index = data->api_index;
+	ctx_index = data->ctx_index;
+
+	switch (api_index) {
+	/* APIs that carry an input buffer: write offset + context + api,
+	 * then give the input unit back (unless the API monopolizes it) */
+	case INIT:
+	case DECODE_VIDEO:
+	case ENCODE_VIDEO:
+	case DECODE_AUDIO:
+	case ENCODE_AUDIO:
+	case DECODE_VIDEO_AND_PICTURE_COPY:
+	{
+		ENTER_CRITICAL_SECTION(flags);
+		writel((uint32_t)data->mem_offset,
+			brillcodec_device->ioaddr + DEVICE_CMD_DEVICE_MEM_OFFSET);
+		writel((int32_t)data->ctx_index,
+			brillcodec_device->ioaddr + DEVICE_CMD_CONTEXT_INDEX);
+		writel((int32_t)data->api_index,
+			brillcodec_device->ioaddr + DEVICE_CMD_API_INDEX);
+		LEAVE_CRITICAL_SECTION(flags);
+
+		if (!is_memory_monopolizing_api(api_index)) {
+			ret = release_device_memory(data->mem_offset);
+		}
+
+		if (ret < 0) {
+			ERROR("failed to release device memory\n");
+		}
+
+		break;
+	}
+	/* buffer-less APIs: only context + api registers are written */
+	case PICTURE_COPY:
+	case DEINIT:
+	case FLUSH_BUFFERS:
+	{
+		ENTER_CRITICAL_SECTION(flags);
+		writel((int32_t)data->ctx_index,
+			brillcodec_device->ioaddr + DEVICE_CMD_CONTEXT_INDEX);
+		writel((int32_t)data->api_index,
+			brillcodec_device->ioaddr + DEVICE_CMD_API_INDEX);
+		LEAVE_CRITICAL_SECTION(flags);
+
+		break;
+	}
+	default:
+		DEBUG("invalid API commands: %d", api_index);
+		return -1;
+	}
+
+	/* block until the interrupt bottom half marks this context done */
+	wait_event_interruptible(wait_queue, context_flags[ctx_index] != 0);
+	context_flags[ctx_index] = 0;
+
+	if (api_index == DEINIT) {
+		/* context is gone; reclaim any units it still held */
+		dispose_device_memory(data->ctx_index);
+	}
+
+	DEBUG("leave %s\n", __func__);
+
+	return ret;
+}
+
+/*
+ * Map the device's shared data memory (BAR0) into user space.
+ * Validates that the requested window (offset + length) stays inside
+ * the region before handing it to remap_pfn_range().
+ */
+static int brillcodec_mmap(struct file *file, struct vm_area_struct *vm)
+{
+	unsigned long off;
+	unsigned long phys_addr;
+	unsigned long size;
+	int ret = -1;
+
+	size = vm->vm_end - vm->vm_start;
+	off = vm->vm_pgoff << PAGE_SHIFT;
+	/* BUGFIX: also reject mappings whose page offset pushes the window
+	 * past the end of the region; the original only checked 'size'. */
+	if (size > brillcodec_device->mem_size ||
+			off > brillcodec_device->mem_size - size) {
+		ERROR("over mapping size\n");
+		return -EINVAL;
+	}
+	phys_addr = (PAGE_ALIGN(brillcodec_device->mem_start) + off) >> PAGE_SHIFT;
+
+	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
+	ret = remap_pfn_range(vm, vm->vm_start, phys_addr,
+			size, vm->vm_page_prot);
+	if (ret < 0) {
+		ERROR("failed to remap page range\n");
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/*
+ * Shared interrupt handler.  Reads the device thread state register;
+ * when the CODEC_IRQ_TASK bit is set, schedules bh_func() on the
+ * driver workqueue to drain the completion queue outside IRQ context.
+ */
+static irqreturn_t irq_handler(int irq, void *dev_id)
+{
+	struct brillcodec_device *dev = dev_id;
+	unsigned long flags;
+	int state;
+
+	state = readl(dev->ioaddr + DEVICE_CMD_GET_THREAD_STATE);
+	if (!(state & CODEC_IRQ_TASK))
+		return IRQ_NONE;	/* not ours (line is shared) */
+
+	DEBUG("handle an interrupt from codec device.\n");
+
+	spin_lock_irqsave(&dev->lock, flags);
+	DEBUG("add bottom-half function to codec_workqueue\n");
+	queue_work(bh_workqueue, &bh_work);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Record that context ctx_id belongs to the open file identified by
+ * user_pid, by appending a context_id node to the matching
+ * user_process_id's ctx_id_mgr list.  Best-effort: allocation failure
+ * only logs an error.
+ */
+static void context_add(uintptr_t user_pid, uint32_t ctx_id)
+{
+	struct list_head *pos, *temp;
+	struct user_process_id *pid_elem = NULL;
+	struct context_id *cid_elem = NULL;
+	unsigned long flags;
+
+	DEBUG("enter: %s\n", __func__);
+
+	DEBUG("before inserting context. user_pid: %x, ctx_id: %d\n",
+		user_pid, ctx_id);
+
+	ENTER_CRITICAL_SECTION(flags);
+	if (!list_empty(&brillcodec_device->user_pid_mgr)) {
+		list_for_each_safe(pos, temp, &brillcodec_device->user_pid_mgr) {
+			pid_elem = list_entry(pos, struct user_process_id, pid_node);
+
+			DEBUG("add context. pid_elem: %p\n", pid_elem);
+			if (pid_elem && pid_elem->id == user_pid) {
+
+				DEBUG("add context. user_pid: %x, ctx_id: %d\n",
+					user_pid, ctx_id);
+
+				/* BUGFIX: use GFP_ATOMIC — we are inside the
+				 * critical section, so a sleeping allocation
+				 * is not safe here. */
+				cid_elem = kzalloc(sizeof(struct context_id), GFP_ATOMIC);
+				if (!cid_elem) {
+					ERROR("failed to allocate context_mgr memory\n");
+					/* BUGFIX: the original returned without
+					 * leaving the critical section */
+					LEAVE_CRITICAL_SECTION(flags);
+					return;
+				}
+
+				INIT_LIST_HEAD(&cid_elem->node);
+
+				DEBUG("add context. user_pid: %x, pid_elem: %p, cid_elem: %p, node: %p\n",
+					user_pid, pid_elem, cid_elem, &cid_elem->node);
+
+				cid_elem->id = ctx_id;
+				list_add_tail(&cid_elem->node, &pid_elem->ctx_id_mgr);
+			}
+		}
+	} else {
+		DEBUG("user_pid_mgr is empty\n");
+	}
+	LEAVE_CRITICAL_SECTION(flags);
+
+	DEBUG("leave: %s\n", __func__);
+}
+
+/*
+ * Tear down every codec context owned by one user_process_id: tell the
+ * device to release the context, reclaim its device-memory units, then
+ * unlink and free the context_id node.  Called from task_remove() with
+ * the driver critical section held.
+ */
+static void brillcodec_context_remove(struct user_process_id *pid_elem)
+{
+	struct list_head *pos, *temp;
+	struct context_id *cid_elem = NULL;
+
+	DEBUG("enter: %s\n", __func__);
+
+	if (!list_empty(&pid_elem->ctx_id_mgr)) {
+		list_for_each_safe(pos, temp, &pid_elem->ctx_id_mgr) {
+			cid_elem = list_entry(pos, struct context_id, node);
+			if (cid_elem) {
+				/* only ids in the valid 1..CODEC_CONTEXT_SIZE-1
+				 * range are reported to the device */
+				if (cid_elem->id > 0 && cid_elem->id < CODEC_CONTEXT_SIZE) {
+					DEBUG("remove context. ctx_id: %d\n", cid_elem->id);
+					writel(cid_elem->id,
+						brillcodec_device->ioaddr + DEVICE_CMD_RELEASE_CONTEXT);
+					dispose_device_memory(cid_elem->id);
+				}
+
+				DEBUG("delete node from ctx_id_mgr. %p\n", &cid_elem->node);
+				__list_del_entry(&cid_elem->node);
+				DEBUG("release cid_elem. %p\n", cid_elem);
+				kfree(cid_elem);
+			} else {
+				DEBUG("no context in the pid_elem\n");
+			}
+		}
+	} else {
+		DEBUG("ctx_id_mgr is empty. user_pid: %x\n", pid_elem->id);
+	}
+	DEBUG("leave: %s\n", __func__);
+}
+
+/*
+ * Register a new open file (identified by its struct file pointer cast
+ * to uintptr_t) on brillcodec_device->user_pid_mgr.  Best-effort:
+ * allocation failure only logs an error.
+ */
+static void task_add(uintptr_t user_pid)
+{
+	struct user_process_id *pid_elem = NULL;
+	unsigned long flags;
+
+	DEBUG("enter: %s\n", __func__);
+
+	/* BUGFIX: allocate and initialize before entering the critical
+	 * section.  The original called kzalloc(GFP_KERNEL) (which may
+	 * sleep) inside it and, worse, returned on failure without
+	 * LEAVE_CRITICAL_SECTION(), leaving the lock held forever. */
+	pid_elem = kzalloc(sizeof(struct user_process_id), GFP_KERNEL);
+	if (!pid_elem) {
+		ERROR("failed to allocate user_process memory\n");
+		return;
+	}
+
+	INIT_LIST_HEAD(&pid_elem->pid_node);
+	INIT_LIST_HEAD(&pid_elem->ctx_id_mgr);
+
+	DEBUG("add task. user_pid: %x, pid_elem: %p, pid_node: %p\n",
+		user_pid, pid_elem, &pid_elem->pid_node);
+	pid_elem->id = user_pid;
+
+	ENTER_CRITICAL_SECTION(flags);
+	list_add_tail(&pid_elem->pid_node, &brillcodec_device->user_pid_mgr);
+	LEAVE_CRITICAL_SECTION(flags);
+
+	DEBUG("leave: %s\n", __func__);
+}
+
+/*
+ * Unregister the open file identified by user_pid: release all codec
+ * contexts it owns, then unlink and free its user_process_id node.
+ */
+static void task_remove(uintptr_t user_pid)
+{
+	struct list_head *pos, *temp;
+	struct user_process_id *pid_elem = NULL;
+	unsigned long flags;
+
+	DEBUG("enter: %s\n", __func__);
+
+	ENTER_CRITICAL_SECTION(flags);
+	if (!list_empty(&brillcodec_device->user_pid_mgr)) {
+		list_for_each_safe(pos, temp, &brillcodec_device->user_pid_mgr) {
+			pid_elem = list_entry(pos, struct user_process_id, pid_node);
+			if (pid_elem) {
+				if (pid_elem->id == user_pid) {
+					// remove task and codec contexts that is running in the task.
+					DEBUG("remove task. user_pid: %x, pid_elem: %p\n",
+						user_pid, pid_elem);
+					brillcodec_context_remove(pid_elem);
+
+					/* BUGFIX: only unlink and free the entry
+					 * belonging to this task.  The original
+					 * freed EVERY node in user_pid_mgr on any
+					 * release, destroying the state of all
+					 * other open files. */
+					DEBUG("move pid_node from user_pid_mgr. %p\n", &pid_elem->pid_node);
+					__list_del_entry(&pid_elem->pid_node);
+					DEBUG("release pid_elem. %p\n", pid_elem);
+					kfree(pid_elem);
+				}
+			} else {
+				DEBUG("no task in the user_pid_mgr\n");
+			}
+		}
+	} else {
+		DEBUG("user_pid_mgr is empty\n");
+	}
+	LEAVE_CRITICAL_SECTION(flags);
+
+	DEBUG("leave: %s\n", __func__);
+}
+
+
+/*
+ * Device node open: register the shared interrupt handler and record
+ * this file on the task list.
+ *
+ * NOTE(review): request_irq() is called on every open with the same
+ * dev_id; with multiple simultaneous opens the handler is registered
+ * multiple times and the first release frees one registration — verify
+ * the intended open/release pairing.
+ */
+static int brillcodec_open(struct inode *inode, struct file *file)
+{
+	DEBUG("open! struct file: %p\n", file);
+
+	/* register interrupt handler */
+	if (request_irq(brillcodec_device->dev->irq, irq_handler,
+			IRQF_SHARED, DEVICE_NAME, brillcodec_device)) {
+		ERROR("failed to register irq handle\n");
+		return -EBUSY;
+	}
+
+	/* the struct file pointer doubles as the task id */
+	task_add((uintptr_t)file);
+
+	try_module_get(THIS_MODULE);
+
+	return 0;
+}
+
+/*
+ * Device node close: unregister the interrupt handler, release every
+ * context and device-memory unit this file owned, and drop the module
+ * reference taken in open.
+ */
+static int brillcodec_release(struct inode *inode, struct file *file)
+{
+	DEBUG("close! struct file: %p\n", file);
+
+	/* free irq */
+	if (brillcodec_device->dev->irq) {
+		DEBUG("free registered irq\n");
+		free_irq(brillcodec_device->dev->irq, brillcodec_device);
+	}
+
+	DEBUG("before removing task: %x\n", (uint32_t)file);
+	/* free resource */
+	task_remove((uintptr_t)file);
+
+	module_put(THIS_MODULE);
+
+	return 0;
+}
+
+/* define file operations for CODEC device node */
+const struct file_operations brillcodec_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = brillcodec_ioctl,
+	.open = brillcodec_open,
+	.mmap = brillcodec_mmap,
+	.release = brillcodec_release,
+};
+
+/* misc char device exposing the codec to user space (world read/write) */
+static struct miscdevice codec_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = DEVICE_NAME,
+	.fops = &brillcodec_fops,
+	.mode = S_IRUGO | S_IWUGO,
+};
+
+/*
+ * Read and cache device version, memory-monopolizing mask, and profile
+ * status from the device registers.  The info register packs:
+ * bits 31..16 = monopolizing mask, 15..8 = major, 7..0 = minor.
+ * Returns false when the device major version does not match
+ * DRIVER_VERSION.
+ */
+static bool get_device_info(void)
+{
+	uint32_t reg;
+
+	reg = readl(brillcodec_device->ioaddr + DEVICE_CMD_GET_DEVICE_INFO);
+
+	brillcodec_device->major_version = (reg >> 8) & 0xFF;
+	brillcodec_device->minor_version = (uint8_t)(reg & 0xFF);
+
+	if (brillcodec_device->major_version != DRIVER_VERSION) {
+		ERROR("Version mismatch. driver version [%d], device version [%d.%d].\n",
+			DRIVER_VERSION, brillcodec_device->major_version,
+			brillcodec_device->minor_version);
+		return false;
+	}
+
+	// check memory monopolizing API
+	brillcodec_device->memory_monopolizing = reg >> 16;
+
+	// check profile status
+	reg = readl(brillcodec_device->ioaddr + DEVICE_CMD_GET_PROFILE_STATUS);
+	brillcodec_device->enable_profile = (uint8_t)reg;
+
+	return true;
+}
+
+/*
+ * PCI probe: allocate the global device state, set up the three memory
+ * block pools, claim BAR0 (shared data memory) and BAR1 (registers),
+ * verify the device version, and register the misc device node.
+ *
+ * Fixes over the original:
+ *  - the per-block units allocation is now checked (it was dereferenced
+ *    unchecked in divide_device_memory());
+ *  - a get_device_info() failure now unwinds the mapping/regions
+ *    instead of leaking them;
+ *  - all error paths free brillcodec_device via goto-style unwinding.
+ */
+static int brillcodec_probe(struct pci_dev *pci_dev,
+	const struct pci_device_id *pci_id)
+{
+	int ret = 0;
+	int index = 0;
+
+	brillcodec_device =
+		kzalloc(sizeof(struct brillcodec_device), GFP_KERNEL);
+	if (!brillcodec_device) {
+		ERROR("Failed to allocate memory for codec.\n");
+		return -ENOMEM;
+	}
+
+	brillcodec_device->dev = pci_dev;
+
+	INIT_LIST_HEAD(&brillcodec_device->user_pid_mgr);
+
+	// initialize memory block structures
+	brillcodec_device->memory_blocks[0].unit_size = CODEC_S_DEVICE_MEM_SIZE;
+	brillcodec_device->memory_blocks[0].n_units = CODEC_S_DEVICE_MEM_COUNT;
+	brillcodec_device->memory_blocks[1].unit_size = CODEC_M_DEVICE_MEM_SIZE;
+	brillcodec_device->memory_blocks[1].n_units = CODEC_M_DEVICE_MEM_COUNT;
+	brillcodec_device->memory_blocks[2].unit_size = CODEC_L_DEVICE_MEM_SIZE;
+	brillcodec_device->memory_blocks[2].n_units = CODEC_L_DEVICE_MEM_COUNT;
+
+	for (index = 0; index < 3; ++index) {
+		struct memory_block *block = &brillcodec_device->memory_blocks[index];
+		block->units =
+			kzalloc(sizeof(struct device_mem) * block->n_units, GFP_KERNEL);
+		if (!block->units) {
+			/* BUGFIX: was unchecked; divide_device_memory()
+			 * would dereference NULL */
+			ERROR("failed to allocate memory for device_mem units\n");
+			ret = -ENOMEM;
+			goto err_free_device;
+		}
+
+		block->last_buf_secured = 0;
+
+		INIT_LIST_HEAD(&block->available);
+		INIT_LIST_HEAD(&block->occupied);
+		/* one unit per block is held back behind last_buf_semaphore */
+		sema_init(&block->semaphore, block->n_units - 1);
+		sema_init(&block->last_buf_semaphore, 1);
+		mutex_init(&block->access_mutex);
+	}
+
+	divide_device_memory();
+
+	spin_lock_init(&brillcodec_device->lock);
+
+	if ((ret = pci_enable_device(pci_dev))) {
+		ERROR("pci_enable_device failed\n");
+		goto err_free_device;
+	}
+	pci_set_master(pci_dev);
+
+	brillcodec_device->mem_start = pci_resource_start(pci_dev, 0);
+	brillcodec_device->mem_size = pci_resource_len(pci_dev, 0);
+	if (!brillcodec_device->mem_start) {
+		ERROR("pci_resource_start failed\n");
+		ret = -ENODEV;
+		goto err_disable_device;
+	}
+
+	if (!request_mem_region(brillcodec_device->mem_start,
+			brillcodec_device->mem_size,
+			DEVICE_NAME)) {
+		ERROR("request_mem_region failed\n");
+		ret = -EINVAL;
+		goto err_disable_device;
+	}
+
+	brillcodec_device->io_start = pci_resource_start(pci_dev, 1);
+	brillcodec_device->io_size = pci_resource_len(pci_dev, 1);
+	if (!brillcodec_device->io_start) {
+		ERROR("pci_resource_start failed\n");
+		ret = -ENODEV;
+		goto err_release_mem;
+	}
+
+	if (!request_mem_region(brillcodec_device->io_start,
+			brillcodec_device->io_size,
+			DEVICE_NAME)) {
+		ERROR("request_io_region failed\n");
+		ret = -EINVAL;
+		goto err_release_mem;
+	}
+
+	brillcodec_device->ioaddr =
+		ioremap_nocache(brillcodec_device->io_start, brillcodec_device->io_size);
+	if (!brillcodec_device->ioaddr) {
+		ERROR("ioremap failed\n");
+		ret = -EINVAL;
+		goto err_release_io;
+	}
+
+	if (!get_device_info()) {
+		/* BUGFIX: this path leaked the mapping, both regions, the
+		 * enabled PCI device and the device struct */
+		ret = -EINVAL;
+		goto err_iounmap;
+	}
+
+	if ((ret = misc_register(&codec_dev))) {
+		ERROR("cannot register codec as misc\n");
+		goto err_iounmap;
+	}
+
+	printk(KERN_INFO "%s: driver is probed. driver version [%d], device version [%d.%d]\n",
+		DEVICE_NAME, DRIVER_VERSION, brillcodec_device->major_version,
+		brillcodec_device->minor_version);
+
+	if (brillcodec_device->enable_profile) {
+		printk(KERN_INFO "%s: profile enabled\n", DEVICE_NAME);
+	}
+
+	return 0;
+
+err_iounmap:
+	iounmap(brillcodec_device->ioaddr);
+err_release_io:
+	release_mem_region(brillcodec_device->io_start, brillcodec_device->io_size);
+err_release_mem:
+	release_mem_region(brillcodec_device->mem_start, brillcodec_device->mem_size);
+err_disable_device:
+	pci_disable_device(pci_dev);
+err_free_device:
+	for (index = 0; index < 3; ++index)
+		kfree(brillcodec_device->memory_blocks[index].units);
+	kfree(brillcodec_device);
+	brillcodec_device = NULL;
+	return ret;
+}
+
+/*
+ * PCI remove: unmap registers, release both BAR regions, free cached
+ * codec elements and the device struct, then deregister the misc node
+ * and disable the PCI device.
+ *
+ * NOTE(review): brillcodec_device is freed but not reset to NULL, and
+ * the per-block 'units' arrays are not freed (see the FIXME below) —
+ * verify on driver unload.
+ */
+static void brillcodec_remove(struct pci_dev *pci_dev)
+{
+	if (brillcodec_device) {
+		if (brillcodec_device->ioaddr) {
+			iounmap(brillcodec_device->ioaddr);
+			brillcodec_device->ioaddr = NULL;
+		}
+
+		if (brillcodec_device->io_start) {
+			release_mem_region(brillcodec_device->io_start,
+					brillcodec_device->io_size);
+			brillcodec_device->io_start = 0;
+		}
+
+		if (brillcodec_device->mem_start) {
+			release_mem_region(brillcodec_device->mem_start,
+					brillcodec_device->mem_size);
+			brillcodec_device->mem_start = 0;
+		}
+
+/*
+	if (brillcodec_device->units) {
+// FIXME
+//		kfree(brillcodec_device->elem);
+		brillcodec_device->units= NULL;
+	}
+*/
+
+		if (brillcodec_device->codec_elem.buf) {
+			kfree(brillcodec_device->codec_elem.buf);
+			brillcodec_device->codec_elem.buf = NULL;
+		}
+
+		kfree(brillcodec_device);
+	}
+
+	misc_deregister(&codec_dev);
+	pci_disable_device(pci_dev);
+}
+
+/* PCI ids matched by this driver: the Tizen emulator's virtual codec */
+static struct pci_device_id brillcodec_pci_table[] = {
+	{
+		.vendor = PCI_VENDOR_ID_TIZEN_EMUL,
+		.device = PCI_DEVICE_ID_VIRTUAL_BRILL_CODEC,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(pci, brillcodec_pci_table);
+
+/* PCI driver descriptor wiring probe/remove to the id table above */
+static struct pci_driver driver = {
+	.name = DEVICE_NAME,
+	.id_table = brillcodec_pci_table,
+	.probe = brillcodec_probe,
+	.remove = brillcodec_remove,
+};
+
+/*
+ * Module init: create the bottom-half workqueue, then register the PCI
+ * driver.  The workqueue is destroyed again if registration fails.
+ */
+static int __init brillcodec_init(void)
+{
+	int ret;
+
+	printk(KERN_INFO "%s: driver is initialized.\n", DEVICE_NAME);
+
+	bh_workqueue = create_workqueue ("maru_brillcodec");
+	if (!bh_workqueue) {
+		ERROR("failed to allocate workqueue\n");
+		return -ENOMEM;
+	}
+
+	ret = pci_register_driver(&driver);
+	if (ret) {
+		/* BUGFIX: the workqueue was leaked when registration failed */
+		destroy_workqueue(bh_workqueue);
+		bh_workqueue = NULL;
+	}
+	return ret;
+}
+
+/*
+ * Module exit: unregister the PCI driver FIRST so no interrupt can
+ * queue new work, then destroy the bottom-half workqueue.
+ * BUGFIX: the original destroyed the workqueue before unregistering,
+ * leaving a window where irq_handler() could queue_work() on a
+ * destroyed workqueue.
+ */
+static void __exit brillcodec_exit(void)
+{
+	printk(KERN_INFO "%s: driver is finalized.\n", DEVICE_NAME);
+
+	pci_unregister_driver(&driver);
+
+	if (bh_workqueue) {
+		destroy_workqueue (bh_workqueue);
+		bh_workqueue = NULL;
+	}
+}
+/* standard module entry/exit hooks */
+module_init(brillcodec_init);
+module_exit(brillcodec_exit);
--- /dev/null
+/*
+ * MARU Virtual Camera Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Jo <jinhyung.jo@samsung.com>
+ * Sangho Park <sangho1206.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+/*
+ * Some code based on vivi driver or videobuf_vmalloc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <linux/interrupt.h>
+#include <linux/videodev2.h>
+#include <media/videobuf-core.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+
+/* default verbosity threshold for marucam_dbg() */
+#define MARUCAM_DEBUG_LEVEL 0
+
+/* runtime debug level; messages print when debug >= level */
+static unsigned debug;
+
+#define MARUCAM_MODULE_NAME "marucam"
+
+/* logging helpers; err/dbg include function name and line number */
+#define marucam_err(fmt, arg...) \
+	printk(KERN_ERR "%s: error [%s:%d]: " fmt, MARUCAM_MODULE_NAME, \
+		__func__, __LINE__, ##arg)
+
+#define marucam_warn(fmt, arg...) \
+	printk(KERN_WARNING "%s: " fmt, MARUCAM_MODULE_NAME, ##arg)
+
+#define marucam_info(fmt, arg...) \
+	printk(KERN_INFO "%s: " fmt, MARUCAM_MODULE_NAME, ##arg)
+
+#define marucam_dbg(level, fmt, arg...) \
+	do { \
+		if (debug >= (level)) { \
+			printk(KERN_DEBUG "%s: [%s:%d]: " fmt, \
+				MARUCAM_MODULE_NAME, \
+				__func__, __LINE__, ##arg); \
+		} \
+	} while (0)
+
+/* driver version reported through V4L2 (1.0.2) */
+#define MARUCAM_MAJOR_VERSION 1
+#define MARUCAM_MINOR_VERSION 0
+#define MARUCAM_RELEASE 2
+#define MARUCAM_VERSION \
+	KERNEL_VERSION(MARUCAM_MAJOR_VERSION, \
+		MARUCAM_MINOR_VERSION, MARUCAM_RELEASE)
+
+MODULE_DESCRIPTION("MARU Virtual Camera Driver");
+MODULE_AUTHOR("Jinhyung Jo <jinhyung.jo@samsung.com>");
+MODULE_LICENSE("GPL");
+
+/*
+ * Basic structures
+ */
+/* Device command register offsets; one per virtual-camera operation.
+ * Must stay in sync with the device-side (emulator) implementation. */
+#define MARUCAM_INIT 0x00
+#define MARUCAM_OPEN 0x04
+#define MARUCAM_CLOSE 0x08
+#define MARUCAM_ISR 0x0C
+#define MARUCAM_STREAMON 0x10
+#define MARUCAM_STREAMOFF 0x14
+#define MARUCAM_S_PARM 0x18
+#define MARUCAM_G_PARM 0x1C
+#define MARUCAM_ENUM_FMT 0x20
+#define MARUCAM_TRY_FMT 0x24
+#define MARUCAM_S_FMT 0x28
+#define MARUCAM_G_FMT 0x2C
+#define MARUCAM_QUERYCTRL 0x30
+#define MARUCAM_S_CTRL 0x34
+#define MARUCAM_G_CTRL 0x38
+#define MARUCAM_ENUM_FSIZES 0x3C
+#define MARUCAM_ENUM_FINTV 0x40
+#define MARUCAM_REQFRAME 0x44
+#define MARUCAM_EXIT 0x48
+
+/* streaming state of the device */
+enum marucam_opstate {
+	S_IDLE = 0,
+	S_RUNNING = 1
+};
+
+/* Per-device state for one virtual camera instance. */
+struct marucam_device {
+	struct v4l2_device v4l2_dev;
+
+	unsigned char dev_index;	/* index into marucam_instance[] */
+	spinlock_t slock;		/* protects the active buffer list */
+	struct mutex mlock;		/* serializes file operations */
+	enum marucam_opstate opstate;	/* S_IDLE or S_RUNNING */
+	unsigned int in_use;		/* open count / busy flag */
+
+	struct video_device *vfd;
+	struct pci_dev *pdev;
+
+	void __iomem *mmregs;		/* mapped command registers */
+	void __iomem *args;		/* mapped argument area */
+	resource_size_t mem_base;	/* frame memory physical base */
+	resource_size_t mem_size;
+	resource_size_t iomem_size;
+
+	/* current capture format state */
+	enum v4l2_buf_type type;
+	unsigned int width;
+	unsigned int height;
+	unsigned int pixelformat;
+	struct videobuf_queue vb_vidq;
+
+	struct list_head active;	/* queued videobuf buffers */
+};
+
+/*
+ * Device instance table.  Sized for two slots, though in practice only
+ * a single instance is used.
+ */
+static struct marucam_device *marucam_instance[2];
+
+/*
+ * The code below has been modified from 'videobuf_vmalloc.c'.
+ */
+
+/* sanity stamp placed in every videobuf_marucam_memory */
+#define MAGIC_MARUCAM_MEM 0x18221223
+
+/* crash loudly (BUG) when a buffer's magic stamp is wrong */
+#define MAGIC_CHECK(is, should)	\
+	do { \
+		if (unlikely((is) != (should))) { \
+			marucam_err("invalid magic number:" \
+				" %x (expected %x)\n", is, should); \
+			BUG(); \
+		} \
+	} while (0)
+
+/* private per-buffer state appended to each videobuf_buffer allocation */
+struct videobuf_marucam_memory {
+	u32 magic;	/* always MAGIC_MARUCAM_MEM */
+	u32 mapped;	/* non-zero while user space has the buffer mmap'd */
+};
+
+/* VMA open callback: account one more user mapping of this buffer. */
+static void videobuf_vm_open(struct vm_area_struct *vma)
+{
+	struct videobuf_mapping *mapping = vma->vm_private_data;
+
+	mapping->count += 1;
+}
+
+/*
+ * VMA close callback: drop one mapping reference; when the last one
+ * goes away, cancel streaming if needed, detach every buffer that used
+ * this mapping (clearing its 'mapped' flag) and free the mapping.
+ */
+static void videobuf_vm_close(struct vm_area_struct *vma)
+{
+	int i;
+	struct videobuf_mapping *map = vma->vm_private_data;
+	struct videobuf_queue *q = map->q;
+
+	map->count--;
+	if (0 == map->count) {
+		struct videobuf_marucam_memory *mem;
+
+		/* last reference: tear down under the queue lock */
+		mutex_lock(&q->vb_lock);
+
+		if (q->streaming)
+			videobuf_queue_cancel(q);
+
+		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+			if (NULL == q->bufs[i])
+				continue;
+
+			if (q->bufs[i]->map != map)
+				continue;
+
+			mem = q->bufs[i]->priv;
+			if (mem) {
+				MAGIC_CHECK(mem->magic, MAGIC_MARUCAM_MEM);
+				mem->mapped = 0;
+			}
+
+			q->bufs[i]->map = NULL;
+			q->bufs[i]->baddr = 0;
+		}
+
+		kfree(map);
+
+		mutex_unlock(&q->vb_lock);
+	}
+
+	return;
+}
+
+static const struct vm_operations_struct videobuf_vm_ops = {
+ .open = videobuf_vm_open,
+ .close = videobuf_vm_close,
+};
+
+/*
+ * Allocate a videobuf_buffer with our private bookkeeping area appended
+ * in the same zeroed chunk.  Returns NULL on allocation failure.
+ */
+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
+{
+    struct videobuf_buffer *vb;
+    struct videobuf_marucam_memory *mem;
+
+    vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
+    if (!vb) {
+        marucam_err("memory allocation failed for a video buffer\n");
+        return NULL;
+    }
+
+    /* The private area lives directly behind the videobuf_buffer. */
+    mem = (struct videobuf_marucam_memory *)((char *)vb + size);
+    vb->priv = mem;
+    mem->magic = MAGIC_MARUCAM_MEM;
+
+    return vb;
+}
+
+/*
+ * iolock hook: only V4L2_MEMORY_MMAP buffers are supported, and the
+ * buffer must already have been mmapped by user space.
+ */
+static int __videobuf_iolock(struct videobuf_queue *q,
+            struct videobuf_buffer *vb,
+            struct v4l2_framebuffer *fbuf)
+{
+    struct videobuf_marucam_memory *mem = vb->priv;
+
+    BUG_ON(!mem);
+    MAGIC_CHECK(mem->magic, MAGIC_MARUCAM_MEM);
+
+    if (vb->memory != V4L2_MEMORY_MMAP) {
+        marucam_err("V4L2_MEMORY_MMAP only supported\n");
+        return -EINVAL;
+    }
+
+    if (!mem->mapped) {
+        marucam_err("memory is not mapped\n");
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+/*
+ * Map one capture buffer's slice of the device frame memory (BAR 0)
+ * into the caller's VMA.
+ *
+ * Fixes over the original: on remap_pfn_range() failure buf->map was
+ * left pointing at freed memory (and baddr stale) -- both links are now
+ * undone -- and the real error code is propagated instead of a blanket
+ * -ENOMEM.
+ */
+static int __videobuf_mmap_mapper(struct videobuf_queue *q,
+        struct videobuf_buffer *buf, struct vm_area_struct *vma)
+{
+    struct videobuf_marucam_memory *mem;
+    struct videobuf_mapping *map;
+    int retval;
+    unsigned long size;
+
+    map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
+    if (NULL == map) {
+        marucam_err("memory allocation failed for a video buffer mapping\n");
+        return -ENOMEM;
+    }
+
+    buf->map = map;
+    map->q = q;
+
+    buf->baddr = vma->vm_start;
+
+    mem = buf->priv;
+    BUG_ON(!mem);
+    mem->mapped = 1;
+    MAGIC_CHECK(mem->magic, MAGIC_MARUCAM_MEM);
+
+    /* Mapping length in bytes, rounded up to a whole page. */
+    size = PAGE_ALIGN(vma->vm_end - vma->vm_start);
+
+    vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+    /*
+     * NOTE(review): vm_pgoff is a page offset but is added to the byte
+     * base address before the shift; this reproduces the original
+     * computation -- confirm against the host-side layout before
+     * changing it.
+     */
+    retval = remap_pfn_range(vma, vma->vm_start,
+            (((struct marucam_device *)q->priv_data)->mem_base
+            + vma->vm_pgoff) >> PAGE_SHIFT,
+            size, vma->vm_page_prot);
+    if (retval < 0) {
+        marucam_err("remap failed: %d\n", retval);
+        mem->mapped = 0;
+        /* Undo the links so nothing dangles on the freed mapping. */
+        buf->map = NULL;
+        buf->baddr = 0;
+        kfree(map);
+        return retval;
+    }
+
+    vma->vm_ops = &videobuf_vm_ops;
+    vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+    vma->vm_private_data = map;
+
+    videobuf_vm_open(vma);
+
+    return 0;
+}
+
+/* Custom videobuf backend: alloc, iolock and mmap handled above. */
+static struct videobuf_qtype_ops qops = {
+    .magic = MAGIC_QTYPE_OPS,
+    .alloc_vb = __videobuf_alloc_vb,
+    .iolock = __videobuf_iolock,
+    .mmap_mapper = __videobuf_mmap_mapper,
+};
+
+/* Thin wrapper: initialize a videobuf queue with the marucam backend. */
+void videobuf_queue_marucam_init(struct videobuf_queue *q,
+            struct videobuf_queue_ops *ops,
+            void *dev,
+            spinlock_t *irqlock,
+            enum v4l2_buf_type type,
+            enum v4l2_field field,
+            unsigned int msize,
+            void *priv,
+            struct mutex *ext_lock)
+{
+    videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
+        priv, &qops, ext_lock);
+}
+
+
+/*
+ * interrupt handling
+ */
+
+/*
+ * Frame size in bytes for the currently configured resolution and
+ * pixel format.  Unknown formats fall back to the 16 bpp case.
+ */
+static int get_image_size(struct marucam_device *dev)
+{
+    unsigned int pixels = dev->width * dev->height;
+
+    switch (dev->pixelformat) {
+    case V4L2_PIX_FMT_RGB24:
+    case V4L2_PIX_FMT_BGR24:
+        return pixels * 3;          /* 24 bpp packed RGB */
+    case V4L2_PIX_FMT_YUV420:
+    case V4L2_PIX_FMT_YVU420:
+    case V4L2_PIX_FMT_NV12:
+        return (pixels * 3) / 2;    /* 12 bpp planar YUV */
+    case V4L2_PIX_FMT_YUYV:
+    default:
+        return pixels * 2;          /* 16 bpp packed YUV */
+    }
+}
+
+/*
+ * Interrupt-time completion: pop the oldest buffer from the active list,
+ * mark it done (or invalid if the ISR flags an error) and wake its waiter.
+ * Runs under q->irqlock with interrupts saved.
+ */
+static void marucam_fillbuf(struct marucam_device *dev, uint32_t isr)
+{
+    struct videobuf_queue *q1 = &dev->vb_vidq;
+    struct videobuf_buffer *buf = NULL;
+    unsigned long flags = 0;
+
+    spin_lock_irqsave(q1->irqlock, flags);
+    if (dev->opstate != S_RUNNING) {
+        marucam_err("state is not S_RUNNING\n");
+        spin_unlock_irqrestore(q1->irqlock, flags);
+        return;
+    }
+    if (list_empty(&dev->active)) {
+        marucam_err("active list is empty\n");
+        spin_unlock_irqrestore(q1->irqlock, flags);
+        return;
+    }
+
+    buf = list_entry(dev->active.next, struct videobuf_buffer, queue);
+    /* Nobody waiting means nobody asked for this frame; drop the IRQ. */
+    if (!waitqueue_active(&buf->done)) {
+        marucam_err("wait queue list is empty\n");
+        spin_unlock_irqrestore(q1->irqlock, flags);
+        return;
+    }
+
+    list_del(&buf->queue);
+
+    /* Bit 3 of the ISR flags a device-side error for this frame. */
+    if (isr & 0x08) {
+        marucam_err("invalid state\n");
+        buf->state = 0xFF; /* invalid state */
+    } else {
+        marucam_dbg(2, "video buffer is filled\n");
+        buf->state = VIDEOBUF_DONE;
+    }
+    do_gettimeofday(&buf->ts);
+    buf->field_count++;
+    wake_up_interruptible(&buf->done);
+    spin_unlock_irqrestore(q1->irqlock, flags);
+}
+
+/*
+ * Shared IRQ handler: read the interrupt status register and complete a
+ * buffer if the device raised this interrupt, otherwise disown it.
+ */
+static irqreturn_t marucam_irq_handler(int irq, void *dev_id)
+{
+    struct marucam_device *dev = dev_id;
+    uint32_t isr;
+
+    isr = ioread32(dev->mmregs + MARUCAM_ISR);
+    if (isr) {
+        marucam_fillbuf(dev, isr);
+        return IRQ_HANDLED;
+    }
+
+    marucam_dbg(1, "mismatched irq\n");
+    return IRQ_NONE;
+}
+
+/*
+ * IOCTL vidioc handling
+ */
+/*
+ * VIDIOC_QUERYCAP: report driver identity and capture+streaming caps.
+ * Uses bounded strlcpy() instead of strcpy() into the fixed-size
+ * v4l2_capability arrays (checkpatch flags strcpy here); output is
+ * unchanged since the names fit.
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+                struct v4l2_capability *cap)
+{
+    struct marucam_device *dev = priv;
+
+    strlcpy(cap->driver, MARUCAM_MODULE_NAME, sizeof(cap->driver));
+    strlcpy(cap->card, MARUCAM_MODULE_NAME, sizeof(cap->card));
+    strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));
+    cap->version = MARUCAM_VERSION;
+    cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+    return 0;
+}
+
+/*
+ * VIDIOC_ENUM_FMT: forward the query to the host through the BAR-1
+ * argument area, then read the result back.  The device returns a
+ * positive errno in the command register; EINVAL marks the normal end
+ * of enumeration and is not logged.
+ */
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+                    struct v4l2_fmtdesc *f)
+{
+    unsigned int ret;
+    struct marucam_device *dev = priv;
+
+    if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+        marucam_err("buf type is not V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
+        return -EINVAL;
+    }
+
+    mutex_lock(&dev->mlock);
+    /* Marshal the request, kick the command, read the status. */
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    memcpy_toio(dev->args, (const void *)f, sizeof(struct v4l2_fmtdesc));
+    iowrite32(0, dev->mmregs + MARUCAM_ENUM_FMT);
+    ret = ioread32(dev->mmregs + MARUCAM_ENUM_FMT);
+    if (ret) {
+        if (ret != EINVAL) {
+            marucam_err("enum_fmt failed: ret(%d), idx(%u)\n",
+                ret, f->index);
+        }
+        mutex_unlock(&dev->mlock);
+        return -ret;    /* positive device errno -> negative kernel errno */
+    }
+    memcpy_fromio((void *)f, dev->args, sizeof(struct v4l2_fmtdesc));
+    mutex_unlock(&dev->mlock);
+
+    return 0;
+}
+
+/*
+ * VIDIOC_G_FMT: fetch the current format from the host and cache the
+ * resulting geometry/format in the device context.
+ *
+ * NOTE(review): sizeof(struct v4l2_format) bytes are copied through pf,
+ * which points at the smaller fmt.pix member of f -- this looks like an
+ * over-read/over-write past the union member; confirm against the
+ * host-side marshalling before changing it.
+ */
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+                    struct v4l2_format *f)
+{
+    unsigned int ret;
+    struct marucam_device *dev = priv;
+    struct v4l2_pix_format *pf = &f->fmt.pix;
+
+    if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+        marucam_err("buf type is not V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
+        return -EINVAL;
+    }
+
+    mutex_lock(&dev->mlock);
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    memcpy_toio(dev->args, (const void *)pf, sizeof(struct v4l2_format));
+    iowrite32(0, dev->mmregs + MARUCAM_G_FMT);
+    ret = ioread32(dev->mmregs + MARUCAM_G_FMT);
+    if (ret) {
+        marucam_err("g_fmt failed: ret(%d)\n", ret);
+        mutex_unlock(&dev->mlock);
+        return -ret;
+    }
+    memcpy_fromio((void *)pf, dev->args, sizeof(struct v4l2_format));
+
+    /* Cache the negotiated format for get_image_size() and friends. */
+    dev->pixelformat = pf->pixelformat;
+    dev->width = pf->width;
+    dev->height = pf->height;
+    dev->vb_vidq.field = pf->field;
+    dev->type = f->type;
+
+    mutex_unlock(&dev->mlock);
+    return 0;
+}
+
+/*
+ * VIDIOC_TRY_FMT: let the host validate/adjust the requested format
+ * without committing it (device state is not modified on success).
+ */
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+                    struct v4l2_format *f)
+{
+    unsigned int ret;
+    struct marucam_device *dev = priv;
+    struct v4l2_pix_format *pf = &f->fmt.pix;
+
+    if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+        marucam_err("buf type is not V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
+        return -EINVAL;
+    }
+
+    mutex_lock(&dev->mlock);
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    memcpy_toio(dev->args, (const void *)pf, sizeof(struct v4l2_format));
+    iowrite32(0, dev->mmregs + MARUCAM_TRY_FMT);
+    ret = ioread32(dev->mmregs + MARUCAM_TRY_FMT);
+    if (ret) {
+        marucam_err("try_fmt failed: ret(%d), wxh(%ux%u), pf(0x%x)\n",
+            ret, pf->width, pf->height,
+            pf->pixelformat);
+        mutex_unlock(&dev->mlock);
+        return -ret;
+    }
+    memcpy_fromio((void *)pf, dev->args, sizeof(struct v4l2_format));
+    mutex_unlock(&dev->mlock);
+    return 0;
+}
+
+/*
+ * VIDIOC_S_FMT: commit a capture format on the host.  Refused while
+ * streaming (opstate != S_IDLE) or while videobuf still holds buffers,
+ * since a format change would invalidate their size.
+ */
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+                    struct v4l2_format *f)
+{
+    unsigned int ret;
+    struct marucam_device *dev = priv;
+    struct videobuf_queue *q2 = &dev->vb_vidq;
+    struct v4l2_pix_format *pf = &f->fmt.pix;
+
+    if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+        marucam_err("buf type is not V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
+        return -EINVAL;
+    }
+
+    mutex_lock(&dev->mlock);
+    if (dev->opstate != S_IDLE) {
+        marucam_err("state is not S_IDLE\n");
+        mutex_unlock(&dev->mlock);
+        return -EBUSY;
+    }
+    /* vb_lock protects the queue busy check against concurrent users. */
+    mutex_lock(&q2->vb_lock);
+    if (videobuf_queue_is_busy(&dev->vb_vidq)) {
+        marucam_err("videobuf queue is busy\n");
+        mutex_unlock(&q2->vb_lock);
+        mutex_unlock(&dev->mlock);
+        return -EBUSY;
+    }
+    mutex_unlock(&q2->vb_lock);
+
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    memcpy_toio(dev->args, (const void *)pf, sizeof(struct v4l2_format));
+
+    iowrite32(0, dev->mmregs + MARUCAM_S_FMT);
+    ret = ioread32(dev->mmregs + MARUCAM_S_FMT);
+    if (ret) {
+        marucam_err("s_fmt failed: ret(%d), wxh(%ux%u), pf(0x%x)\n",
+            ret, pf->width, pf->height,
+            pf->pixelformat);
+        mutex_unlock(&dev->mlock);
+        return -ret;
+    }
+    memcpy_fromio((void *)pf, dev->args, sizeof(struct v4l2_format));
+
+    /* Cache the committed format for later buffer sizing. */
+    dev->pixelformat = pf->pixelformat;
+    dev->width = pf->width;
+    dev->height = pf->height;
+    dev->vb_vidq.field = pf->field;
+    dev->type = f->type;
+
+    mutex_unlock(&dev->mlock);
+    return 0;
+}
+
+/* VIDIOC_REQBUFS: delegate buffer allocation to the videobuf core. */
+static int vidioc_reqbufs(struct file *file, void *priv,
+            struct v4l2_requestbuffers *p)
+{
+    struct marucam_device *dev = priv;
+    int err;
+
+    dev->type = p->type;
+
+    err = videobuf_reqbufs(&dev->vb_vidq, p);
+    if (!err)
+        return 0;
+
+    marucam_err("%s failed: ret(%d), count(%u), type(%u), memory(%u)\n",
+        __func__, err, p->count,
+        p->type, p->memory);
+    return err;
+}
+
+/* VIDIOC_QUERYBUF: delegate buffer state reporting to videobuf. */
+static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+    struct marucam_device *dev = priv;
+    int err;
+
+    err = videobuf_querybuf(&dev->vb_vidq, p);
+    if (!err)
+        return 0;
+
+    marucam_err("%s failed: ret(%d), idx(%u), type(%u), memory(%u)\n",
+        __func__, err, p->index,
+        p->type, p->memory);
+    return err;
+}
+
+/* VIDIOC_QBUF: hand a buffer to the capture queue via videobuf. */
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+    struct marucam_device *dev = priv;
+    int err;
+
+    err = videobuf_qbuf(&dev->vb_vidq, p);
+    if (!err)
+        return 0;
+
+    marucam_err("%s failed: ret(%d), idx(%u), type(%u), memory(%u)\n",
+        __func__, err, p->index,
+        p->type, p->memory);
+    return err;
+}
+
+/* VIDIOC_DQBUF: collect a completed buffer; non-blocking honors O_NONBLOCK. */
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+    struct marucam_device *dev = priv;
+    int err;
+
+    err = videobuf_dqbuf(&dev->vb_vidq, p, file->f_flags & O_NONBLOCK);
+    if (!err)
+        return 0;
+
+    marucam_err("%s failed: ret(%d), idx(%u), type(%u), memory(%u)\n",
+        __func__, err, p->index,
+        p->type, p->memory);
+    return err;
+}
+
+/*
+ * VIDIOC_STREAMON: start the device first, then the videobuf queue.
+ *
+ * Fix over the original: when videobuf_streamon() fails, the device had
+ * already been told to stream while opstate stayed S_IDLE, so host and
+ * guest state diverged and a later STREAMON would hit an already
+ * streaming device.  The device is now rolled back with STREAMOFF on
+ * that path (best-effort, failure only logged).
+ */
+static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+    int ret = 0;
+    unsigned int dev_ret;
+    struct marucam_device *dev = priv;
+
+    if (dev->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+        marucam_err("buf type is not V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
+        return -EINVAL;
+    }
+    if (i != dev->type) {
+        marucam_err("mismatched buf type\n");
+        return -EINVAL;
+    }
+
+    mutex_lock(&dev->mlock);
+    if (dev->opstate != S_IDLE) {
+        marucam_err("state is not S_IDLE\n");
+        mutex_unlock(&dev->mlock);
+        return -EBUSY;
+    }
+
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    iowrite32(0, dev->mmregs + MARUCAM_STREAMON);
+    dev_ret = ioread32(dev->mmregs + MARUCAM_STREAMON);
+    if (dev_ret) {
+        marucam_err("stream_on failed: ret(%d)\n", dev_ret);
+        mutex_unlock(&dev->mlock);
+        return -dev_ret;
+    }
+
+    INIT_LIST_HEAD(&dev->active);
+    ret = videobuf_streamon(&dev->vb_vidq);
+    if (ret) {
+        marucam_err("videobuf_streamon() failed: ret(%d)\n", ret);
+        /* Roll the device back so its state matches opstate == S_IDLE. */
+        iowrite32(0, dev->mmregs + MARUCAM_STREAMOFF);
+        dev_ret = ioread32(dev->mmregs + MARUCAM_STREAMOFF);
+        if (dev_ret)
+            marucam_err("stream_off failed: ret(%d)\n", dev_ret);
+        mutex_unlock(&dev->mlock);
+        return ret;
+    }
+
+    dev->opstate = S_RUNNING;
+    mutex_unlock(&dev->mlock);
+    return ret;
+}
+
+/*
+ * VIDIOC_STREAMOFF: stop the device first, then tear down the videobuf
+ * stream and reset the active list.  A STREAMOFF while idle is a no-op
+ * that succeeds.
+ */
+static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+    int ret = 0;
+    unsigned int dev_ret;
+    struct marucam_device *dev = priv;
+
+    if (dev->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+        marucam_err("buf type is not V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
+        return -EINVAL;
+    }
+    if (i != dev->type) {
+        marucam_err("mismatched buf type\n");
+        return -EINVAL;
+    }
+
+    mutex_lock(&dev->mlock);
+    if (dev->opstate != S_RUNNING) {
+        marucam_err("The device state is not S_RUNNING. Do nothing\n");
+        mutex_unlock(&dev->mlock);
+        return 0;
+    }
+
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    iowrite32(0, dev->mmregs + MARUCAM_STREAMOFF);
+    dev_ret = ioread32(dev->mmregs + MARUCAM_STREAMOFF);
+    if (dev_ret) {
+        marucam_err("stream_off failed: ret(%d)\n", dev_ret);
+        mutex_unlock(&dev->mlock);
+        return -dev_ret;
+    }
+
+    dev->opstate = S_IDLE;
+    ret = videobuf_streamoff(&dev->vb_vidq);
+    if (ret)
+        marucam_err("videobuf_streamoff() failed: ret(%d)\n", ret);
+
+    INIT_LIST_HEAD(&dev->active);
+    mutex_unlock(&dev->mlock);
+    return ret;
+}
+
+/* Standards do not apply to a camera input; accept anything. */
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id i)
+{
+    return 0;
+}
+
+/*
+ * VIDIOC_ENUMINPUT: a single fixed camera input (index 0).
+ * Uses bounded snprintf() instead of sprintf() into the fixed-size
+ * v4l2_input.name array; the generated string is unchanged.
+ */
+static int vidioc_enum_input(struct file *file, void *priv,
+                struct v4l2_input *inp)
+{
+    if (inp->index != 0)
+        return -EINVAL;
+
+    inp->type = V4L2_INPUT_TYPE_CAMERA;
+    snprintf(inp->name, sizeof(inp->name),
+        "MARU Virtual Camera %u", inp->index);
+
+    return 0;
+}
+
+/* VIDIOC_G_INPUT: only input 0 exists. */
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+    *i = 0;
+
+    return 0;
+}
+/* VIDIOC_S_INPUT: accept any selection; there is only one input. */
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+    return 0;
+}
+
+/* controls
+ *
+ */
+/*
+ * VIDIOC_QUERYCTRL: only brightness/contrast/saturation/sharpness are
+ * supported; the query itself is answered by the host.
+ */
+static int vidioc_queryctrl(struct file *file, void *priv,
+                struct v4l2_queryctrl *qc)
+{
+    unsigned int ret;
+    struct marucam_device *dev = priv;
+
+    mutex_lock(&dev->mlock);
+    switch (qc->id) {
+    /* we only support followed items. */
+    case V4L2_CID_BRIGHTNESS:
+    case V4L2_CID_CONTRAST:
+    case V4L2_CID_SATURATION:
+    case V4L2_CID_SHARPNESS:
+        break;
+    default:
+        mutex_unlock(&dev->mlock);
+        return -EINVAL;
+    }
+
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    memcpy_toio(dev->args, (const void *)qc,
+        sizeof(struct v4l2_queryctrl));
+    iowrite32(0, dev->mmregs + MARUCAM_QUERYCTRL);
+    ret = ioread32(dev->mmregs + MARUCAM_QUERYCTRL);
+    if (ret) {
+        marucam_err("query_ctrl failed: ret(%d), id(%u)\n",
+            ret, qc->id);
+        mutex_unlock(&dev->mlock);
+        return -ret;    /* positive device errno -> negative errno */
+    }
+    memcpy_fromio((void *)qc, dev->args,
+        sizeof(struct v4l2_queryctrl));
+
+    mutex_unlock(&dev->mlock);
+    return 0;
+}
+
+/* VIDIOC_G_CTRL: read a control's current value from the host. */
+static int vidioc_g_ctrl(struct file *file, void *priv,
+             struct v4l2_control *ctrl)
+{
+    unsigned int ret;
+    struct marucam_device *dev = priv;
+
+    mutex_lock(&dev->mlock);
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    memcpy_toio(dev->args, (const void *)ctrl,
+        sizeof(struct v4l2_control));
+    iowrite32(0, dev->mmregs + MARUCAM_G_CTRL);
+    ret = ioread32(dev->mmregs + MARUCAM_G_CTRL);
+    if (ret) {
+        marucam_err("g_ctrl failed: ret(%d), id(%u)\n",
+            ret, ctrl->id);
+        mutex_unlock(&dev->mlock);
+        return -ret;
+    }
+    memcpy_fromio((void *)ctrl, dev->args,
+        sizeof(struct v4l2_control));
+    mutex_unlock(&dev->mlock);
+    return 0;
+}
+
+/* VIDIOC_S_CTRL: write a control value; host echoes the applied value. */
+static int vidioc_s_ctrl(struct file *file, void *priv,
+             struct v4l2_control *ctrl)
+{
+    unsigned int ret;
+    struct marucam_device *dev = priv;
+
+    mutex_lock(&dev->mlock);
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    memcpy_toio(dev->args, (const void *)ctrl,
+        sizeof(struct v4l2_control));
+    iowrite32(0, dev->mmregs + MARUCAM_S_CTRL);
+    ret = ioread32(dev->mmregs + MARUCAM_S_CTRL);
+    if (ret) {
+        marucam_err("s_ctrl failed: ret(%d), id(%u), val(%d)\n",
+            ret, ctrl->id, ctrl->value);
+        mutex_unlock(&dev->mlock);
+        return -ret;
+    }
+    memcpy_fromio((void *)ctrl, dev->args,
+        sizeof(struct v4l2_control));
+    mutex_unlock(&dev->mlock);
+    return 0;
+}
+
+/* VIDIOC_S_PARM: push capture parameters (e.g. frame interval) to host. */
+static int vidioc_s_parm(struct file *file, void *priv,
+             struct v4l2_streamparm *parm)
+{
+    unsigned int ret;
+    struct marucam_device *dev = priv;
+    struct v4l2_captureparm *cp = &parm->parm.capture;
+
+    if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+        marucam_err("buf type is not V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
+        return -EINVAL;
+    }
+
+    mutex_lock(&dev->mlock);
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    memcpy_toio(dev->args, (const void *)cp,
+        sizeof(struct v4l2_captureparm));
+    iowrite32(0, dev->mmregs + MARUCAM_S_PARM);
+    ret = ioread32(dev->mmregs + MARUCAM_S_PARM);
+    if (ret) {
+        marucam_err("s_parm failed: ret(%d)\n", ret);
+        mutex_unlock(&dev->mlock);
+        return -ret;
+    }
+    memcpy_fromio((void *)cp, dev->args,
+        sizeof(struct v4l2_captureparm));
+    mutex_unlock(&dev->mlock);
+    return 0;
+}
+
+/* VIDIOC_G_PARM: fetch current capture parameters from the host. */
+static int vidioc_g_parm(struct file *file, void *priv,
+             struct v4l2_streamparm *parm)
+{
+    unsigned int ret;
+    struct marucam_device *dev = priv;
+    struct v4l2_captureparm *cp = &parm->parm.capture;
+
+    if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+        marucam_err("buf type is not V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
+        return -EINVAL;
+    }
+
+    mutex_lock(&dev->mlock);
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    memcpy_toio(dev->args, (const void *)cp,
+        sizeof(struct v4l2_captureparm));
+    iowrite32(0, dev->mmregs + MARUCAM_G_PARM);
+    ret = ioread32(dev->mmregs + MARUCAM_G_PARM);
+    if (ret) {
+        marucam_err("g_parm failed: ret(%d)\n", ret);
+        mutex_unlock(&dev->mlock);
+        return -ret;
+    }
+    memcpy_fromio((void *)cp, dev->args,
+        sizeof(struct v4l2_captureparm));
+    mutex_unlock(&dev->mlock);
+    return 0;
+}
+
+/*
+ * VIDIOC_ENUM_FRAMESIZES: host-backed enumeration; EINVAL from the
+ * device marks the normal end of the list and is not logged.
+ */
+static int vidioc_enum_framesizes(struct file *file, void *priv,
+                    struct v4l2_frmsizeenum *fsize)
+{
+    unsigned int ret;
+    struct marucam_device *dev = priv;
+
+    mutex_lock(&dev->mlock);
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    memcpy_toio(dev->args, (const void *)fsize,
+        sizeof(struct v4l2_frmsizeenum));
+    iowrite32(0, dev->mmregs + MARUCAM_ENUM_FSIZES);
+    ret = ioread32(dev->mmregs + MARUCAM_ENUM_FSIZES);
+    if (ret) {
+        if (ret != EINVAL) {
+            marucam_err("enum_framesizes failed: %d, index(%u), pix(%u)\n",
+                ret, fsize->index, fsize->pixel_format);
+        }
+        mutex_unlock(&dev->mlock);
+        return -ret;
+    }
+    memcpy_fromio((void *)fsize, dev->args,
+        sizeof(struct v4l2_frmsizeenum));
+    mutex_unlock(&dev->mlock);
+    return 0;
+}
+
+/* VIDIOC_ENUM_FRAMEINTERVALS: same host-backed pattern as above. */
+static int vidioc_enum_frameintervals(struct file *file, void *priv,
+                    struct v4l2_frmivalenum *fival)
+{
+    unsigned int ret;
+    struct marucam_device *dev = priv;
+
+    mutex_lock(&dev->mlock);
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    memcpy_toio(dev->args, (const void *)fival,
+        sizeof(struct v4l2_frmivalenum));
+    iowrite32(0, dev->mmregs + MARUCAM_ENUM_FINTV);
+    ret = ioread32(dev->mmregs + MARUCAM_ENUM_FINTV);
+    if (ret) {
+        if (ret != EINVAL) {
+            marucam_err("%s failed: ret(%d), idx(%u), pf(%u), %ux%u\n",
+                __func__, ret, fival->index,
+                fival->pixel_format, fival->width,
+                fival->height);
+        }
+        mutex_unlock(&dev->mlock);
+        return -ret;
+    }
+    memcpy_fromio((void *)fival, dev->args,
+        sizeof(struct v4l2_frmivalenum));
+    mutex_unlock(&dev->mlock);
+    return 0;
+}
+
+/* ------------------------------------------------------------------
+ Videobuf operations
+ ------------------------------------------------------------------*/
+/*
+ * videobuf buf_setup: report the frame size for the current format and
+ * clamp the requested buffer count to the two slots the driver uses.
+ */
+static int buffer_setup(struct videobuf_queue *vq,
+            unsigned int *count,
+            unsigned int *size)
+{
+    struct marucam_device *dev = vq->priv_data;
+
+    *size = get_image_size(dev);
+
+    /* 0 or more than two requested -> give exactly two buffers. */
+    if (*count == 0 || *count > 2)
+        *count = 2;
+
+    marucam_dbg(1, "count=%d, size=%d\n", *count, *size);
+
+    return 0;
+}
+
+/*
+ * videobuf buf_prepare: size-check the buffer against the current
+ * format, lock its memory on first use, then mark it PREPARED.
+ */
+static int buffer_prepare(struct videobuf_queue *vq,
+            struct videobuf_buffer *vb,
+            enum v4l2_field field)
+{
+    int rc;
+    struct marucam_device *dev = vq->priv_data;
+
+    marucam_dbg(1, "field=%d\n", field);
+
+    vb->size = get_image_size(dev);
+
+    /* A user-mapped buffer must be large enough for one frame. */
+    if (0 != vb->baddr && vb->bsize < vb->size) {
+        marucam_err("invalid buffer size\n");
+        return -EINVAL;
+    }
+
+    if (vb->state == VIDEOBUF_NEEDS_INIT) {
+        rc = videobuf_iolock(vq, vb, NULL);
+        if (rc < 0) {
+            marucam_err("videobuf_iolock() failed: ret(%d)\n", rc);
+            vb->state = VIDEOBUF_NEEDS_INIT;
+            return rc;
+        }
+    }
+
+    vb->width = dev->width;
+    vb->height = dev->height;
+    vb->field = field;
+    vb->state = VIDEOBUF_PREPARED;
+
+    return 0;
+}
+
+/* videobuf buf_queue: append the prepared buffer to the active list. */
+static void buffer_queue(struct videobuf_queue *vq,
+            struct videobuf_buffer *vb)
+{
+    struct marucam_device *cam = vq->priv_data;
+
+    marucam_dbg(1, "\n");
+
+    vb->state = VIDEOBUF_QUEUED;
+    list_add_tail(&vb->queue, &cam->active);
+}
+
+/* videobuf buf_release: reset the buffer so it gets re-initialized. */
+static void buffer_release(struct videobuf_queue *vq,
+            struct videobuf_buffer *vb)
+{
+    marucam_dbg(1, "buffer freed\n");
+    vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+/* Queue callbacks handed to videobuf_queue_marucam_init(). */
+static struct videobuf_queue_ops marucam_video_qops = {
+    .buf_setup = buffer_setup,
+    .buf_prepare = buffer_prepare,
+    .buf_queue = buffer_queue,
+    .buf_release = buffer_release,
+};
+
+/* ------------------------------------------------------------------
+ File operations for the device
+ ------------------------------------------------------------------*/
+
+/*
+ * Device node open: single-open policy enforced via dev->in_use under
+ * mlock.  Sets defaults, grabs the shared IRQ, initializes the videobuf
+ * queue and finally asks the host to open its side of the camera.
+ */
+static int marucam_open(struct file *file)
+{
+    int ret;
+    unsigned int dev_ret;
+    struct marucam_device *dev = video_drvdata(file);
+
+    file->private_data = dev;
+
+    mutex_lock(&dev->mlock);
+    if (dev->in_use) {
+        marucam_err("already opened\n");
+        mutex_unlock(&dev->mlock);
+        return -EBUSY;
+    }
+
+    /* Reset format state; a fresh S_FMT/G_FMT will populate it. */
+    dev->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    dev->pixelformat = 0;
+    dev->width = 0;
+    dev->height = 0;
+
+    ret = request_irq(dev->pdev->irq, marucam_irq_handler,
+            IRQF_SHARED, MARUCAM_MODULE_NAME, dev);
+    if (ret) {
+        marucam_err("request_irq() failed: ret(%d), irq(#%d)\n",
+            ret, dev->pdev->irq);
+        mutex_unlock(&dev->mlock);
+        return ret;
+    }
+
+    videobuf_queue_marucam_init(&dev->vb_vidq, &marucam_video_qops,
+            &dev->pdev->dev, &dev->slock, dev->type,
+            V4L2_FIELD_NONE, sizeof(struct videobuf_buffer),
+            dev, NULL);
+
+    /* Tell the host to open the camera; it answers via the register. */
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    iowrite32(0, dev->mmregs + MARUCAM_OPEN);
+    dev_ret = ioread32(dev->mmregs + MARUCAM_OPEN);
+    if (dev_ret) {
+        marucam_err("device open failed: ret(%d)\n", dev_ret);
+        free_irq(dev->pdev->irq, dev);
+        mutex_unlock(&dev->mlock);
+        return -dev_ret;
+    }
+
+    dev->in_use = 1;
+    mutex_unlock(&dev->mlock);
+    return 0;
+
+}
+
+/*
+ * Device node release: stop a still-running stream, tear down videobuf
+ * state, release the IRQ and tell the host to close its side.
+ *
+ * NOTE(review): the early returns on device-command failure leave the
+ * IRQ registered and in_use set -- looks like those resources would leak
+ * on that path; confirm intent before relying on it.
+ */
+static int marucam_close(struct file *file)
+{
+    unsigned int ret;
+    struct marucam_device *dev = file->private_data;
+
+    mutex_lock(&dev->mlock);
+    if (dev->opstate == S_RUNNING) {
+        marucam_err("unexpectedly terminated\n");
+        iowrite32(0, dev->mmregs + MARUCAM_STREAMOFF);
+        ret = ioread32(dev->mmregs + MARUCAM_STREAMOFF);
+        if (ret) {
+            marucam_err("stream_off failed: ret(%d)\n", ret);
+            mutex_unlock(&dev->mlock);
+            return -ret;
+        }
+
+        dev->opstate = S_IDLE;
+    }
+
+    videobuf_stop(&dev->vb_vidq);
+    videobuf_mmap_free(&dev->vb_vidq);
+    INIT_LIST_HEAD(&dev->active);
+
+    free_irq(dev->pdev->irq, dev);
+
+    memset_io(dev->args, 0x00, dev->iomem_size);
+    iowrite32(0, dev->mmregs + MARUCAM_CLOSE);
+    ret = ioread32(dev->mmregs + MARUCAM_CLOSE);
+    if (ret) {
+        marucam_err("close failed: ret(%d)\n", ret);
+        mutex_unlock(&dev->mlock);
+        return -ret;
+    }
+
+    dev->in_use = 0;
+    mutex_unlock(&dev->mlock);
+    return 0;
+}
+
+/*
+ * poll(): look at the head of the streaming queue.  If that buffer is
+ * finished report readability; otherwise ask the host for the frame
+ * (MARUCAM_REQFRAME) and let the IRQ completion wake the waiter.
+ */
+static unsigned int marucam_poll(struct file *file,
+            struct poll_table_struct *wait)
+{
+    unsigned int rval = 0;
+    struct marucam_device *poll_dev = file->private_data;
+    struct videobuf_queue *q3 = &poll_dev->vb_vidq;
+    struct videobuf_buffer *vbuf = NULL;
+
+    if (q3->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+        return POLLERR;
+
+    mutex_lock(&q3->vb_lock);
+    if (q3->streaming) {
+        if (!list_empty(&q3->stream)) {
+            vbuf = list_entry(q3->stream.next,
+                struct videobuf_buffer, stream);
+        }
+    }
+    /* Not streaming, or no queued buffer: nothing to wait on. */
+    if (!vbuf) {
+        marucam_err("video buffer list is empty\n");
+        rval = POLLERR;
+    }
+
+    if (rval == 0) {
+        poll_wait(file, &vbuf->done, wait);
+        /* 0xFF is the driver-private "invalid frame" state. */
+        if (vbuf->state == VIDEOBUF_DONE ||
+            vbuf->state == VIDEOBUF_ERROR ||
+            vbuf->state == 0xFF) {
+            rval = POLLIN | POLLRDNORM;
+        } else {
+            /* Request capture of this buffer from the host. */
+            iowrite32(vbuf->i,
+                poll_dev->mmregs + MARUCAM_REQFRAME);
+        }
+    }
+    mutex_unlock(&q3->vb_lock);
+    return rval;
+}
+
+/* mmap(): delegate to the videobuf mapper installed in qops. */
+static int marucam_mmap(struct file *file, struct vm_area_struct *vma)
+{
+    struct marucam_device *cam = file->private_data;
+    int ret;
+
+    marucam_dbg(1, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
+
+    ret = videobuf_mmap_mapper(&cam->vb_vidq, vma);
+
+    marucam_dbg(1, "vma start=0x%08lx, size=%ld, ret=%d\n",
+        (unsigned long)vma->vm_start,
+        (unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
+        ret);
+
+    return ret;
+}
+
+/* ioctl dispatch table wired into the video_device below. */
+static const struct v4l2_ioctl_ops marucam_ioctl_ops = {
+    .vidioc_querycap = vidioc_querycap,
+    .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+    .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+    .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+    .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+    .vidioc_reqbufs = vidioc_reqbufs,
+    .vidioc_querybuf = vidioc_querybuf,
+    .vidioc_qbuf = vidioc_qbuf,
+    .vidioc_dqbuf = vidioc_dqbuf,
+    .vidioc_s_std = vidioc_s_std,
+    .vidioc_enum_input = vidioc_enum_input,
+    .vidioc_g_input = vidioc_g_input,
+    .vidioc_s_input = vidioc_s_input,
+    .vidioc_queryctrl = vidioc_queryctrl,
+    .vidioc_g_ctrl = vidioc_g_ctrl,
+    .vidioc_s_ctrl = vidioc_s_ctrl,
+    .vidioc_streamon = vidioc_streamon,
+    .vidioc_streamoff = vidioc_streamoff,
+    .vidioc_g_parm = vidioc_g_parm,
+    .vidioc_s_parm = vidioc_s_parm,
+    .vidioc_enum_framesizes = vidioc_enum_framesizes,
+    .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
+};
+
+/* File operations for the /dev/videoN node. */
+static const struct v4l2_file_operations marucam_fops = {
+    .owner = THIS_MODULE,
+    .open = marucam_open,
+    .release = marucam_close,
+    .poll = marucam_poll,
+    .mmap = marucam_mmap,
+    .ioctl = video_ioctl2,
+};
+
+/* Template copied into the per-device video_device in probe. */
+static struct video_device marucam_video_dev = {
+    .name = MARUCAM_MODULE_NAME,
+    .fops = &marucam_fops,
+    .ioctl_ops = &marucam_ioctl_ops,
+    .minor = -1,
+    .release = video_device_release,
+};
+
+/* -----------------------------------------------------------------
+   Initialization and module stuff
+   ------------------------------------------------------------------*/
+
+/* PCI IDs of the emulated camera this driver binds to. */
+static const struct pci_device_id marucam_pci_id_tbl[] = {
+    { PCI_DEVICE(PCI_VENDOR_ID_TIZEN, PCI_DEVICE_ID_VIRTUAL_CAMERA) },
+    {}
+};
+
+MODULE_DEVICE_TABLE(pci, marucam_pci_id_tbl);
+
+/* The following function already exist in the latest linux stable kernel.
+ * https://git.kernel.org/cgit/linux/kernel/git/stable/linux-stable.git/
+ * commit/?id=c43996f4001de629af4a4d6713782e883677e5b9
+ * This should be removed if emulator-kernel is upgraded.
+ */
+/* Write-combine ioremap of a PCI BAR; NULL (with a WARN) for I/O BARs. */
+static void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
+{
+    resource_size_t start, len;
+
+    /*
+     * Make sure the BAR is actually a memory resource, not an IO resource
+     */
+    if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
+        WARN_ON(1);
+        return NULL;
+    }
+
+    start = pci_resource_start(pdev, bar);
+    len = pci_resource_len(pdev, bar);
+    return ioremap_wc(start, len);
+}
+
+/*
+ * PCI probe: enable the device, claim BARs 0-2 (frame memory, argument
+ * area, registers), map BARs 1/2, register the v4l2 device and the
+ * video node, then record the instance slot reported by the host.
+ *
+ * Reworked over the original: the eight hand-duplicated error-cleanup
+ * sequences are replaced by a single kernel-idiomatic goto unwind
+ * ladder (identical cleanup on every path); the "device index" printk
+ * gained its missing newline; "registerd" typo fixed in the final log.
+ */
+static int marucam_pci_initdev(struct pci_dev *pdev,
+                const struct pci_device_id *id)
+{
+    int ret_val;
+    struct marucam_device *dev;
+
+    debug = MARUCAM_DEBUG_LEVEL;
+
+    if (!pci_resource_len(pdev, 0)) {
+        marucam_info("No available device\n");
+        return -ENODEV;
+    }
+
+    /* At most two instances are supported (see marucam_instance[]). */
+    if (marucam_instance[0] && marucam_instance[1]) {
+        marucam_err("Two devices already exists\n");
+        return -EBUSY;
+    }
+
+    dev = kzalloc(sizeof(struct marucam_device), GFP_KERNEL);
+    if (!dev) {
+        marucam_err("Memory allocation failed for a marucam device\n");
+        return -ENOMEM;
+    }
+
+    ret_val = pci_enable_device(pdev);
+    if (ret_val) {
+        marucam_err("pci_enable_device failed\n");
+        goto err_free_dev;
+    }
+
+    /* BAR 0: raw frame memory, later remapped into user space. */
+    dev->mem_base = pci_resource_start(pdev, 0);
+    dev->mem_size = pci_resource_len(pdev, 0);
+
+    if (pci_request_region(pdev, 0, MARUCAM_MODULE_NAME)) {
+        marucam_err("request region failed for 0 bar\n");
+        ret_val = -EBUSY;
+        goto err_disable;
+    }
+
+    if (pci_request_region(pdev, 1, MARUCAM_MODULE_NAME)) {
+        marucam_err("request region failed for 1 bar\n");
+        ret_val = -EBUSY;
+        goto err_release_bar0;
+    }
+
+    if (pci_request_region(pdev, 2, MARUCAM_MODULE_NAME)) {
+        marucam_err("request region failed for 2 bar\n");
+        ret_val = -EBUSY;
+        goto err_release_bar1;
+    }
+
+    /* BAR 1: argument marshalling area shared with the host. */
+    dev->args = pci_ioremap_wc_bar(pdev, 1);
+    if (!dev->args) {
+        marucam_err("pci_ioremap_wc_bar failed for 1 bar\n");
+        ret_val = -EIO;
+        goto err_release_bar2;
+    }
+    dev->iomem_size = pci_resource_len(pdev, 1);
+
+    /* BAR 2: command/status registers. */
+    dev->mmregs = pci_ioremap_wc_bar(pdev, 2);
+    if (!dev->mmregs) {
+        marucam_err("pci_ioremap_wc_bar failed for 2 bar\n");
+        ret_val = -EIO;
+        goto err_unmap_args;
+    }
+
+    /* The host assigns our instance slot during MARUCAM_INIT. */
+    dev->dev_index = ioread32(dev->mmregs + MARUCAM_INIT);
+    marucam_info("device index is %d\n", dev->dev_index);
+    pci_set_master(pdev);
+    pci_set_drvdata(pdev, dev);
+
+    ret_val = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+    if (ret_val < 0) {
+        marucam_err("v4l2_device_register() failed: %d\n", ret_val);
+        goto err_unmap_mmregs;
+    }
+
+    dev->vfd = video_device_alloc();
+    if (dev->vfd == NULL) {
+        ret_val = -ENOMEM;
+        goto err_unreg_v4l2;
+    }
+
+    memcpy(dev->vfd, &marucam_video_dev, sizeof(marucam_video_dev));
+    dev->vfd->dev_parent = &pdev->dev;
+    dev->vfd->v4l2_dev = &dev->v4l2_dev;
+
+    ret_val = video_register_device(dev->vfd,
+                VFL_TYPE_GRABBER,
+                dev->dev_index);
+    if (ret_val < 0) {
+        marucam_err("video_register_device() failed: %d\n", ret_val);
+        video_device_release(dev->vfd);
+        goto err_unreg_v4l2;
+    }
+    video_set_drvdata(dev->vfd, dev);
+
+    INIT_LIST_HEAD(&dev->active);
+    spin_lock_init(&dev->slock);
+    mutex_init(&dev->mlock);
+    dev->opstate = S_IDLE;
+    dev->pdev = pdev;
+
+    snprintf(dev->vfd->name, sizeof(dev->vfd->name), "%s (%i)",
+        marucam_video_dev.name, dev->vfd->num);
+
+    marucam_instance[dev->dev_index] = dev;
+    marucam_info("Maru Camera(%u.%u.%u) device is registered as /dev/video%d\n",
+        (MARUCAM_VERSION >> 16) & 0xFF,
+        (MARUCAM_VERSION >> 8) & 0xFF,
+        MARUCAM_VERSION & 0xFF,
+        dev->vfd->num);
+
+    return 0;
+
+err_unreg_v4l2:
+    v4l2_device_unregister(&dev->v4l2_dev);
+err_unmap_mmregs:
+    iounmap(dev->mmregs);
+err_unmap_args:
+    iounmap(dev->args);
+err_release_bar2:
+    pci_release_region(pdev, 2);
+err_release_bar1:
+    pci_release_region(pdev, 1);
+err_release_bar0:
+    pci_release_region(pdev, 0);
+err_disable:
+    pci_disable_device(pdev);
+err_free_dev:
+    kfree(dev);
+    return ret_val;
+}
+
+/* PCI remove: tear down in reverse order of probe and clear our slot. */
+static void marucam_pci_removedev(struct pci_dev *pdev)
+{
+    struct marucam_device *cam = pci_get_drvdata(pdev);
+    unsigned char slot;
+
+    if (cam == NULL) {
+        marucam_warn("pci_remove on unknown pdev %p\n", pdev);
+        return;
+    }
+    /* Remember the instance slot before the structure is destroyed. */
+    slot = cam->dev_index;
+
+    video_unregister_device(cam->vfd);
+    v4l2_device_unregister(&cam->v4l2_dev);
+    iounmap(cam->args);
+    iounmap(cam->mmregs);
+    pci_release_region(cam->pdev, 0);
+    pci_release_region(cam->pdev, 1);
+    pci_release_region(cam->pdev, 2);
+    pci_disable_device(cam->pdev);
+
+    /* Poison the structure before freeing to catch stale users. */
+    memset(cam, 0x00, sizeof(struct marucam_device));
+    kfree(cam);
+    marucam_instance[slot] = NULL;
+}
+
+/* Glue between the PCI core and the probe/remove handlers above. */
+static struct pci_driver marucam_pci_driver = {
+    .name = MARUCAM_MODULE_NAME,
+    .id_table = marucam_pci_id_tbl,
+    .probe = marucam_pci_initdev,
+    .remove = marucam_pci_removedev,
+};
+
+/* Module entry point: register the PCI driver. */
+static int __init marucam_init(void)
+{
+    return pci_register_driver(&marucam_pci_driver);
+}
+
+/* Module exit point: unregister the PCI driver. */
+static void __exit marucam_exit(void)
+{
+    pci_unregister_driver(&marucam_pci_driver);
+}
+
+module_init(marucam_init);
+module_exit(marucam_exit);
--- /dev/null
+/*
+ * Virtual device node
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * JinHyung Choi <jinhyung2.choi@samsung.com>
+ * SooYoung Ha <yoosah.ha@samsung.com>
+ * Sungmin Ha <sungmin82.ha@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+/* Buffer sizes: host message payload and local jack value cache. */
+#define __MAX_BUF_SIZE 1024
+#define __MAX_BUF_JACK 512
+
+/* Wire format shared with the host: payload plus type/request words. */
+struct msg_info {
+ char buf[__MAX_BUF_SIZE];
+
+ uint16_t type;
+ uint16_t req;
+};
+
+/* Per-driver state: virtio handles, one in-flight message, and the
+ * scatterlist/flag/lock used to exchange it with the host. */
+struct virtio_jack {
+ struct virtio_device* vdev;
+ struct virtqueue* vq;
+
+ struct msg_info msginfo;
+
+ struct scatterlist sg_vq[2];
+
+ int flags; /* 1 once a reply has arrived; reset by the reader */
+ struct mutex lock;
+};
+
+/* Jack identifiers used in msg_info.type (jack_type_list queries the
+ * capability bitmask below). */
+enum jack_types {
+ jack_type_list = 0,
+ jack_type_charger,
+ jack_type_earjack,
+ jack_type_earkey,
+ jack_type_hdmi,
+ jack_type_usb,
+ jack_type_max
+};
+
+/* Capability bitmask reported by the host for jack_type_list. */
+enum jack_capabilities {
+ jack_cap_charger = 0x01,
+ jack_cap_earjack = 0x02,
+ jack_cap_earkey = 0x04,
+ jack_cap_hdmi = 0x08,
+ jack_cap_usb = 0x10
+};
+
+/* msg_info.req values: guest get/set, host answer. */
+enum request_cmd {
+ request_get = 0,
+ request_set,
+ request_answer
+};
+
+/* Opaque per-platform-device drvdata blob. */
+struct jack_data {
+ int no;
+ char buffer[50];
+};
+
+struct virtio_jack *v_jack;
+
+/* Last reply payload from the host, guarded by v_jack->lock. */
+static char jack_data [__MAX_BUF_JACK];
+static int jack_capability = 0;
+
+/* Readers in get_jack_data() sleep here until jack_vq_done() fires. */
+static DECLARE_WAIT_QUEUE_HEAD(wq);
+
+static struct virtio_device_id id_table[] = { { VIRTIO_ID_JACK,
+ VIRTIO_DEV_ANY_ID }, { 0 }, };
+
+#define DEVICE_NAME "jack"
+#define JACK_DEBUG
+
+/* Debug logging; compiled out when JACK_DEBUG is undefined. */
+#ifdef JACK_DEBUG
+#define DLOG(level, fmt, ...) \
+ printk(level "maru_%s: " fmt, DEVICE_NAME, ##__VA_ARGS__)
+#else
+// do nothing
+#define DLOG(level, fmt, ...)
+#endif
+
+/*
+ * Parse the leading decimal digits of @name into an int.
+ * Stops at the first non-digit (or NUL); returns 0 when @name does
+ * not start with a digit.  No sign or overflow handling.
+ */
+static int jack_atoi(const char *name)
+{
+	int value = 0;
+	const char *p = name;
+
+	while (*p >= '0' && *p <= '9') {
+		value = value * 10 + (*p - '0');
+		p++;
+	}
+
+	return value;
+}
+
+/*
+ * Virtqueue callback: pull the host's reply, cache its payload in
+ * jack_data and wake any reader blocked in get_jack_data().
+ * NOTE(review): mutex_lock() in a virtqueue callback assumes the
+ * callback runs in process context — confirm for this transport.
+ */
+static void jack_vq_done(struct virtqueue *vq) {
+	unsigned int len;
+	struct msg_info* msg;
+
+	msg = (struct msg_info*) virtqueue_get_buf(v_jack->vq, &len);
+	if (msg == NULL) {
+		DLOG(KERN_ERR, "failed to virtqueue_get_buf");
+		return;
+	}
+
+	if (msg->req != request_answer) {
+		DLOG(KERN_DEBUG, "receive queue- not an answer message: %d", msg->req);
+		return;
+	}
+	/* (dropped: `msg->buf == NULL` — the address of an array member
+	 * is never NULL, so the check was dead code) */
+
+	DLOG(KERN_DEBUG, "msg buf: %s, req: %d, type: %d", msg->buf, msg->req, msg->type);
+
+	mutex_lock(&v_jack->lock);
+	/*
+	 * msg->buf is __MAX_BUF_SIZE (1024) bytes but jack_data only
+	 * __MAX_BUF_JACK (512): the original strcpy() could overflow.
+	 * Use a bounded, always-NUL-terminated copy.
+	 */
+	strlcpy(jack_data, msg->buf, sizeof(jack_data));
+	v_jack->flags = 1;
+	mutex_unlock(&v_jack->lock);
+
+	wake_up_interruptible(&wq);
+}
+
+/*
+ * Send a "set" request for jack @type with payload @buf to the host.
+ * @buf comes from a sysfs store handler and may be up to PAGE_SIZE
+ * bytes; jack_data is 512 bytes and msginfo.buf 1024, so the original
+ * strcpy()s could overflow both — replaced with bounded copies.
+ * Fire-and-forget: errors are only logged.
+ */
+static void set_jack_data(int type, const char* buf)
+{
+	int err = 0;
+
+	if (buf == NULL) {
+		DLOG(KERN_ERR, "set_jack buf is NULL.");
+		return;
+	}
+
+	if (v_jack == NULL) {
+		DLOG(KERN_ERR, "Invalid jack handle");
+		return;
+	}
+
+	mutex_lock(&v_jack->lock);
+	memset(jack_data, 0, sizeof(jack_data));
+	memset(&v_jack->msginfo, 0, sizeof(v_jack->msginfo));
+
+	strlcpy(jack_data, buf, sizeof(jack_data));
+
+	v_jack->msginfo.req = request_set;
+	v_jack->msginfo.type = type;
+	strlcpy(v_jack->msginfo.buf, buf, sizeof(v_jack->msginfo.buf));
+	mutex_unlock(&v_jack->lock);
+
+	DLOG(KERN_DEBUG, "set_jack_data type: %d, req: %d, buf: %s",
+			v_jack->msginfo.type, v_jack->msginfo.req, v_jack->msginfo.buf);
+
+	err = virtqueue_add_outbuf(v_jack->vq, v_jack->sg_vq, 1, &v_jack->msginfo, GFP_ATOMIC);
+	if (err < 0) {
+		DLOG(KERN_ERR, "failed to add buffer to virtqueue (err = %d)", err);
+		return;
+	}
+
+	virtqueue_kick(v_jack->vq);
+}
+
+/*
+ * Ask the host for the value of jack @type and copy the NUL-terminated
+ * reply into @data (caller must provide at least __MAX_BUF_JACK bytes).
+ * Returns 0 on success, -1 on argument/queueing failure, or
+ * -ERESTARTSYS when the wait is interrupted by a signal.
+ */
+static int get_jack_data(int type, char* data)
+{
+	struct scatterlist *sgs[2];
+	int err = 0;
+
+	if (v_jack == NULL || data == NULL) {
+		DLOG(KERN_ERR, "Invalid jack handle");
+		return -1;
+	}
+
+	mutex_lock(&v_jack->lock);
+	memset(&v_jack->msginfo, 0, sizeof(v_jack->msginfo));
+
+	v_jack->msginfo.req = request_get;
+	v_jack->msginfo.type = type;
+	mutex_unlock(&v_jack->lock);
+
+	DLOG(KERN_DEBUG, "get_jack_data type: %d, req: %d",
+			v_jack->msginfo.type, v_jack->msginfo.req);
+
+	sgs[0] = &v_jack->sg_vq[0];
+	sgs[1] = &v_jack->sg_vq[1];
+	err = virtqueue_add_sgs(v_jack->vq, sgs, 1, 1, &v_jack->msginfo, GFP_ATOMIC);
+	if (err < 0) {
+		DLOG(KERN_ERR, "failed to add buffer to virtqueue (err = %d)", err);
+		return -1;
+	}
+
+	virtqueue_kick(v_jack->vq);
+
+	/* The original ignored this return value and read stale data after
+	 * a signal; bail out so the caller can retry. */
+	if (wait_event_interruptible(wq, v_jack->flags != 0))
+		return -ERESTARTSYS;
+
+	mutex_lock(&v_jack->lock);
+	v_jack->flags = 0;
+	/* +1 copies the terminating NUL so @data is a valid C string */
+	memcpy(data, jack_data, strlen(jack_data) + 1);
+	mutex_unlock(&v_jack->lock);
+
+	return 0;
+}
+
+/*
+ * Fetch the current value for jack @type and format it into the sysfs
+ * output buffer @buf.  Returns the number of bytes written, or 0 when
+ * the host query fails.
+ */
+static int get_data_for_show(int type, char* buf)
+{
+	char reply[__MAX_BUF_JACK];
+
+	memset(reply, 0, sizeof(reply));
+	if (get_jack_data(type, reply) != 0)
+		return 0;
+
+	return sprintf(buf, "%s", reply);
+}
+
+/*
+ * sysfs show/store handlers, one pair per jack type.  show reads the
+ * current value from the host; store forwards the written string to
+ * the host and returns the consumed length.
+ */
+static ssize_t show_charger_online(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return get_data_for_show(jack_type_charger, buf);
+}
+
+static ssize_t store_charger_online(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ set_jack_data(jack_type_charger, buf);
+ return strnlen(buf, count);
+}
+
+static ssize_t show_earjack_online(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return get_data_for_show(jack_type_earjack, buf);
+}
+
+static ssize_t store_earjack_online(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ set_jack_data(jack_type_earjack, buf);
+ return strnlen(buf, count);
+}
+
+static ssize_t show_earkey_online(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return get_data_for_show(jack_type_earkey, buf);
+}
+
+static ssize_t store_earkey_online(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ set_jack_data(jack_type_earkey, buf);
+ return strnlen(buf, count);
+}
+
+static ssize_t show_hdmi_online(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return get_data_for_show(jack_type_hdmi, buf);
+}
+
+static ssize_t store_hdmi_online(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ set_jack_data(jack_type_hdmi, buf);
+ return strnlen(buf, count);
+}
+
+static ssize_t show_usb_online(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return get_data_for_show(jack_type_usb, buf);
+}
+
+static ssize_t store_usb_online(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ set_jack_data(jack_type_usb, buf);
+ return strnlen(buf, count);
+}
+
+/* Attribute definitions; files are created per capability bit in
+ * maru_jack_sysfs_create_file(). */
+static DEVICE_ATTR(charger_online, S_IRUGO | S_IWUSR, show_charger_online, store_charger_online);
+static DEVICE_ATTR(earjack_online, S_IRUGO | S_IWUSR, show_earjack_online, store_earjack_online);
+static DEVICE_ATTR(earkey_online, S_IRUGO | S_IWUSR, show_earkey_online, store_earkey_online);
+static DEVICE_ATTR(hdmi_online, S_IRUGO | S_IWUSR, show_hdmi_online, store_hdmi_online);
+static DEVICE_ATTR(usb_online, S_IRUGO | S_IWUSR, show_usb_online, store_usb_online);
+
+/*
+ * Create one sysfs attribute file per capability bit reported by the
+ * host.  Returns 0 on success or the first device_create_file() error.
+ * NOTE(review): on a mid-way failure, files created earlier are left
+ * in place; the caller is expected to unregister the whole device.
+ */
+static int maru_jack_sysfs_create_file(struct device *dev)
+{
+ int result = 0;
+
+ DLOG(KERN_INFO, "sysfs_create_file\n");
+
+ if (jack_capability & jack_cap_charger) {
+ result = device_create_file(dev, &dev_attr_charger_online);
+ if (result){
+ DLOG(KERN_ERR, "failed to create charger_online file\n");
+ return result;
+ }
+ }
+
+ if (jack_capability & jack_cap_earjack) {
+ result = device_create_file(dev, &dev_attr_earjack_online);
+ if (result){
+ DLOG(KERN_ERR, "failed to create earjack_online file\n");
+ return result;
+ }
+ }
+
+ if (jack_capability & jack_cap_earkey) {
+ result = device_create_file(dev, &dev_attr_earkey_online);
+ if (result){
+ DLOG(KERN_ERR, "failed to create earkey_online file\n");
+ return result;
+ }
+ }
+
+ if (jack_capability & jack_cap_hdmi) {
+ result = device_create_file(dev, &dev_attr_hdmi_online);
+ if (result){
+ DLOG(KERN_ERR, "failed to create hdmi_online file\n");
+ return result;
+ }
+ }
+
+ if (jack_capability & jack_cap_usb) {
+ result = device_create_file(dev, &dev_attr_usb_online);
+ if (result){
+ DLOG(KERN_ERR, "failed to create usb_online file\n");
+ return result;
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * Remove all attribute files unconditionally.  Removing a file that
+ * was never created (capability bit unset) is harmless.
+ */
+static void maru_jack_sysfs_remove_file(struct device *dev)
+{
+ DLOG(KERN_INFO, "sysfs_remove_file\n");
+
+ device_remove_file(dev, &dev_attr_charger_online);
+ device_remove_file(dev, &dev_attr_earjack_online);
+ device_remove_file(dev, &dev_attr_earkey_online);
+ device_remove_file(dev, &dev_attr_hdmi_online);
+ device_remove_file(dev, &dev_attr_usb_online);
+}
+
+/* No-op .release for the statically allocated platform device below;
+ * required so the device core does not warn on unregister. */
+static void maru_jack_sysfs_dev_release(struct device *dev)
+{
+ DLOG(KERN_INFO, "sysfs_dev_release\n");
+}
+
+/* Static platform device that anchors the jack sysfs files. */
+static struct platform_device the_pdev = {
+ .name = DEVICE_NAME,
+ .id = -1,
+ .dev = {
+ .release = maru_jack_sysfs_dev_release,
+ }
+};
+
+/*
+ * virtio probe: allocate driver state, register the sysfs platform
+ * device, set up the single virtqueue, query the host for the jack
+ * capability mask and create the matching attribute files.
+ * Returns 0 on success or a negative errno; all allocations are
+ * unwound on failure.
+ */
+static int jack_probe(struct virtio_device* dev){
+	int err = 0, index = 0;
+	struct jack_data *data;
+	char capability_buf[__MAX_BUF_JACK];
+
+	DLOG(KERN_INFO, "jack_probe\n");
+
+	v_jack = kmalloc(sizeof(struct virtio_jack), GFP_KERNEL);
+	if (!v_jack) {
+		/* the original dereferenced NULL on allocation failure */
+		DLOG(KERN_ERR, "kmalloc failure\n");
+		return -ENOMEM;
+	}
+
+	v_jack->vdev = dev;
+	dev->priv = v_jack;
+	v_jack->flags = 0;
+
+	err = platform_device_register(&the_pdev);
+	if (err) {
+		DLOG(KERN_ERR, "platform_device_register failure\n");
+		goto free_vjack;
+	}
+
+	data = kzalloc(sizeof(struct jack_data), GFP_KERNEL);
+	if (!data) {
+		DLOG(KERN_ERR, "kzalloc failure\n");
+		err = -ENOMEM;
+		goto unregister_pdev;
+	}
+
+	dev_set_drvdata(&the_pdev.dev, (void*)data);
+
+	v_jack->vq = virtio_find_single_vq(dev, jack_vq_done, "jack");
+	if (IS_ERR(v_jack->vq)) {
+		DLOG(KERN_ERR, "virtio queue is not found.\n");
+		/* the original returned err == 0 (success) here */
+		err = PTR_ERR(v_jack->vq);
+		goto free_data;
+	}
+
+	virtqueue_enable_cb(v_jack->vq);
+
+	memset(&v_jack->msginfo, 0x00, sizeof(v_jack->msginfo));
+
+	sg_init_table(v_jack->sg_vq, 2);
+	for (; index < 2; index++) {
+		sg_set_buf(&v_jack->sg_vq[index], &v_jack->msginfo, sizeof(v_jack->msginfo));
+	}
+
+	mutex_init(&v_jack->lock);
+
+	DLOG(KERN_INFO, "request jack capability");
+
+	memset(capability_buf, 0, sizeof(capability_buf));
+
+	err = get_jack_data(jack_type_list, capability_buf);
+	if (err) {
+		DLOG(KERN_ERR, "Cannot get jack list.\n");
+		/* the original cleaned up here but then fell through and
+		 * kept using the freed state — return instead */
+		goto free_data;
+	}
+
+	jack_capability = jack_atoi(capability_buf);
+	DLOG(KERN_INFO, "jack capability is %02x", jack_capability);
+
+	err = maru_jack_sysfs_create_file(&the_pdev.dev);
+	if (err) {
+		DLOG(KERN_ERR, "sysfs_create_file failure\n");
+		goto free_data;
+	}
+
+	return 0;
+
+free_data:
+	kfree(data);
+unregister_pdev:
+	platform_device_unregister(&the_pdev);
+free_vjack:
+	kfree(v_jack);
+	v_jack = NULL;
+	return err;
+}
+
+/*
+ * virtio remove: free the per-platform-device drvdata, tear down the
+ * sysfs files and platform device, then release the driver state.
+ */
+static void jack_remove(struct virtio_device* dev){
+ void *data = dev_get_drvdata(&the_pdev.dev);
+
+ DLOG(KERN_INFO, "sysfs_exit\n");
+
+ if (data) {
+ kfree(data);
+ }
+ maru_jack_sysfs_remove_file(&the_pdev.dev);
+ platform_device_unregister(&the_pdev);
+
+ if (v_jack) {
+ kfree(v_jack);
+ v_jack = NULL;
+ }
+}
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+/* virtio driver glue for the maru jack device. */
+static struct virtio_driver virtio_jack_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE ,
+ },
+ .id_table = id_table,
+ .probe = jack_probe,
+ .remove = jack_remove,
+};
+
+/* Module init: register with the virtio bus. */
+static int __init maru_jack_init(void)
+{
+ DLOG(KERN_INFO, "maru_%s: init\n", DEVICE_NAME);
+ return register_virtio_driver(&virtio_jack_driver);
+}
+
+/* Module exit: unregister from the virtio bus. */
+static void __exit maru_jack_exit(void)
+{
+ DLOG(KERN_INFO, "maru_%s: exit\n", DEVICE_NAME);
+ unregister_virtio_driver(&virtio_jack_driver);
+}
+
+module_init(maru_jack_init);
+module_exit(maru_jack_exit);
+
+/*
+ * "GPL2" is not an ident the module loader recognizes (the canonical
+ * string is "GPL v2"); an unknown ident taints the kernel as
+ * proprietary.  The description also wrongly said "Power Driver" —
+ * this is the jack driver.
+ */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jinhyung Choi <jinhyung2.choi@samsung.com>");
+MODULE_DESCRIPTION("Emulator Virtio Jack Driver");
--- /dev/null
+/*
+ * Virtual device node
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * JinHyung Choi <jinhyung2.choi@samsung.com>
+ * SooYoung Ha <yoosah.ha@samsung.com>
+ * Sungmin Ha <sungmin82.ha@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+
+#define DEVICE_NAME "power_supply"
+#define FILE_PERMISSION (S_IRUGO | S_IWUSR)
+
+/* Local cache size for the last host reply. */
+#define __MAX_BUF_POWER 512
+
+//#define DEBUG_MARU_POWER_SUPPLY
+
+/* Debug logging; compiled out unless DEBUG_MARU_POWER_SUPPLY is set. */
+#ifdef DEBUG_MARU_POWER_SUPPLY
+#define DLOG(level, fmt, ...) \
+ printk(level "maru_%s: " fmt, DEVICE_NAME, ##__VA_ARGS__)
+#else
+// do nothing
+#define DLOG(level, fmt, ...)
+#endif
+
+/* Host message payload size. */
+#define __MAX_BUF_SIZE 1024
+
+static struct virtio_device_id id_table[] = { { VIRTIO_ID_POWER,
+ VIRTIO_DEV_ANY_ID }, { 0 }, };
+
+/* Wire format shared with the host: payload plus type/request words. */
+struct msg_info {
+ char buf[__MAX_BUF_SIZE];
+
+ uint16_t type;
+ uint16_t req;
+};
+
+/* Per-driver state: virtio handles, one in-flight message, and the
+ * scatterlist/flag/lock used to exchange it with the host. */
+struct virtio_power {
+ struct virtio_device* vdev;
+ struct virtqueue* vq;
+
+ struct msg_info msginfo;
+
+ struct scatterlist sg_vq[2];
+
+ int flags; /* 1 once a reply has arrived; reset by the reader */
+ struct mutex lock;
+};
+
+/* Battery properties exposed via sysfs, used in msg_info.type. */
+enum power_types {
+ power_type_capacity = 0,
+ power_type_charge_full,
+ power_type_charge_now,
+ power_type_max
+};
+
+/* msg_info.req values: guest get/set, host answer. */
+enum request_cmd {
+ request_get = 0,
+ request_set,
+ request_answer
+};
+
+struct virtio_power *v_power;
+
+/* sysfs class/device hosting the battery attribute files. */
+static struct class* power_class;
+static struct device* power_device;
+
+/* Last reply payload from the host, guarded by v_power->lock. */
+static char power_data [__MAX_BUF_POWER];
+
+/* Readers in get_power_data() sleep here until power_vq_done() fires. */
+static DECLARE_WAIT_QUEUE_HEAD(wq);
+
+/*
+ * Virtqueue callback: pull the host's reply, cache its payload in
+ * power_data and wake any reader blocked in get_power_data().
+ * NOTE(review): mutex_lock() in a virtqueue callback assumes the
+ * callback runs in process context — confirm for this transport.
+ */
+static void power_vq_done(struct virtqueue *vq) {
+	unsigned int len;
+	struct msg_info* msg;
+
+	msg = (struct msg_info*) virtqueue_get_buf(v_power->vq, &len);
+	if (msg == NULL) {
+		DLOG(KERN_ERR, "failed to virtqueue_get_buf");
+		return;
+	}
+
+	if (msg->req != request_answer) {
+		DLOG(KERN_DEBUG, "receive queue- not an answer message: %d", msg->req);
+		return;
+	}
+	/* (dropped: `msg->buf == NULL` — the address of an array member
+	 * is never NULL, so the check was dead code) */
+
+	DLOG(KERN_DEBUG, "msg buf: %s, req: %d, type: %d", msg->buf, msg->req, msg->type);
+
+	mutex_lock(&v_power->lock);
+	memset(power_data, 0, __MAX_BUF_POWER);
+	/*
+	 * msg->buf is __MAX_BUF_SIZE (1024) bytes but power_data only
+	 * __MAX_BUF_POWER (512): the original strcpy() could overflow.
+	 * Use a bounded, always-NUL-terminated copy.
+	 */
+	strlcpy(power_data, msg->buf, sizeof(power_data));
+	v_power->flags = 1;
+	mutex_unlock(&v_power->lock);
+
+	wake_up_interruptible(&wq);
+}
+
+/*
+ * Send a "set" request for battery property @type with payload @buf to
+ * the host.  @buf comes from a sysfs store handler and may be up to
+ * PAGE_SIZE bytes; power_data is 512 bytes and msginfo.buf 1024, so
+ * the original strcpy()s could overflow both — replaced with bounded
+ * copies.  Fire-and-forget: errors are only logged.
+ */
+static void set_power_data(int type, const char* buf)
+{
+	int err = 0;
+
+	if (buf == NULL) {
+		DLOG(KERN_ERR, "set_power buf is NULL.");
+		return;
+	}
+
+	if (v_power == NULL) {
+		DLOG(KERN_ERR, "Invalid power handle");
+		return;
+	}
+
+	mutex_lock(&v_power->lock);
+	memset(power_data, 0, __MAX_BUF_POWER);
+	memset(&v_power->msginfo, 0, sizeof(v_power->msginfo));
+
+	strlcpy(power_data, buf, sizeof(power_data));
+
+	v_power->msginfo.req = request_set;
+	v_power->msginfo.type = type;
+	strlcpy(v_power->msginfo.buf, buf, sizeof(v_power->msginfo.buf));
+	mutex_unlock(&v_power->lock);
+
+	DLOG(KERN_DEBUG, "set_power_data type: %d, req: %d, buf: %s",
+			v_power->msginfo.type, v_power->msginfo.req, v_power->msginfo.buf);
+
+	err = virtqueue_add_outbuf(v_power->vq, v_power->sg_vq, 1, &v_power->msginfo, GFP_ATOMIC);
+	if (err < 0) {
+		DLOG(KERN_ERR, "failed to add buffer to virtqueue (err = %d)", err);
+		return;
+	}
+
+	virtqueue_kick(v_power->vq);
+}
+
+/*
+ * Ask the host for battery property @type and copy the NUL-terminated
+ * reply into @data (caller must provide at least __MAX_BUF_POWER
+ * bytes).  Returns 0 on success, -1 on argument/queueing failure, or
+ * -ERESTARTSYS when the wait is interrupted by a signal.
+ */
+static int get_power_data(int type, char* data)
+{
+	struct scatterlist *sgs[2];
+	int err = 0;
+
+	if (v_power == NULL || data == NULL) {
+		DLOG(KERN_ERR, "Invalid power handle or data is NULL");
+		return -1;
+	}
+
+	mutex_lock(&v_power->lock);
+	memset(&v_power->msginfo, 0, sizeof(v_power->msginfo));
+
+	v_power->msginfo.req = request_get;
+	v_power->msginfo.type = type;
+	mutex_unlock(&v_power->lock);
+
+	DLOG(KERN_DEBUG, "get_power_data type: %d, req: %d",
+			v_power->msginfo.type, v_power->msginfo.req);
+
+	sgs[0] = &v_power->sg_vq[0];
+	sgs[1] = &v_power->sg_vq[1];
+	err = virtqueue_add_sgs(v_power->vq, sgs, 1, 1, &v_power->msginfo, GFP_ATOMIC);
+	if (err < 0) {
+		DLOG(KERN_ERR, "failed to add buffer to virtqueue (err = %d)", err);
+		return -1;
+	}
+
+	virtqueue_kick(v_power->vq);
+
+	/* The original ignored this return value and read stale data after
+	 * a signal; bail out so the caller can retry. */
+	if (wait_event_interruptible(wq, v_power->flags != 0))
+		return -ERESTARTSYS;
+
+	mutex_lock(&v_power->lock);
+	v_power->flags = 0;
+	/* +1 copies the terminating NUL so @data is a valid C string */
+	memcpy(data, power_data, strlen(power_data) + 1);
+	mutex_unlock(&v_power->lock);
+
+	return 0;
+}
+
+/*
+ * Fetch the current value for battery property @type and format it
+ * into the sysfs output buffer @buf.  Returns the number of bytes
+ * written, or 0 when the host query fails.
+ */
+static int get_data_for_show(int type, char* buf)
+{
+	char reply[__MAX_BUF_POWER];
+
+	memset(reply, 0, sizeof(reply));
+	if (get_power_data(type, reply) != 0)
+		return 0;
+
+	return sprintf(buf, "%s", reply);
+}
+
+/*
+ * sysfs show/store handlers for the battery attributes.  show reads
+ * the current value from the host; store forwards the written string
+ * to the host and returns the consumed length.
+ */
+static ssize_t show_capacity(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return get_data_for_show(power_type_capacity, buf);
+}
+
+static ssize_t store_capacity(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ set_power_data(power_type_capacity, buf);
+ return strnlen(buf, count);
+}
+
+static ssize_t show_charge_full(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return get_data_for_show(power_type_charge_full, buf);
+}
+
+static ssize_t store_charge_full(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ set_power_data(power_type_charge_full, buf);
+ return strnlen(buf, count);
+}
+
+static ssize_t show_charge_now(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return get_data_for_show(power_type_charge_now, buf);
+}
+
+static ssize_t store_charge_now(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ set_power_data(power_type_charge_now, buf);
+ return strnlen(buf, count);
+}
+
+/* Attribute table; all three files are created in init_device() and
+ * removed in class_cleanup(). */
+static struct device_attribute ps_device_attributes[] = {
+ __ATTR(capacity, FILE_PERMISSION, show_capacity, store_capacity),
+ __ATTR(charge_full, FILE_PERMISSION, show_charge_full, store_charge_full),
+ __ATTR(charge_now, FILE_PERMISSION, show_charge_now, store_charge_now),
+};
+
+/*
+ * Remove the battery attribute files and destroy the device/class.
+ *
+ * The original loop ran i = 2..1, so it never removed attribute 0
+ * ("capacity") and called device_unregister() plus device_destroy()
+ * twice for the same device (a double put — device_destroy() alone is
+ * the correct teardown for device_create()).  Remove each file exactly
+ * once, then tear the device and class down once.
+ */
+static void class_cleanup (void)
+{
+	int i;
+
+	if (power_device != NULL) {
+		for (i = 0; i < 3; i++)
+			device_remove_file(power_device, &ps_device_attributes[i]);
+
+		device_destroy(power_class, MKDEV(0,0));
+		power_device = NULL;
+	}
+
+	class_destroy(power_class);
+	power_class = NULL;
+}
+
+/*
+ * Create the "battery" device under power_class and its three
+ * attribute files.  Returns 0 on success, -1 after cleaning up on
+ * failure.
+ */
+static int init_device(void)
+{
+	int err = 0, i = 0;
+	power_device = device_create(power_class, NULL, MKDEV(0,0), NULL, "battery");
+	/* device_create() returns ERR_PTR on failure; the original used the
+	 * bogus pointer in device_create_file() below */
+	if (IS_ERR(power_device)) {
+		printk(KERN_ERR
+				"maru_%s: failed to create battery device\n", DEVICE_NAME);
+		power_device = NULL;
+		goto device_err;
+	}
+
+	for (i = 0; i < 3; i++) {
+		err = device_create_file(power_device, &ps_device_attributes[i]);
+		if (err) {
+			printk(KERN_ERR
+				"maru_%s: failed to create power_supply files\n", DEVICE_NAME);
+			goto device_err;
+		}
+	}
+
+	return err;
+device_err:
+	class_cleanup();
+	return -1;
+}
+
+/*
+ * Release virtqueues, driver state and the sysfs class/device.
+ * NOTE(review): this is also called on probe error paths before
+ * find_vqs() has run — confirm del_vqs() is safe on a device with no
+ * queues for this transport.
+ */
+static void cleanup(struct virtio_device* dev) {
+ dev->config->del_vqs(dev);
+
+ if (v_power) {
+ kfree(v_power);
+ v_power = NULL;
+ }
+
+ class_cleanup();
+}
+
+/*
+ * virtio probe: allocate driver state, create the sysfs class/device
+ * and attribute files, then set up the single virtqueue.
+ * Returns 0 on success or a negative errno; state is unwound on
+ * failure.
+ */
+static int power_probe(struct virtio_device* dev)
+{
+	int ret = 0;
+	int index = 0;
+
+	DLOG(KERN_INFO, "Power probe starts");
+
+	v_power = kmalloc(sizeof(struct virtio_power), GFP_KERNEL);
+	if (!v_power) {
+		/* the original dereferenced NULL on allocation failure */
+		return -ENOMEM;
+	}
+
+	v_power->vdev = dev;
+	dev->priv = v_power;
+
+	power_class = class_create(THIS_MODULE, DEVICE_NAME);
+	/* class_create() returns ERR_PTR, never NULL — the original NULL
+	 * check could not fire */
+	if (IS_ERR(power_class)) {
+		DLOG(KERN_ERR, "Power class creation is failed.");
+		ret = PTR_ERR(power_class);
+		power_class = NULL;
+		kfree(v_power);
+		v_power = NULL;
+		return ret;
+	}
+
+	ret = init_device();
+	if (ret) {
+		cleanup(dev);
+		return ret;
+	}
+
+	v_power->vq = virtio_find_single_vq(dev, power_vq_done, "power");
+	if (IS_ERR(v_power->vq)) {
+		/* the original returned ret == 0 (success) here */
+		ret = PTR_ERR(v_power->vq);
+		cleanup(dev);
+		DLOG(KERN_ERR, "failed to init virt queue");
+		return ret;
+	}
+
+	virtqueue_enable_cb(v_power->vq);
+
+	memset(&v_power->msginfo, 0x00, sizeof(v_power->msginfo));
+
+	sg_init_table(v_power->sg_vq, 2);
+	for (; index < 2; index++) {
+		sg_set_buf(&v_power->sg_vq[index], &v_power->msginfo, sizeof(v_power->msginfo));
+	}
+
+	mutex_init(&v_power->lock);
+
+	DLOG(KERN_INFO, "Power probe completes");
+
+	return 0;
+}
+
+/*
+ * virtio remove: reset the device, then free queues, state and the
+ * sysfs class.  NOTE(review): the local `v_power` shadows the global
+ * of the same name; cleanup() operates on the global — they are the
+ * same object via dev->priv, but the shadowing is confusing.
+ */
+static void power_remove(struct virtio_device* dev)
+{
+ struct virtio_power* v_power = dev->priv;
+ if (!v_power)
+ {
+ DLOG(KERN_ERR, "virtio_power is NULL");
+ return;
+ }
+
+ dev->config->reset(dev);
+
+ cleanup(dev);
+
+ DLOG(KERN_INFO, "Power driver is removed.");
+}
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+/* virtio driver glue for the maru power-supply device. */
+static struct virtio_driver virtio_power_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE ,
+ },
+ .id_table = id_table,
+ .probe = power_probe,
+ .remove = power_remove,
+};
+
+/* Module init: register with the virtio bus. */
+static int __init maru_power_supply_init(void)
+{
+ DLOG(KERN_INFO, "maru_%s: init\n", DEVICE_NAME);
+ return register_virtio_driver(&virtio_power_driver);
+}
+
+/* Module exit: unregister from the virtio bus. */
+static void __exit maru_power_supply_exit(void)
+{
+ DLOG(KERN_INFO, "maru_%s: exit\n", DEVICE_NAME);
+ unregister_virtio_driver(&virtio_power_driver);
+}
+
+module_init(maru_power_supply_init);
+module_exit(maru_power_supply_exit);
+
+/* "GPL2" is not an ident the module loader recognizes (the canonical
+ * string is "GPL v2"); an unknown ident taints the kernel. */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jinhyung Choi <jinhyung2.choi@samsung.com>");
+MODULE_DESCRIPTION("Emulator Virtio Power Driver");
--- /dev/null
+/*
+ * Maru Virtio EmulatorVirtualDeviceInterface Device Driver
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * DaiYoung Kim <daiyoung777.kim@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/cdev.h>
+
+#define DRIVER_NAME "EVDI"
+
+/* Logging helpers, one per severity, all prefixed with DRIVER_NAME. */
+#define LOGDEBUG(fmt, ...) \
+ printk(KERN_DEBUG "%s: " fmt, DRIVER_NAME, ##__VA_ARGS__)
+
+#define LOGINFO(fmt, ...) \
+ printk(KERN_INFO "%s: " fmt, DRIVER_NAME, ##__VA_ARGS__)
+
+#define LOGERR(fmt, ...) \
+ printk(KERN_ERR "%s: " fmt, DRIVER_NAME, ##__VA_ARGS__)
+
+/* Two char devices are created: evdi0 (read side) and evdi1 (write side). */
+#define NUM_OF_EVDI 2
+#define DEVICE_NAME "evdi"
+
+/* device protocol */
+#define __MAX_BUF_SIZE 1024
+
+enum ioctl_cmd {
+ IOCTL_CMD_BOOT_DONE,
+};
+
+/* Routing targets for msg_info.route. */
+enum
+{
+ route_qemu = 0,
+ route_control_server = 1,
+ route_monitor = 2
+};
+
+/* Control-server client serial number. */
+typedef unsigned int CSCliSN;
+
+/* Wire format shared with the host. */
+struct msg_info {
+ char buf[__MAX_BUF_SIZE];
+
+ uint32_t route;
+ uint32_t use; /* number of payload bytes in buf */
+ uint16_t count;
+ uint16_t index;
+
+ CSCliSN cclisn;
+};
+
+/* device protocol */
+
+#define SIZEOF_MSG_INFO sizeof(struct msg_info)
+
+/* One received message queued for the reader. */
+struct msg_buf {
+ struct msg_info msg;
+ struct list_head list;
+};
+
+#define SIZEOF_MSG_BUF sizeof(struct msg_buf)
+
+/* Indices into pevdi_info[]: 0 = read device, 1 = write device. */
+enum {
+ EVID_READ = 0, EVID_WRITE = 1
+};
+
+/* Per-char-device state. */
+struct virtevdi_info {
+
+ wait_queue_head_t waitqueue; /* readers block here until data arrives */
+ spinlock_t inbuf_lock; /* guards vevdi->read_list */
+ spinlock_t outvq_lock;
+
+ struct cdev cdev;
+ char name[10];
+
+ int index;
+ bool guest_connected;
+
+} *pevdi_info[NUM_OF_EVDI];
+
+/* Shared virtio state: receive/send queues and message staging areas. */
+struct virtio_evdi {
+ struct virtio_device* vdev;
+ struct virtqueue* rvq;
+ struct virtqueue* svq;
+
+ struct msg_info read_msginfo;
+ struct msg_info send_msginfo;
+
+ struct list_head read_list;
+ struct list_head write_list;
+
+ struct scatterlist sg_read[2];
+ struct scatterlist sg_send[2];
+};
+
+struct virtio_evdi *vevdi;
+
+static struct virtio_device_id id_table[] = { { VIRTIO_ID_EVDI,
+ VIRTIO_DEV_ANY_ID }, { 0 }, };
+
+/* Base of the allocated char-device number range and its sysfs class. */
+static dev_t evdi_dev_number;
+static struct class* evdi_class;
+
+
+/*
+ * Thin kmalloc(GFP_KERNEL) wrapper; returns NULL on allocation
+ * failure, exactly like kmalloc itself (the original's NULL check
+ * before returning NULL was a no-op).
+ */
+static void* __xmalloc(size_t size)
+{
+	return kmalloc(size, GFP_KERNEL);
+}
+
+/*
+ * Post the shared read_msginfo buffer on the receive queue and kick
+ * the host so it has somewhere to write the next message.
+ * Returns 0 on success or the negative virtqueue error.
+ */
+int _make_buf_and_kick(void)
+{
+ int ret;
+ memset(&vevdi->read_msginfo, 0x00, sizeof(vevdi->read_msginfo));
+ ret = virtqueue_add_inbuf(vevdi->rvq, vevdi->sg_read,
+ 1, &vevdi->read_msginfo, GFP_ATOMIC);
+ if (ret < 0) {
+ LOGERR("failed to add buffer to virtqueue.(%d)\n", ret);
+ return ret;
+ }
+
+ virtqueue_kick(vevdi->rvq);
+
+ return 0;
+}
+
+/*
+ * Re-arm @vq with @msg as a one-entry receive buffer and kick the
+ * host.  Returns the virtqueue_add_inbuf() result (negative errno on
+ * failure).
+ */
+static int add_inbuf(struct virtqueue *vq, struct msg_info *msg)
+{
+ struct scatterlist sg[1];
+ int ret;
+
+ sg_init_one(sg, msg, sizeof(struct msg_info));
+
+ ret = virtqueue_add_inbuf(vq, sg, 1, msg, GFP_ATOMIC);
+ virtqueue_kick(vq);
+ return ret;
+}
+
+/* True when at least one host message is queued on vevdi->read_list. */
+static bool has_readdata(struct virtevdi_info *evdi)
+{
+	unsigned long flags;
+	bool nonempty;
+
+	spin_lock_irqsave(&evdi->inbuf_lock, flags);
+	nonempty = !list_empty(&vevdi->read_list);
+	spin_unlock_irqrestore(&evdi->inbuf_lock, flags);
+
+	return nonempty;
+}
+
+#define HEADER_SIZE 4
+#define ID_SIZE 10
+#define GUEST_CONNECTION_CATEGORY "guest"
+/*
+ * Notify the control server whether a guest client holds the evdi
+ * device open.  The message payload is the "guest" category string at
+ * offset 0 and the 1-byte connected flag at offset ID_SIZE + 3.
+ */
+static void send_guest_connected_msg(bool connected)
+{
+	int err;
+	struct msg_info* _msg;
+	char connect = (char)connected;
+	if (vevdi == NULL) {
+		LOGERR("invalid evdi handle\n");
+		return;
+	}
+
+	_msg = &vevdi->send_msginfo;
+
+	memset(_msg, 0, sizeof(vevdi->send_msginfo));
+
+	/*
+	 * The original copied 7 bytes out of the 6-byte literal "guest"
+	 * (5 chars + NUL) — an out-of-bounds read.  sizeof copies exactly
+	 * the literal; the 7th byte is already 0 from the memset above,
+	 * so the bytes on the wire are unchanged.
+	 */
+	memcpy(_msg->buf, GUEST_CONNECTION_CATEGORY,
+			sizeof(GUEST_CONNECTION_CATEGORY));
+	memcpy(_msg->buf + ID_SIZE + 3, &connect, 1);
+	_msg->route = route_control_server;
+	_msg->use = ID_SIZE + HEADER_SIZE;
+	_msg->count = 1;
+	_msg->index = 0;
+	_msg->cclisn = 0;
+
+	err = virtqueue_add_outbuf(vevdi->svq, vevdi->sg_send, 1,
+			_msg, GFP_ATOMIC);
+
+	LOGERR("send guest connection message to qemu with (%d)\n", connected);
+
+	if (err < 0) {
+		LOGERR("failed to add buffer to virtqueue (err = %d)\n", err);
+		return;
+	}
+
+	virtqueue_kick(vevdi->svq);
+}
+
+
+/*
+ * Char-device open: find the per-device state whose cdev matches the
+ * opened inode, mark the guest as connected and arm the receive
+ * queue.  Returns 0 on success or a negative errno.
+ */
+static int evdi_open(struct inode* inode, struct file* filp)
+{
+	int i, ret;
+	struct virtevdi_info* evdi_info;
+	struct cdev *cdev = inode->i_cdev;
+
+	evdi_info = NULL;
+	LOGDEBUG("evdi_open\n");
+
+	for (i = 0; i < NUM_OF_EVDI; i++)
+	{
+		LOGDEBUG("evdi info index = %d, cdev dev = %d, inode dev = %d\n",
+				i, pevdi_info[i]->cdev.dev, cdev->dev);
+
+		if (pevdi_info[i]->cdev.dev == cdev->dev)
+		{
+			evdi_info = pevdi_info[i];
+			break;
+		}
+	}
+
+	/* the original dereferenced evdi_info unconditionally and would
+	 * oops here if no entry matched */
+	if (evdi_info == NULL) {
+		LOGERR("no matching evdi device for cdev %d\n", cdev->dev);
+		return -ENODEV;
+	}
+
+	filp->private_data = evdi_info;
+
+	evdi_info->guest_connected = true;
+
+	ret = _make_buf_and_kick();
+	if (ret < 0)
+		return ret;
+
+
+	LOGDEBUG("evdi_opened\n");
+	return 0;
+}
+
+/*
+ * Char-device release: mark the guest disconnected and tell the host
+ * so the control server can drop the session.
+ */
+static int evdi_close(struct inode* i, struct file* filp) {
+ struct virtevdi_info *evdi_info;
+
+ evdi_info = filp->private_data;
+ evdi_info->guest_connected = false;
+
+ send_guest_connected_msg(false);
+
+ LOGDEBUG("evdi_closed\n");
+ return 0;
+}
+
+
+
+/*
+ * Char-device read: pop the oldest host message off read_list, copy
+ * it to userspace and re-arm the receive queue.
+ *
+ * NOTE(review): list_first_entry()/list_del() run *before* inbuf_lock
+ * is taken, racing with evdi_recv_done()'s list_add_tail — verify.
+ * NOTE(review): copy_to_user() uses the caller-supplied @len with no
+ * clamp to sizeof(struct msg_info); a larger @len reads past the
+ * freed msg_buf — verify intended contract with userspace.
+ */
+static ssize_t evdi_read(struct file *filp, char __user *ubuf, size_t len,
+ loff_t *f_pos)
+{
+ struct virtevdi_info *evdi;
+
+ ssize_t ret;
+ struct msg_buf* next;
+ unsigned long flags;
+
+ evdi = filp->private_data;
+
+ if (!has_readdata(evdi))
+ {
+ if (filp->f_flags & O_NONBLOCK)
+ {
+ LOGERR("list is empty, return EAGAIN\n");
+ return -EAGAIN;
+ }
+ /* blocking reads are not implemented; callers get -EFAULT */
+ return -EFAULT;
+ }
+
+
+ next = list_first_entry(&vevdi->read_list, struct msg_buf, list);
+ if (next == NULL) {
+ LOGERR("invliad list entry\n");
+ return -EFAULT;
+ }
+
+ ret = copy_to_user(ubuf, &next->msg, len);
+
+ list_del(&next->list);
+ kfree(next);
+
+ spin_lock_irqsave(&pevdi_info[EVID_READ]->inbuf_lock, flags);
+
+
+ /* give the host a fresh receive buffer for the next message */
+ if (add_inbuf(vevdi->rvq, &vevdi->read_msginfo) < 0)
+ {
+ LOGERR("failed add_buf\n");
+ }
+
+ spin_unlock_irqrestore(&pevdi_info[EVID_READ]->inbuf_lock, flags);
+
+ if (ret < 0)
+ return -EFAULT;
+
+
+
+ *f_pos += len;
+
+ return len;
+}
+
+/*
+ * Char-device write: copy a full struct msg_info from userspace into
+ * the shared send staging area and queue it to the host.
+ *
+ * NOTE(review): the copy size is always sizeof(send_msginfo),
+ * regardless of @len — userspace must always hand in a complete
+ * msg_info; verify callers.  A single staging buffer also means
+ * concurrent writers overwrite each other (no outvq_lock taken).
+ */
+static ssize_t evdi_write(struct file *f, const char __user *ubuf, size_t len,
+ loff_t* f_pos)
+{
+ int err = 0;
+ ssize_t ret = 0;
+
+ if (vevdi == NULL) {
+ LOGERR("invalid evdi handle\n");
+ return 0;
+ }
+
+ memset(&vevdi->send_msginfo, 0, sizeof(vevdi->send_msginfo));
+ ret = copy_from_user(&vevdi->send_msginfo, ubuf, sizeof(vevdi->send_msginfo));
+
+ LOGDEBUG("copy_from_user ret = %zd, msg = %s", ret, vevdi->send_msginfo.buf);
+
+ if (ret) {
+ ret = -EFAULT;
+ return ret;
+ }
+
+
+ err = virtqueue_add_outbuf(vevdi->svq, vevdi->sg_send, 1,
+ &vevdi->send_msginfo, GFP_ATOMIC);
+
+ /*
+ err = virtqueue_add_buf(vevdi->svq, vevdi->sg_send, 1, 0,
+ &_msg, GFP_ATOMIC);*/
+
+ if (err < 0) {
+ LOGERR("failed to add buffer to virtqueue (err = %d)\n", err);
+ return 0;
+ }
+
+ virtqueue_kick(vevdi->svq);
+
+ //LOG("send to host\n");
+
+ return len;
+}
+
+/*
+ * poll/select support: POLLHUP once the device is no longer open on
+ * the guest side, POLLIN|POLLRDNORM while host messages are queued.
+ */
+static unsigned int evdi_poll(struct file *filp, poll_table *wait)
+{
+	struct virtevdi_info *info = filp->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(filp, &info->waitqueue, wait);
+
+	/* evdi got unplugged */
+	if (!info->guest_connected)
+		return POLLHUP;
+
+	if (has_readdata(info)) {
+		LOGDEBUG("POLLIN | POLLRDNORM\n");
+		mask |= POLLIN | POLLRDNORM;
+	}
+
+	return mask;
+}
+
+/*
+ * ioctl handler.  Only IOCTL_CMD_BOOT_DONE is supported (logged and
+ * acknowledged); anything else is rejected with -EIO.
+ */
+static long evdi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	if (cmd != IOCTL_CMD_BOOT_DONE) {
+		LOGERR("not available command.\n");
+		return -EIO;
+	}
+
+	LOGINFO("BOOTING DONE.\n");
+	return 0;
+}
+
+/* Char-device operations for /dev/evdi0 and /dev/evdi1. */
+static struct file_operations evdi_fops = {
+ .owner = THIS_MODULE,
+ .open = evdi_open,
+ .release = evdi_close,
+ .read = evdi_read,
+ .write = evdi_write,
+ .poll = evdi_poll,
+ .unlocked_ioctl = evdi_ioctl,
+};
+
+
+
+/*
+ * Receive-queue callback: drain every completed buffer, copy each
+ * message into a freshly allocated msg_buf on read_list and wake the
+ * reader.
+ *
+ * NOTE(review): __xmalloc() can return NULL; the memset/memcpy below
+ * would then oops — verify, and consider dropping the message instead.
+ */
+static void evdi_recv_done(struct virtqueue *rvq) {
+
+ unsigned int len;
+ unsigned long flags;
+ struct msg_info* _msg;
+ struct msg_buf* msgbuf;
+
+
+
+ /* TODO : check if guest has been connected. */
+
+ _msg = (struct msg_info*) virtqueue_get_buf(vevdi->rvq, &len);
+ if (_msg == NULL ) {
+ LOGERR("failed to virtqueue_get_buf\n");
+ return;
+ }
+
+ do {
+ //LOG("msg use = %d\n", _msg->use);
+ //LOG("msg data = %s\n", _msg->buf);
+
+ /* insert into queue */
+ msgbuf = (struct msg_buf*) __xmalloc(SIZEOF_MSG_BUF);
+ memset(msgbuf, 0x00, sizeof(*msgbuf));
+ memcpy(&(msgbuf->msg), _msg, sizeof(*_msg));
+
+ //LOG("copied msg data = %s, %s\n", msgbuf->msg.buf, _msg->buf);
+
+ spin_lock_irqsave(&pevdi_info[EVID_READ]->inbuf_lock, flags);
+
+ list_add_tail(&msgbuf->list, &vevdi->read_list);
+ //LOG("== wake_up_interruptible = %d!\n", ++g_wake_up_interruptible_count);
+
+ spin_unlock_irqrestore(&pevdi_info[EVID_READ]->inbuf_lock, flags);
+
+ wake_up_interruptible(&pevdi_info[EVID_READ]->waitqueue);
+
+ _msg = (struct msg_info*) virtqueue_get_buf(vevdi->rvq, &len);
+ if (_msg == NULL) {
+ break;
+ }
+
+ } while (true);
+
+
+ /*
+ if (add_inbuf(vevdi->rvq, &vevdi->read_msginfo) < 0)
+ {
+ LOG("failed add_buf\n");
+ }
+ */
+}
+
+/* Send-queue callback: reclaim one completed outbound buffer;
+ * nothing else to do since send_msginfo is statically owned. */
+static void evdi_send_done(struct virtqueue *svq) {
+ unsigned int len = 0;
+
+ virtqueue_get_buf(svq, &len);
+}
+
+/*
+ *
+ */
+
+/*
+ * Create the receive ("evdi_input") and send ("evdi_output")
+ * virtqueues and bind their completion callbacks.
+ * Returns 0 on success or the negative find_vqs() error.
+ */
+static int init_vqs(struct virtio_evdi *evdi) {
+ struct virtqueue *vqs[2];
+ vq_callback_t *callbacks[] = { evdi_recv_done, evdi_send_done };
+ const char *names[] = { "evdi_input", "evdi_output" };
+ int err;
+
+ err = evdi->vdev->config->find_vqs(evdi->vdev, 2, vqs, callbacks, names);
+ if (err < 0)
+ return err;
+
+ evdi->rvq = vqs[0];
+ evdi->svq = vqs[1];
+
+ return 0;
+}
+
+/*
+ * Create the evdi character devices: allocate a chrdev region, create the
+ * device class and, for each of the NUM_OF_EVDI minors, a per-device info
+ * structure, cdev and device node.
+ *
+ * Returns 0 on success, a negative errno (or -1 for the region/class
+ * steps, as before) on failure.
+ *
+ * NOTE(review): on mid-loop failure, earlier allocations, the class and
+ * the region are not rolled back here; evdi_exit() performs teardown.
+ */
+int _init_device(void)
+{
+    int i, ret;
+
+    if (alloc_chrdev_region(&evdi_dev_number, 0, NUM_OF_EVDI, DEVICE_NAME) < 0) {
+        LOGERR("fail to alloc_chrdev_region\n");
+        return -1;
+    }
+
+    evdi_class = class_create(THIS_MODULE, DEVICE_NAME);
+    if (evdi_class == NULL) {
+        unregister_chrdev_region(evdi_dev_number, NUM_OF_EVDI);
+        return -1;
+    }
+
+    for (i = 0; i < NUM_OF_EVDI; i++) {
+        pevdi_info[i] = kmalloc(sizeof(struct virtevdi_info), GFP_KERNEL);
+        if (!pevdi_info[i]) {
+            LOGERR("Bad malloc\n");
+            return -ENOMEM;
+        }
+
+        sprintf(pevdi_info[i]->name, "%s%d", DEVICE_NAME, i);
+        pevdi_info[i]->index = i;
+        pevdi_info[i]->guest_connected = false;
+
+        /* init wait queue and locks BEFORE the cdev can be opened */
+        init_waitqueue_head(&pevdi_info[i]->waitqueue);
+        spin_lock_init(&pevdi_info[i]->inbuf_lock);
+        spin_lock_init(&pevdi_info[i]->outvq_lock);
+
+        cdev_init(&pevdi_info[i]->cdev, &evdi_fops);
+        pevdi_info[i]->cdev.owner = THIS_MODULE;
+        ret = cdev_add(&pevdi_info[i]->cdev, (evdi_dev_number + i), 1);
+        /* BUG FIX: cdev_add() returns a negative errno, not just -1 */
+        if (ret < 0) {
+            LOGERR("Bad cdev\n");
+            return ret;
+        }
+
+        device_create(evdi_class, NULL, (evdi_dev_number + i), NULL, "%s%d",
+                DEVICE_NAME, i);
+    }
+
+    return 0;
+}
+
+
+/*
+ * virtio probe: allocate the driver context, create the char devices,
+ * set up the rx/tx virtqueues and prepare the static msg_info buffers.
+ */
+static int evdi_probe(struct virtio_device *dev)
+{
+    int ret;
+
+    vevdi = kmalloc(sizeof(struct virtio_evdi), GFP_KERNEL);
+    if (!vevdi) {
+        /* BUG FIX: the old code dereferenced a NULL allocation */
+        return -ENOMEM;
+    }
+
+    INIT_LIST_HEAD(&vevdi->read_list);
+
+    vevdi->vdev = dev;
+    dev->priv = vevdi;
+
+    ret = _init_device();
+    if (ret) {
+        LOGERR("failed to _init_device\n");
+        /* BUG FIX: the context leaked on this path */
+        kfree(vevdi);
+        dev->priv = NULL;
+        return ret;
+    }
+    ret = init_vqs(vevdi);
+    if (ret) {
+        dev->config->del_vqs(dev);
+        kfree(vevdi);
+        dev->priv = NULL;
+        LOGERR("failed to init_vqs\n");
+        return ret;
+    }
+
+    /* enable callback */
+    virtqueue_enable_cb(vevdi->rvq);
+    virtqueue_enable_cb(vevdi->svq);
+
+    /* sg_init_one() fully (re)initialises the entries, making the former
+     * separate sg_set_buf() calls redundant; initialise once. */
+    memset(&vevdi->read_msginfo, 0x00, sizeof(vevdi->read_msginfo));
+    memset(&vevdi->send_msginfo, 0x00, sizeof(vevdi->send_msginfo));
+    sg_init_one(vevdi->sg_read, &vevdi->read_msginfo, sizeof(vevdi->read_msginfo));
+    sg_init_one(vevdi->sg_send, &vevdi->send_msginfo, sizeof(vevdi->send_msginfo));
+
+    LOGDEBUG("EVDI Probe completed");
+    return 0;
+}
+
+/* virtio remove: reset the device, tear down its queues, free the context. */
+static void evdi_remove(struct virtio_device *dev)
+{
+    struct virtio_evdi *priv = dev->priv;
+
+    if (!priv) {
+        LOGERR("evdi is NULL\n");
+        return;
+    }
+
+    dev->config->reset(dev);
+    dev->config->del_vqs(dev);
+    kfree(priv);
+
+    LOGDEBUG("driver is removed.\n");
+}
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+/* virtio bus glue: binds the ids in id_table to probe/remove above */
+static struct virtio_driver virtio_evdi_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE ,
+ },
+ .id_table = id_table,
+ .probe = evdi_probe,
+ .remove = evdi_remove,
+};
+
+/* Module entry point: register this driver with the virtio bus. */
+static int __init evdi_init(void)
+{
+    LOGDEBUG("EVDI driver initialized.\n");
+    return register_virtio_driver(&virtio_evdi_driver);
+}
+
+/*
+ * Module exit: tear down in reverse order of creation.
+ * BUG FIX: the chrdev region was previously unregistered BEFORE the
+ * devices/cdevs that live in it were destroyed; destroy them first,
+ * then the class, then release the region.
+ */
+static void __exit evdi_exit(void)
+{
+    int i;
+
+    for (i = 0; i < NUM_OF_EVDI; i++) {
+        device_destroy(evdi_class, MKDEV(MAJOR(evdi_dev_number), i));
+        cdev_del(&pevdi_info[i]->cdev);
+        kfree(pevdi_info[i]);
+    }
+
+    class_destroy(evdi_class);
+    unregister_chrdev_region(evdi_dev_number, NUM_OF_EVDI);
+
+    unregister_virtio_driver(&virtio_evdi_driver);
+
+    LOGDEBUG("EVDI driver is destroyed.\n");
+}
+
+module_init(evdi_init);
+module_exit(evdi_exit);
+
+/* BUG FIX: "GPL2" is not a licence ident the module loader recognises
+ * (see include/linux/module.h); it would mark the module proprietary
+ * and taint the kernel. The canonical ident is "GPL v2". */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("DaiYoung Kim <daiyoung777.kim@samsung.com>");
+MODULE_DESCRIPTION("Emulator Virtio EmulatorVirtualDeviceInterface Driver");
+
--- /dev/null
+/*
+ * Maru Virtio Hwkey Device Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Sungmin Ha <sungmin82.ha@samsung.com>
+ * Sangjin Kim <sangjin3.kim@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/input.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/kthread.h>
+
+/* BUG FIX: "GPL2" is not a recognised licence ident (the loader would
+ * treat the module as proprietary and taint the kernel); use "GPL v2". */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sungmin Ha <sungmin82.ha@samsung.com>");
+MODULE_DESCRIPTION("Emulator Virtio Hwkey driver");
+
+#define DEVICE_NAME "virtio-hwkey"
+#define MAX_BUF_COUNT 64
+/* index of the next vbuf[] slot to consume; wraps at MAX_BUF_COUNT */
+static int vqidx = 0;
+
+/* This structure must match the qemu definitions */
+typedef struct EmulHwkeyEvent {
+    uint8_t event_type;
+    uint32_t keycode;
+} EmulHwkeyEvent;
+
+/* driver context (single global instance: vh) */
+typedef struct virtio_hwkey
+{
+    struct virtio_device *vdev;
+    struct virtqueue *vq;
+    struct input_dev *idev;
+
+    struct scatterlist sg[MAX_BUF_COUNT];
+    struct EmulHwkeyEvent vbuf[MAX_BUF_COUNT];
+
+    struct mutex event_mutex;
+} virtio_hwkey;
+
+virtio_hwkey *vh;
+
+static struct virtio_device_id id_table[] = {
+    { VIRTIO_ID_HWKEY, VIRTIO_DEV_ANY_ID },
+    { 0 },
+};
+
+/* keep it consistent with emulator-skin definition */
+enum {
+    KEY_PRESSED = 1,
+    KEY_RELEASED = 2,
+};
+
+/* scratch status of the last virtqueue_add_inbuf() call */
+static int err = 0;
+/* loop/buffer index reused across probe and callback */
+static unsigned int index = 0;
+
+/*
+ * Virtqueue callback: consume key events from vbuf[] (filled in by the
+ * host) until a zeroed slot is found, inject each into the input
+ * subsystem, zero the slot and re-post the buffers, then kick.
+ *
+ * NOTE(review): vbuf[vqidx] is read before virtqueue_get_buf() confirms
+ * a completion, and 'err' from virtqueue_add_inbuf() is never checked;
+ * this relies on the emulator keeping the ring in lock-step — verify.
+ */
+static void vq_hwkey_callback(struct virtqueue *vq)
+{
+ struct EmulHwkeyEvent hwkey_event;
+ unsigned int len = 0;
+ void *token = NULL;
+#if 0
+ printk(KERN_INFO "vq hwkey callback\n");
+#endif
+ while (1) {
+ memcpy(&hwkey_event, &vh->vbuf[vqidx], sizeof(hwkey_event));
+ /* a zeroed event_type marks the end of pending events */
+ if (hwkey_event.event_type == 0) {
+ break;
+ }
+ printk(KERN_INFO "keycode: %d, event_type: %d, vqidx: %d\n", hwkey_event.keycode, hwkey_event.event_type, vqidx);
+ if (hwkey_event.event_type == KEY_PRESSED) {
+ input_event(vh->idev, EV_KEY, hwkey_event.keycode, true);
+ }
+ else if (hwkey_event.event_type == KEY_RELEASED) {
+ input_event(vh->idev, EV_KEY, hwkey_event.keycode, false);
+ }
+ else {
+ printk(KERN_ERR "Unknown event type\n");
+ }
+
+ input_sync(vh->idev);
+ /* clear the slot so it reads as "empty" next time around */
+ memset(&vh->vbuf[vqidx], 0x00, sizeof(hwkey_event));
+ token = virtqueue_get_buf(vh->vq, &len);
+ if (len > 0) {
+ err = virtqueue_add_inbuf(vh->vq, vh->sg, MAX_BUF_COUNT, token, GFP_ATOMIC);
+ }
+
+ vqidx++;
+ if (vqidx == MAX_BUF_COUNT) {
+ vqidx = 0;
+ }
+ }
+
+ virtqueue_kick(vh->vq);
+}
+
+/* char-device open: nothing to set up, log only */
+static int virtio_hwkey_open(struct inode *inode, struct file *file)
+{
+ printk(KERN_INFO "virtio hwkey device is opened\n");
+ return 0;
+}
+
+/* char-device release: nothing to tear down, log only */
+static int virtio_hwkey_release(struct inode *inode, struct file *file)
+{
+ printk(KERN_INFO "virtio hwkey device is closed\n");
+ return 0;
+}
+
+/* input-core open hook: log only */
+static int input_hwkey_open(struct input_dev *dev)
+{
+ printk(KERN_INFO "input hwkey device is opened\n");
+ return 0;
+}
+
+/* input-core close hook: log only */
+static void input_hwkey_close(struct input_dev *dev)
+{
+ printk(KERN_INFO "input hwkey device is closed\n");
+}
+
+/* char-device entry points (no read/write: events go via the input core) */
+struct file_operations virtio_hwkey_fops = {
+ .owner = THIS_MODULE,
+ .open = virtio_hwkey_open,
+ .release = virtio_hwkey_release,
+};
+
+/*
+ * virtio probe: allocate the context, find the single vq, post
+ * MAX_BUF_COUNT event buffers and register the input device.
+ */
+static int virtio_hwkey_probe(struct virtio_device *vdev)
+{
+    int ret = 0;
+    vqidx = 0;
+
+    printk(KERN_INFO "virtio hwkey driver is probed\n");
+
+    /* init virtio */
+    vdev->priv = vh = kmalloc(sizeof(*vh), GFP_KERNEL);
+    if (!vh) {
+        return -ENOMEM;
+    }
+    memset(&vh->vbuf, 0x00, sizeof(vh->vbuf));
+
+    vh->vdev = vdev;
+
+    vh->vq = virtio_find_single_vq(vh->vdev, vq_hwkey_callback, "virtio-hwkey-vq");
+    if (IS_ERR(vh->vq)) {
+        ret = PTR_ERR(vh->vq);
+        kfree(vh);
+        vdev->priv = NULL;
+        return ret;
+    }
+
+    /* enable callback */
+    virtqueue_enable_cb(vh->vq);
+
+    sg_init_table(vh->sg, MAX_BUF_COUNT);
+
+    /* prepare the buffers */
+    for (index = 0; index < MAX_BUF_COUNT; index++) {
+        sg_set_buf(&vh->sg[index], &vh->vbuf[index], sizeof(EmulHwkeyEvent));
+    }
+
+    err = virtqueue_add_inbuf(vh->vq, vh->sg,
+            MAX_BUF_COUNT, (void *)vh->vbuf, GFP_ATOMIC);
+    /* BUG FIX: the old code tested 'err' inside the sg loop, before it
+     * was assigned in this function, so add_inbuf failures were silent. */
+    if (err < 0) {
+        printk(KERN_ERR "failed to add buffer\n");
+        kfree(vh);
+        vdev->priv = NULL;
+        return err;
+    }
+
+    /* register for input device */
+    vh->idev = input_allocate_device();
+    if (!vh->idev) {
+        printk(KERN_ERR "failed to allocate a input hwkey device\n");
+        ret = -1;
+        kfree(vh);
+        vdev->priv = NULL;
+        return ret;
+    }
+
+    vh->idev->name = "Maru Virtio Hwkey";
+    vh->idev->dev.parent = &(vdev->dev);
+
+    input_set_drvdata(vh->idev, vh);
+    vh->idev->open = input_hwkey_open;
+    vh->idev->close = input_hwkey_close;
+
+    vh->idev->evbit[0] = BIT_MASK(EV_KEY);
+    /* to support any keycode: set every bit of keybit.
+     * BUG FIX: memset()'s fill value is truncated to one byte; pass 0xff
+     * instead of the misleading 0xffffffff (same bytes, honest intent). */
+    memset(vh->idev->keybit, 0xff, sizeof(unsigned long) * BITS_TO_LONGS(KEY_CNT));
+
+    ret = input_register_device(vh->idev);
+    if (ret) {
+        printk(KERN_ERR "input hwkey driver cannot registered\n");
+        ret = -1;
+        input_free_device(vh->idev);
+        kfree(vh);
+        vdev->priv = NULL;
+        return ret;
+    }
+
+    virtqueue_kick(vh->vq);
+    index = 0;
+
+    return 0;
+}
+
+/* virtio remove: reset, delete the queues, unregister input, free context. */
+static void virtio_hwkey_remove(struct virtio_device *vdev)
+{
+    virtio_hwkey *priv = vdev->priv;
+
+    printk(KERN_INFO "virtio hwkey driver is removed\n");
+
+    vdev->config->reset(vdev); /* reset device */
+    vdev->config->del_vqs(vdev); /* clean up the queues */
+
+    input_unregister_device(priv->idev);
+    kfree(priv);
+}
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+/* virtio bus glue: binds the ids in id_table to probe/remove above */
+static struct virtio_driver virtio_hwkey_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtio_hwkey_probe,
+ .remove = virtio_hwkey_remove,
+};
+
+/* Module entry point: register this driver with the virtio bus. */
+static int __init virtio_hwkey_init(void)
+{
+    printk(KERN_INFO "virtio hwkey device is initialized\n");
+    return register_virtio_driver(&virtio_hwkey_driver);
+}
+
+/* Module exit point: unregister from the virtio bus. */
+static void __exit virtio_hwkey_exit(void)
+{
+    printk(KERN_INFO "virtio hwkey device is destroyed\n");
+    unregister_virtio_driver(&virtio_hwkey_driver);
+}
+
+module_init(virtio_hwkey_init);
+module_exit(virtio_hwkey_exit);
+
--- /dev/null
+/*
+ * Maru Virtio Keyboard Device Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Kitae Kim <kitae.kim@samsung.com>
+ * SeokYeon Hwang <syeon.hwang@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/input.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+/* BUG FIX: "GPL2" is not a recognised licence ident (the loader would
+ * treat the module as proprietary and taint the kernel); use "GPL v2". */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Kitae Kim <kt920.kim@samsung.com>");
+MODULE_DESCRIPTION("Emulator Virtio Keyboard Driver");
+
+#define DRIVER_NAME "virtio-keyboard"
+#define VKBD_LOG(log_level, fmt, ...) \
+    printk(log_level "%s: " fmt, DRIVER_NAME, ##__VA_ARGS__)
+
+#define KBD_BUF_SIZE 100
+/* index of the next kbdevt[] slot to consume; wraps at KBD_BUF_SIZE */
+static int vqidx = 0;
+
+/* wire format shared with the emulator; must match qemu's definition */
+struct EmulKbdEvent
+{
+    uint16_t code;
+    uint16_t value;
+};
+
+/* driver context (single global instance: vkbd) */
+struct virtio_keyboard
+{
+    struct virtio_device *vdev;
+    struct virtqueue *vq;
+    struct input_dev *idev;
+
+    struct EmulKbdEvent kbdevt[KBD_BUF_SIZE];
+    struct scatterlist sg[KBD_BUF_SIZE];
+
+    struct mutex event_mutex;
+};
+
+struct virtio_keyboard *vkbd;
+
+static struct virtio_device_id id_table[] = {
+    { VIRTIO_ID_KEYBOARD, VIRTIO_DEV_ANY_ID },
+    { 0 },
+};
+
+/*
+ * Virtqueue callback: consume key events from kbdevt[] until a zeroed
+ * slot (code == 0) is found, inject each into the input subsystem,
+ * zero the slot, then re-post the whole buffer set and kick.
+ *
+ * NOTE(review): 'len' is declared int but virtqueue_get_buf() takes an
+ * unsigned int*; also kbdevt[] is read before the completion is
+ * confirmed — relies on the emulator side keeping lock-step; verify.
+ */
+static void vq_keyboard_handle(struct virtqueue *vq)
+{
+ int err = 0, len = 0;
+ void *data;
+ struct EmulKbdEvent kbdevent;
+
+ VKBD_LOG(KERN_DEBUG, "virtqueue callback.\n");
+ data = virtqueue_get_buf(vq, &len);
+ if (!data) {
+ VKBD_LOG(KERN_ERR, "there is no available buffer.\n");
+ return;
+ }
+
+ VKBD_LOG(KERN_DEBUG, "vqidx: %d\n", vqidx);
+ while (1) {
+ memcpy(&kbdevent, &vkbd->kbdevt[vqidx], sizeof(kbdevent));
+#if 1
+ /* a zeroed keycode marks the end of pending events */
+ if (kbdevent.code == 0) {
+ break;
+ }
+#endif
+ /* how to get keycode and value. */
+ input_event(vkbd->idev, EV_KEY, kbdevent.code, kbdevent.value);
+ input_sync(vkbd->idev);
+ printk(KERN_ERR "input_event code = %d, value = %d\n", kbdevent.code, kbdevent.value);
+ memset(&vkbd->kbdevt[vqidx], 0x00, sizeof(kbdevent));
+ vqidx++;
+ if (vqidx == KBD_BUF_SIZE) {
+ vqidx = 0;
+ }
+ }
+ err = virtqueue_add_inbuf(vq, vkbd->sg, KBD_BUF_SIZE, (void *)vkbd->kbdevt, GFP_ATOMIC);
+ if (err < 0) {
+ VKBD_LOG(KERN_ERR, "failed to add buffer to virtqueue.\n");
+ return;
+ }
+
+ virtqueue_kick(vkbd->vq);
+}
+
+/* input-core open hook: log only */
+static int input_keyboard_open(struct input_dev *dev)
+{
+ VKBD_LOG(KERN_DEBUG, "input_keyboard_open\n");
+ return 0;
+}
+
+/* input-core close hook: log only */
+static void input_keyboard_close(struct input_dev *dev)
+{
+ VKBD_LOG(KERN_DEBUG, "input_keyboard_close\n");
+}
+
+/* disabled char-device interface (events go via the input core);
+ * kept under #if 0 by the original author */
+#if 0
+static int virtio_keyboard_open(struct inode *inode, struct file *file)
+{
+ VKBD_LOG(KERN_DEBUG, "opened.\n");
+ return 0;
+}
+
+static int virtio_keyboard_release(struct inode *inode, struct file *file)
+{
+ VKBD_LOG(KERN_DEBUG, "closed\n");
+ return 0;
+}
+
+struct file_operations virtio_keyboard_fops = {
+ .owner = THIS_MODULE,
+ .open = virtio_keyboard_open,
+ .release = virtio_keyboard_release,
+};
+#endif
+
+/*
+ * virtio probe: allocate the context, find the vq, post KBD_BUF_SIZE
+ * event buffers and register the input device.
+ */
+static int virtio_keyboard_probe(struct virtio_device *vdev)
+{
+    int ret = 0;
+    int index = 0;
+
+    VKBD_LOG(KERN_INFO, "driver is probed\n");
+    vqidx = 0;
+
+    vdev->priv = vkbd = kmalloc(sizeof(struct virtio_keyboard), GFP_KERNEL);
+    if (!vkbd) {
+        return -ENOMEM;
+    }
+    memset(&vkbd->kbdevt, 0x00, sizeof(vkbd->kbdevt));
+
+    vkbd->vdev = vdev;
+    mutex_init(&vkbd->event_mutex);
+
+    vkbd->vq = virtio_find_single_vq(vkbd->vdev, vq_keyboard_handle, "virtio-keyboard-vq");
+    if (IS_ERR(vkbd->vq)) {
+        ret = PTR_ERR(vkbd->vq);
+        kfree(vkbd);
+        vdev->priv = NULL;
+        return ret;
+    }
+
+    /* BUG FIX: initialise the scatterlist before sg_set_buf(); the old
+     * code used it uninitialised (crashes with CONFIG_DEBUG_SG). */
+    sg_init_table(vkbd->sg, KBD_BUF_SIZE);
+    for (; index < KBD_BUF_SIZE; index++) {
+        sg_set_buf(&vkbd->sg[index],
+                &vkbd->kbdevt[index],
+                sizeof(struct EmulKbdEvent));
+    }
+
+    ret = virtqueue_add_inbuf(vkbd->vq, vkbd->sg, KBD_BUF_SIZE, (void *)vkbd->kbdevt, GFP_ATOMIC);
+    if (ret < 0) {
+        VKBD_LOG(KERN_ERR, "failed to add buffer to virtqueue.\n");
+        kfree(vkbd);
+        vdev->priv = NULL;
+        return ret;
+    }
+
+    /* register for input device */
+    vkbd->idev = input_allocate_device();
+    if (!vkbd->idev) {
+        VKBD_LOG(KERN_ERR, "failed to allocate a input device.\n");
+        kfree(vkbd);
+        vdev->priv = NULL;
+        return -ENOMEM;
+    }
+
+    vkbd->idev->name = "Maru VirtIO Keyboard";
+    vkbd->idev->dev.parent = &(vdev->dev);
+
+    input_set_drvdata(vkbd->idev, vkbd);
+    vkbd->idev->open = input_keyboard_open;
+    vkbd->idev->close = input_keyboard_close;
+
+    /* initialize a device as a keyboard device.
+     * refer to struct input_dev from input.h. */
+    vkbd->idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP)
+            | BIT_MASK(EV_MSC) | BIT_MASK(EV_LED);
+    vkbd->idev->ledbit[0] = BIT_MASK(LED_NUML) | BIT_MASK(LED_CAPSL)
+            | BIT_MASK(LED_SCROLLL) | BIT_MASK(LED_COMPOSE)
+            | BIT_MASK(LED_KANA);
+    set_bit(MSC_SCAN, vkbd->idev->mscbit);
+
+    /* set keybit field as xinput keyboard. */
+    vkbd->idev->keybit[0] = 0xfffffffe;
+    vkbd->idev->keybit[1] = 0xffffffff;
+    vkbd->idev->keybit[2] = 0xffefffff;
+    vkbd->idev->keybit[3] = 0xfebeffdf;
+    vkbd->idev->keybit[4] = 0xc14057ff;
+    vkbd->idev->keybit[5] = 0xff9f207a;
+    vkbd->idev->keybit[6] = 0x7;
+    vkbd->idev->keybit[7] = 0x10000;
+
+    ret = input_register_device(vkbd->idev);
+    if (ret) {
+        VKBD_LOG(KERN_ERR, "failed to register a input device.\n");
+        input_free_device(vkbd->idev);
+        kfree(vkbd);
+        vdev->priv = NULL;
+        return ret;
+    }
+
+    /* (removed: a second sg_set_buf loop that could never execute —
+     * 'index' was already KBD_BUF_SIZE after the first loop) */
+
+    virtqueue_kick(vkbd->vq);
+
+    return 0;
+}
+
+/* virtio remove: reset the device, drop its queues, unregister the
+ * input device and release the global context. */
+static void virtio_keyboard_remove(struct virtio_device *vdev)
+{
+    struct virtio_keyboard *kbd = vkbd;
+
+    VKBD_LOG(KERN_INFO, "driver is removed.\n");
+    if (kbd == NULL) {
+        VKBD_LOG(KERN_ERR, "vkbd is NULL.\n");
+        return;
+    }
+
+    vdev->config->reset(vdev);
+    vdev->config->del_vqs(vdev);
+
+    input_unregister_device(kbd->idev);
+
+    vkbd = NULL;
+    kfree(kbd);
+}
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+/* virtio bus glue: binds the ids in id_table to probe/remove above */
+static struct virtio_driver virtio_keyboard_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .id_table = id_table,
+ .probe = virtio_keyboard_probe,
+ .remove = virtio_keyboard_remove,
+/* PM hooks were never implemented; kept disabled by the original author */
+#if 0
+#ifdef CONFIG_PM
+ .freeze = virtio_codec_freeze,
+ .restore = virtio_codec_restore,
+#endif
+#endif
+};
+
+/* Module entry point: register this driver with the virtio bus. */
+static int __init virtio_keyboard_init(void)
+{
+    VKBD_LOG(KERN_INFO, "driver is initialized.\n");
+    return register_virtio_driver(&virtio_keyboard_driver);
+}
+
+/* Module exit point: unregister from the virtio bus. */
+static void __exit virtio_keyboard_exit(void)
+{
+    VKBD_LOG(KERN_INFO, "driver is destroyed.\n");
+    unregister_virtio_driver(&virtio_keyboard_driver);
+}
+
+module_init(virtio_keyboard_init);
+module_exit(virtio_keyboard_exit);
--- /dev/null
+/*
+ * Maru Virtio NFC Device Driver
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Munkyu Im <munkyu.im@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/cdev.h>
+
+#define DRIVER_NAME "NFC"
+
+#define LOG(fmt, ...) \
+ printk(KERN_ERR "%s: " fmt, DRIVER_NAME, ##__VA_ARGS__)
+
+#define NUM_OF_NFC 2
+#define DEVICE_NAME "nfc"
+
+/* device protocol */
+#define NFC_MAX_BUF_SIZE 4096
+
+/* wire format shared with the emulator; header fields + payload */
+struct msg_info {
+ unsigned char client_id;
+ unsigned char client_type;
+ uint32_t use;
+ char buf[NFC_MAX_BUF_SIZE];
+};
+
+/* diagnostic counter of completed reads (see nfc_read) */
+static int g_read_count = 0;
+
+/* device protocol */
+
+/* list node wrapping one received msg_info on vnfc->read_list */
+struct msg_buf {
+ struct msg_info msg;
+ struct list_head list;
+};
+
+#define SIZEOF_MSG_BUF sizeof(struct msg_buf)
+
+/* minor-number roles; used as indices into pnfc_info[] */
+enum {
+ NFC_READ = 0, NFC_WRITE = 1
+};
+
+/* per-minor char-device state; name[10] fits "nfc" + one digit */
+struct virtnfc_info {
+
+ wait_queue_head_t waitqueue;
+ spinlock_t inbuf_lock;
+ spinlock_t outvq_lock;
+
+ struct cdev cdev;
+ char name[10];
+
+ int index;
+ bool guest_connected;
+
+} *pnfc_info[NUM_OF_NFC];
+
+/* driver context (single global instance: vnfc) */
+struct virtio_nfc {
+ struct virtio_device* vdev;
+ struct virtqueue* rvq;
+ struct virtqueue* svq;
+
+ struct msg_info read_msginfo;
+ struct msg_info send_msginfo;
+
+ struct list_head read_list;
+ struct list_head write_list;
+
+ struct scatterlist sg_read[2];
+ struct scatterlist sg_send[2];
+};
+
+struct virtio_nfc *vnfc;
+
+static struct virtio_device_id id_table[] = { { VIRTIO_ID_NFC,
+ VIRTIO_DEV_ANY_ID }, { 0 }, };
+
+static dev_t nfc_dev_number;
+static struct class* nfc_class;
+
+
+/* Thin kmalloc wrapper kept for symmetry with the list-node free path. */
+static void *__xmalloc(size_t size)
+{
+    /* kmalloc already yields NULL on failure; just pass it through */
+    return kmalloc(size, GFP_KERNEL);
+}
+
+/*
+ * Zero the shared receive msg_info, post it to the host via the receive
+ * virtqueue and kick. Returns 0 or the negative errno from add_inbuf.
+ * NOTE(review): not static and generically named — exported symbol may
+ * clash with other modules; consider making it static.
+ */
+int make_buf_and_kick(void)
+{
+ int ret;
+ memset(&vnfc->read_msginfo, 0x00, sizeof(vnfc->read_msginfo));
+ ret = virtqueue_add_inbuf(vnfc->rvq, vnfc->sg_read,
+ 1, &vnfc->read_msginfo, GFP_ATOMIC);
+ if (ret < 0) {
+ LOG("failed to add buffer to virtqueue.(%d)\n", ret);
+ return ret;
+ }
+
+ virtqueue_kick(vnfc->rvq);
+
+ return 0;
+}
+
+/*
+ * Post 'msg' as a single receive buffer on 'vq' and kick.
+ * NOTE(review): the sg spans NFC_MAX_BUF_SIZE (4096) bytes, but
+ * struct msg_info is larger (header fields + 4096-byte buf), so the
+ * host can only fill part of the struct — confirm this is intended.
+ */
+static int add_inbuf(struct virtqueue *vq, struct msg_info *msg)
+{
+ struct scatterlist sg[1];
+ int ret;
+
+ sg_init_one(sg, msg, NFC_MAX_BUF_SIZE);
+
+ ret = virtqueue_add_inbuf(vq, sg, 1, msg, GFP_ATOMIC);
+ virtqueue_kick(vq);
+ return ret;
+}
+
+/*
+ * Return true when at least one received message sits on the global
+ * read_list. Takes the caller's per-device inbuf_lock while looking.
+ */
+static bool has_readdata(struct virtnfc_info *nfc)
+{
+    unsigned long flags;
+    bool nonempty;
+
+    spin_lock_irqsave(&nfc->inbuf_lock, flags);
+    nonempty = !list_empty(&vnfc->read_list);
+    spin_unlock_irqrestore(&nfc->inbuf_lock, flags);
+
+    return nonempty;
+}
+
+
+/*
+ * open(): match the inode's cdev against the per-minor info table,
+ * mark the device connected and post the first receive buffer.
+ */
+static int nfc_open(struct inode *inode, struct file *filp)
+{
+    int i, ret;
+    struct virtnfc_info *nfc_info = NULL;
+    struct cdev *cdev = inode->i_cdev;
+
+    LOG("nfc_open\n");
+
+    for (i = 0; i < NUM_OF_NFC; i++) {
+        LOG("nfc info index = %d, cdev dev = %d, inode dev = %d\n",
+                i, pnfc_info[i]->cdev.dev, cdev->dev);
+
+        if (pnfc_info[i]->cdev.dev == cdev->dev) {
+            nfc_info = pnfc_info[i];
+            break;
+        }
+    }
+
+    /* BUG FIX: the old code dereferenced nfc_info unconditionally and
+     * crashed on a NULL pointer when no minor matched. */
+    if (!nfc_info)
+        return -ENODEV;
+
+    filp->private_data = nfc_info;
+    nfc_info->guest_connected = true;
+
+    ret = make_buf_and_kick();
+    if (ret < 0)
+        return ret;
+
+    LOG("nfc_opened\n");
+    return 0;
+}
+
+/* release(): mark the device as no longer connected to a guest reader. */
+static int nfc_close(struct inode *i, struct file *filp)
+{
+    struct virtnfc_info *info = filp->private_data;
+
+    info->guest_connected = false;
+
+    LOG("nfc_closed\n");
+    return 0;
+}
+
+
+
+/*
+ * read(): pop the oldest received message off read_list, copy it to
+ * user space and re-post the receive buffer to the host.
+ *
+ * BUG FIXES vs. the original:
+ *  - 'len' is clamped to sizeof(struct msg_info): a large user len
+ *    previously read past the end of the message node.
+ *  - the list head is inspected and unlinked under inbuf_lock; it was
+ *    previously accessed with no locking against nfc_recv_done().
+ *  - copy_to_user() returns the number of bytes NOT copied (never
+ *    negative), so any non-zero value is treated as a fault.
+ */
+static ssize_t nfc_read(struct file *filp, char __user *ubuf, size_t len,
+        loff_t *f_pos)
+{
+    struct virtnfc_info *nfc;
+    struct msg_buf *next;
+    unsigned long flags;
+    unsigned long not_copied;
+
+    LOG("nfc_read\n");
+    nfc = filp->private_data;
+    if (!has_readdata(nfc)) {
+        if (filp->f_flags & O_NONBLOCK) {
+            LOG("list is empty, return EAGAIN\n");
+            return -EAGAIN;
+        }
+        return -EFAULT;
+    }
+
+    if (len > sizeof(struct msg_info))
+        len = sizeof(struct msg_info);
+
+    spin_lock_irqsave(&pnfc_info[NFC_READ]->inbuf_lock, flags);
+    next = list_first_entry(&vnfc->read_list, struct msg_buf, list);
+    list_del(&next->list);
+    spin_unlock_irqrestore(&pnfc_info[NFC_READ]->inbuf_lock, flags);
+
+    not_copied = copy_to_user(ubuf, &next->msg, len);
+    kfree(next);
+
+    spin_lock_irqsave(&pnfc_info[NFC_READ]->inbuf_lock, flags);
+    if (add_inbuf(vnfc->rvq, &vnfc->read_msginfo) < 0) {
+        LOG("failed add_buf\n");
+    }
+    spin_unlock_irqrestore(&pnfc_info[NFC_READ]->inbuf_lock, flags);
+
+    LOG("nfc_read count = %d!\n", ++g_read_count);
+
+    if (not_copied)
+        return -EFAULT;
+
+    *f_pos += len;
+    return len;
+}
+
+/*
+ * write(): copy one msg_info from user space into the single shared
+ * send_msginfo and post it on the send virtqueue.
+ *
+ * NOTE(review): send_msginfo is shared by all writers with no locking
+ * (outvq_lock is never taken) — concurrent writes can interleave.
+ * NOTE(review): virtqueue failures return 0, not a negative errno,
+ * which user space will see as a silent short write — verify intent.
+ */
+static ssize_t nfc_write(struct file *f, const char __user *ubuf, size_t len,
+ loff_t* f_pos)
+{
+ int err = 0;
+ ssize_t ret = 0;
+
+ LOG("start of nfc_write len= %zu, msglen = %zu\n", len, sizeof(vnfc->send_msginfo));
+
+ if (vnfc == NULL) {
+ LOG("invalid nfc handle\n");
+ return 0;
+ }
+
+ memset(&vnfc->send_msginfo, 0, sizeof(vnfc->send_msginfo));
+ ret = copy_from_user(&vnfc->send_msginfo, ubuf, sizeof(vnfc->send_msginfo));
+
+ LOG("copy_from_user ret = %zd id = %02x, type = %02x, msg = %s use = %d\n",
+ ret, vnfc->send_msginfo.client_id, vnfc->send_msginfo.client_type,
+ vnfc->send_msginfo.buf, vnfc->send_msginfo.use);
+
+ if (ret) {
+ ret = -EFAULT;
+ return ret;
+ }
+
+ sg_init_one(vnfc->sg_send, &vnfc->send_msginfo, sizeof(vnfc->send_msginfo));
+
+ err = virtqueue_add_outbuf(vnfc->svq, vnfc->sg_send, 1,
+ &vnfc->send_msginfo, GFP_ATOMIC);
+
+ /*
+ err = virtqueue_add_buf(vnfc->svq, vnfc->sg_send, 1, 0,
+ &_msg, GFP_ATOMIC);*/
+
+ if (err < 0) {
+ LOG("failed to add buffer to virtqueue (err = %d)\n", err);
+ return 0;
+ }
+
+ virtqueue_kick(vnfc->svq);
+
+ LOG("send to host\n");
+
+ return len;
+}
+
+/* poll(): report POLLHUP after disconnect, POLLIN/POLLRDNORM when a
+ * received message is queued on read_list. */
+static unsigned int nfc_poll(struct file *filp, poll_table *wait)
+{
+    struct virtnfc_info *nfc = filp->private_data;
+    unsigned int mask = 0;
+
+    poll_wait(filp, &nfc->waitqueue, wait);
+
+    if (!nfc->guest_connected) {
+        /* nfc got unplugged */
+        return POLLHUP;
+    }
+
+    if (has_readdata(nfc)) {
+        LOG("POLLIN | POLLRDNORM\n");
+        mask |= POLLIN | POLLRDNORM;
+    }
+
+    return mask;
+}
+
+/* Character-device entry points for the nfc nodes created in init_device(). */
+static struct file_operations nfc_fops = {
+ .owner = THIS_MODULE,
+ .open = nfc_open,
+ .release = nfc_close,
+ .read = nfc_read,
+ .write = nfc_write,
+ .poll = nfc_poll,
+};
+
+
+
+/*
+ * Receive-vq callback: drain completed buffers, wrap each in a msg_buf,
+ * queue it on read_list and wake any blocked reader.
+ *
+ * NOTE(review): INIT_LIST_HEAD() below re-initialises read_list on every
+ * interrupt, orphaning (leaking) any messages a reader has not consumed
+ * yet. Kept for behaviour-compatibility, but this looks wrong — verify
+ * against nfc_read() before removing it.
+ */
+static void nfc_recv_done(struct virtqueue *rvq)
+{
+    unsigned int len;
+    unsigned long flags;
+    unsigned char *msg;
+    struct msg_buf *msgbuf;
+
+    LOG("nfc_recv_done\n");
+    /* TODO : check if guest has been connected. */
+
+    msg = (unsigned char *)virtqueue_get_buf(vnfc->rvq, &len);
+    if (msg == NULL) {
+        LOG("failed to virtqueue_get_buf\n");
+        return;
+    }
+
+    INIT_LIST_HEAD(&vnfc->read_list);
+    do {
+        /* insert into queue */
+        msgbuf = (struct msg_buf *)__xmalloc(SIZEOF_MSG_BUF);
+        if (!msgbuf) {
+            /* BUG FIX: the old code memset/memcpy'd a NULL allocation */
+            LOG("failed to allocate msg_buf\n");
+            return;
+        }
+        memset(msgbuf, 0x00, sizeof(*msgbuf));
+        /* BUG FIX: clamp the device-reported length so a bogus 'len'
+         * cannot overflow msgbuf->msg */
+        if (len > sizeof(msgbuf->msg))
+            len = sizeof(msgbuf->msg);
+        memcpy(&(msgbuf->msg), msg, len);
+
+        spin_lock_irqsave(&pnfc_info[NFC_READ]->inbuf_lock, flags);
+        list_add_tail(&msgbuf->list, &vnfc->read_list);
+        spin_unlock_irqrestore(&pnfc_info[NFC_READ]->inbuf_lock, flags);
+
+        wake_up_interruptible(&pnfc_info[NFC_READ]->waitqueue);
+
+        msg = (unsigned char *)virtqueue_get_buf(vnfc->rvq, &len);
+    } while (msg != NULL);
+}
+
+/* Send-vq callback: just reclaim the completed out-buffer. */
+static void nfc_send_done(struct virtqueue *svq)
+{
+    unsigned int unused = 0;
+
+    (void)virtqueue_get_buf(svq, &unused);
+}
+
+/*
+ *
+ */
+
+/*
+ * Allocate the two virtqueues from the transport: vqs[0] receives host
+ * messages (nfc_recv_done), vqs[1] completes guest sends (nfc_send_done).
+ * Returns 0 on success or the negative errno from find_vqs().
+ */
+static int init_vqs(struct virtio_nfc *nfc) {
+ struct virtqueue *vqs[2];
+ vq_callback_t *callbacks[] = { nfc_recv_done, nfc_send_done };
+ const char *names[] = { "nfc_input", "nfc_output" };
+ int err;
+
+ err = nfc->vdev->config->find_vqs(nfc->vdev, 2, vqs, callbacks, names);
+ if (err < 0)
+ return err;
+
+ nfc->rvq = vqs[0];
+ nfc->svq = vqs[1];
+
+ return 0;
+}
+
+/*
+ * Create the nfc character devices: allocate a chrdev region, create the
+ * device class and, for each of the NUM_OF_NFC minors, a per-device info
+ * structure, cdev and device node.
+ *
+ * Returns 0 on success, a negative errno (or -1 for the region/class
+ * steps, as before) on failure.
+ *
+ * NOTE(review): mid-loop failures leave earlier allocations, the class
+ * and the region in place; nfc_exit() performs teardown.
+ */
+int init_device(void)
+{
+    int i, ret;
+
+    if (alloc_chrdev_region(&nfc_dev_number, 0, NUM_OF_NFC, DEVICE_NAME) < 0) {
+        LOG("fail to alloc_chrdev_region\n");
+        return -1;
+    }
+
+    nfc_class = class_create(THIS_MODULE, DEVICE_NAME);
+    if (nfc_class == NULL) {
+        unregister_chrdev_region(nfc_dev_number, NUM_OF_NFC);
+        return -1;
+    }
+
+    for (i = 0; i < NUM_OF_NFC; i++) {
+        pnfc_info[i] = kmalloc(sizeof(struct virtnfc_info), GFP_KERNEL);
+        if (!pnfc_info[i]) {
+            LOG("Bad malloc\n");
+            return -ENOMEM;
+        }
+
+        sprintf(pnfc_info[i]->name, "%s%d", DEVICE_NAME, i);
+        pnfc_info[i]->index = i;
+        pnfc_info[i]->guest_connected = false;
+
+        /* init wait queue and locks BEFORE the cdev can be opened */
+        init_waitqueue_head(&pnfc_info[i]->waitqueue);
+        spin_lock_init(&pnfc_info[i]->inbuf_lock);
+        spin_lock_init(&pnfc_info[i]->outvq_lock);
+
+        cdev_init(&pnfc_info[i]->cdev, &nfc_fops);
+        pnfc_info[i]->cdev.owner = THIS_MODULE;
+        ret = cdev_add(&pnfc_info[i]->cdev, (nfc_dev_number + i), 1);
+        /* BUG FIX: cdev_add() returns a negative errno, not just -1 */
+        if (ret < 0) {
+            LOG("Bad cdev\n");
+            return ret;
+        }
+
+        device_create(nfc_class, NULL, (nfc_dev_number + i), NULL, "%s%d",
+                DEVICE_NAME, i);
+    }
+
+    return 0;
+}
+
+
+/*
+ * virtio probe: allocate the driver context, create the char devices,
+ * set up the rx/tx virtqueues and prepare the static msg_info buffers.
+ */
+static int nfc_probe(struct virtio_device *dev)
+{
+    int ret;
+
+    LOG("nfc_probe\n");
+    vnfc = kmalloc(sizeof(struct virtio_nfc), GFP_KERNEL);
+    if (!vnfc) {
+        /* BUG FIX: the old code dereferenced a NULL allocation */
+        return -ENOMEM;
+    }
+
+    INIT_LIST_HEAD(&vnfc->read_list);
+
+    vnfc->vdev = dev;
+    dev->priv = vnfc;
+
+    ret = init_device();
+    if (ret) {
+        LOG("failed to init_device\n");
+        /* BUG FIX: the context leaked on this path */
+        kfree(vnfc);
+        dev->priv = NULL;
+        return ret;
+    }
+    ret = init_vqs(vnfc);
+    if (ret) {
+        dev->config->del_vqs(dev);
+        kfree(vnfc);
+        dev->priv = NULL;
+        LOG("failed to init_vqs\n");
+        return ret;
+    }
+
+    /* enable callback */
+    virtqueue_enable_cb(vnfc->rvq);
+    virtqueue_enable_cb(vnfc->svq);
+
+    /* sg_init_one() fully (re)initialises the entries, making the former
+     * separate sg_set_buf() calls redundant; initialise once. */
+    memset(&vnfc->read_msginfo, 0x00, sizeof(vnfc->read_msginfo));
+    memset(&vnfc->send_msginfo, 0x00, sizeof(vnfc->send_msginfo));
+    sg_init_one(vnfc->sg_read, &vnfc->read_msginfo, sizeof(vnfc->read_msginfo));
+    sg_init_one(vnfc->sg_send, &vnfc->send_msginfo, sizeof(vnfc->send_msginfo));
+
+    LOG("NFC Probe completed");
+    return 0;
+}
+
+/* virtio remove: reset the device, tear down its queues, free the context. */
+static void nfc_remove(struct virtio_device *dev)
+{
+    struct virtio_nfc *priv = dev->priv;
+
+    if (!priv) {
+        LOG("nfc is NULL\n");
+        return;
+    }
+
+    dev->config->reset(dev);
+    dev->config->del_vqs(dev);
+    kfree(priv);
+
+    LOG("driver is removed.\n");
+}
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+/* virtio bus glue: binds the ids in id_table to probe/remove above */
+static struct virtio_driver virtio_nfc_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE ,
+ },
+ .id_table = id_table,
+ .probe = nfc_probe,
+ .remove = nfc_remove,
+};
+
+/* Module entry point: register this driver with the virtio bus. */
+static int __init nfc_init(void)
+{
+    LOG("NFC driver initialized.\n");
+    return register_virtio_driver(&virtio_nfc_driver);
+}
+
+/*
+ * Module exit: tear down in reverse order of creation.
+ * BUG FIX: the chrdev region was previously unregistered BEFORE the
+ * devices/cdevs that live in it were destroyed; destroy them first,
+ * then the class, then release the region.
+ */
+static void __exit nfc_exit(void)
+{
+    int i;
+
+    for (i = 0; i < NUM_OF_NFC; i++) {
+        device_destroy(nfc_class, MKDEV(MAJOR(nfc_dev_number), i));
+        cdev_del(&pnfc_info[i]->cdev);
+        kfree(pnfc_info[i]);
+    }
+
+    class_destroy(nfc_class);
+    unregister_chrdev_region(nfc_dev_number, NUM_OF_NFC);
+
+    unregister_virtio_driver(&virtio_nfc_driver);
+
+    LOG("NFC driver is destroyed.\n");
+}
+
+module_init(nfc_init);
+module_exit(nfc_exit);
+
+/* BUG FIX: "GPL2" is not a recognised licence ident; use "GPL v2" so
+ * the module is not marked proprietary (kernel taint). */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Munkyu Im <munkyu.im@samsung.com>");
+MODULE_DESCRIPTION("Emulator Virtio NFC Driver");
+
--- /dev/null
+/*
+ * Maru Virtio Rotary Device Driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Jo <jinhyung.jo@samsung.com>
+ * Sangho Park <sangho1206.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/input.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+/* "GPL2" is not a recognised licence ident and would taint the
+ * kernel; "GPL v2" is the correct string (include/linux/module.h). */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jinhyung Jo <jinhyung.jo@samsung.com>");
+MODULE_DESCRIPTION("Emulator Virtio Rotary Driver");
+
+#define DRIVER_NAME "tizen_detent"
+#define VR_LOG(log_level, fmt, ...) \
+ printk(log_level "%s: " fmt, DRIVER_NAME, ##__VA_ARGS__)
+
+/* Max events per host exchange; the struct layout is the wire format
+ * shared with the emulator — presumably must match the QEMU-side
+ * definition, keep in sync (TODO confirm). */
+#define ROTARY_EVENT_MAX 32
+struct rotary_event {
+ int32_t delta;
+ int32_t type;
+};
+
+#define ROTARY_EVENT_BUF_SIZE \
+ (ROTARY_EVENT_MAX * sizeof(struct rotary_event))
+
+/* Per-device state; a single global instance (vrtr) is supported. */
+struct virtio_rotary {
+ struct virtio_device *vdev;
+ struct virtqueue *vq;
+ struct input_dev *idev;
+ struct mutex mutex;
+
+ struct rotary_event event[ROTARY_EVENT_MAX];
+};
+
+struct virtio_rotary *vrtr;
+
+static int last_pos; /* 0 ~ 360 */
+static int last_detent;
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_ROTARY, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+/* degrees between physical wheel detents */
+#define DETENT_UNIT (15)
+/* Mathematical modulus: result always lies in [0, div), even for
+ * negative n (unlike C's % operator, which can yield negatives). */
+#define REMAINDER(n, div) ({ \
+typeof(n) _n = (n) % (div); \
+ if (_n < 0) { \
+ _n += (div); \
+ } \
+ _n; \
+})
+
+/*
+ * Clear and repost the shared event array as a single receive buffer
+ * on @vq, then kick the host.  @event must point to ROTARY_EVENT_MAX
+ * entries, since ROTARY_EVENT_BUF_SIZE bytes are cleared and mapped.
+ * Returns the virtqueue_add_inbuf() status (< 0 on failure).
+ */
+static int add_inbuf(struct virtqueue *vq, struct rotary_event *event)
+{
+ struct scatterlist sg[1];
+ int ret;
+
+ memset(event, 0x00, ROTARY_EVENT_BUF_SIZE);
+ sg_init_one(sg, event, ROTARY_EVENT_BUF_SIZE);
+
+ ret = virtqueue_add_inbuf(vq, sg, 1, event, GFP_ATOMIC);
+ virtqueue_kick(vq);
+
+ return ret;
+}
+
+/* Map an accumulated angle onto the wheel position range [0, 360). */
+static int get_rotary_pos(int value)
+{
+	int pos = value % 360;
+
+	if (pos < 0)
+		pos += 360;
+
+	return pos;
+}
+
+/*
+ * Receive-virtqueue callback: translate host rotary deltas into
+ * REL_WHEEL input events, then repost the shared receive buffer.
+ * Wheel encoding as implemented below: +/-1 on every DETENT_UNIT
+ * step, +/-2 when the absolute detent position changes (CW/CCW) —
+ * wire semantics presumably shared with the emulator skin, TODO
+ * confirm against the QEMU maru rotary device.
+ */
+static void vq_rotary_handler(struct virtqueue *vq)
+{
+ int err = 0;
+ struct rotary_event *data;
+ unsigned int len = 0;
+ size_t j, num_event;
+
+ data = (struct rotary_event *)virtqueue_get_buf(vq, &len);
+ if (!data) {
+ VR_LOG(KERN_ERR, "there is no available buffer\n");
+ return;
+ }
+
+ /* len = bytes written by the host; convert to an event count */
+ num_event = (size_t)len / sizeof(struct rotary_event);
+ VR_LOG(KERN_DEBUG, "len(%u), num_event(%zu)\n", len, num_event);
+
+ for (j = 0; j < num_event; j++) {
+ int i = 0;
+ int pos = 0;
+ int value = 0;
+ struct rotary_event event;
+
+ memcpy(&event, &data[j],
+ sizeof(struct rotary_event));
+
+ /* whole revolutions cancel out; delta 0 marks an empty slot */
+ event.delta %= 360;
+ if (event.delta == 0)
+ continue;
+
+ pos = get_rotary_pos(last_pos + event.delta);
+
+ VR_LOG(KERN_DEBUG,
+ "rotary event: idx(%zu), event.delta(%d), pos(%d)\n",
+ j, event.delta, pos);
+
+ /* walk degree by degree, firing events on DETENT_UNIT marks */
+ for (i = 1; i <= abs(event.delta); i++) {
+ value = (event.delta > 0) ? last_pos + i : last_pos - i;
+ if ((value % DETENT_UNIT) == 0) {
+ input_report_rel(vrtr->idev, REL_WHEEL, 1);
+ input_sync(vrtr->idev);
+ if (get_rotary_pos(value) != last_detent) {
+ last_detent = get_rotary_pos(value);
+ if (event.delta > 0) { /* CW */
+ input_report_rel(vrtr->idev,
+ REL_WHEEL, 2);
+ } else { /* CCW */
+ input_report_rel(vrtr->idev,
+ REL_WHEEL, -2);
+ }
+ } else {
+ input_report_rel(vrtr->idev,
+ REL_WHEEL, -1);
+ }
+ input_sync(vrtr->idev);
+
+ VR_LOG(KERN_INFO,
+ "rotary event: delta(%d), detent(%d)\n",
+ event.delta, last_detent);
+ }
+ }
+ last_pos = pos;
+ }
+
+ /* hand the buffer back to the host for the next batch */
+ err = add_inbuf(vrtr->vq, vrtr->event);
+ if (err < 0) {
+ VR_LOG(KERN_ERR, "failed to add buffer to virtqueue\n");
+ return;
+ }
+ /* add_inbuf() already kicked; this extra kick is redundant but harmless */
+ virtqueue_kick(vrtr->vq);
+}
+
+/* input-core open hook: no per-open setup is needed, log only */
+static int input_rotary_open(struct input_dev *dev)
+{
+ VR_LOG(KERN_DEBUG, "input_rotary_open\n");
+ return 0;
+}
+
+/* input-core close hook: nothing to release, log only */
+static void input_rotary_close(struct input_dev *dev)
+{
+ VR_LOG(KERN_DEBUG, "input_rotary_close\n");
+}
+
+/*
+ * Device probe: allocate driver state, find the single virtqueue,
+ * register the input device and prime the receive buffer.
+ * Only one device instance is supported (global vrtr).
+ */
+static int virtio_rotary_probe(struct virtio_device *vdev)
+{
+	int ret = 0;
+
+	if (vrtr) {
+		VR_LOG(KERN_ERR, "driver is already exist\n");
+		return -EINVAL;
+	}
+
+	vdev->priv = vrtr = kzalloc(sizeof(struct virtio_rotary), GFP_KERNEL);
+	if (!vrtr)
+		return -ENOMEM;
+
+	vrtr->vdev = vdev;
+	mutex_init(&vrtr->mutex);
+
+	vrtr->vq = virtio_find_single_vq(vrtr->vdev,
+			vq_rotary_handler,
+			"maru-rotary-vq");
+	if (IS_ERR(vrtr->vq)) {
+		ret = PTR_ERR(vrtr->vq);
+		kfree(vrtr);
+		/* clear the global too, or a re-probe would see a stale,
+		 * freed pointer and bail out with -EINVAL */
+		vrtr = NULL;
+		vdev->priv = NULL;
+		return ret;
+	}
+
+	/* register for input device */
+	vrtr->idev = input_allocate_device();
+	if (!vrtr->idev) {
+		VR_LOG(KERN_ERR, "failed to allocate a input device\n");
+		kfree(vrtr);
+		vrtr = NULL;
+		vdev->priv = NULL;
+		return -ENOMEM;
+	}
+
+	vrtr->idev->name = DRIVER_NAME;
+	vrtr->idev->dev.parent = &vdev->dev;
+	vrtr->idev->id.vendor = 0x0001;
+	vrtr->idev->id.product = 0x0001;
+	vrtr->idev->id.version = 0x0100;
+
+	input_set_drvdata(vrtr->idev, vrtr);
+	vrtr->idev->open = input_rotary_open;
+	vrtr->idev->close = input_rotary_close;
+
+	input_set_capability(vrtr->idev, EV_REL, REL_X);
+	input_set_capability(vrtr->idev, EV_REL, REL_Y);
+	input_set_capability(vrtr->idev, EV_REL, REL_WHEEL);
+	input_set_capability(vrtr->idev, EV_KEY, BTN_LEFT);
+
+	ret = input_register_device(vrtr->idev);
+	if (ret) {
+		VR_LOG(KERN_ERR, "failed to register a input device\n");
+		input_free_device(vrtr->idev);
+		kfree(vrtr);
+		vrtr = NULL;
+		vdev->priv = NULL;
+		return ret;
+	}
+
+	ret = add_inbuf(vrtr->vq, vrtr->event);
+	if (ret < 0) {
+		VR_LOG(KERN_ERR, "failed to add buffer to virtqueue\n");
+		/*
+		 * input_unregister_device() drops the final reference and
+		 * frees the device; calling input_free_device() afterwards
+		 * (as the old code did) was a double free.
+		 */
+		input_unregister_device(vrtr->idev);
+		kfree(vrtr);
+		vrtr = NULL;
+		vdev->priv = NULL;
+		return ret;
+	}
+
+	VR_LOG(KERN_INFO, "driver probe done\n");
+
+	return 0;
+}
+
+/*
+ * Device teardown: reset the device, delete its virtqueues, unregister
+ * the input device and release the driver state.
+ */
+static void virtio_rotary_remove(struct virtio_device *vdev)
+{
+	if (!vrtr) {
+		VR_LOG(KERN_ERR, "rotary instance is NULL\n");
+		return;
+	}
+
+	vdev->config->reset(vdev);
+	vdev->config->del_vqs(vdev);
+
+	/*
+	 * input_unregister_device() releases the last reference and frees
+	 * vrtr->idev; the extra input_free_device() call that used to
+	 * follow it was a double free.
+	 */
+	input_unregister_device(vrtr->idev);
+
+	kfree(vrtr);
+	vrtr = NULL;
+	vdev->priv = NULL;
+	VR_LOG(KERN_INFO, "driver is removed\n");
+}
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+/* virtio driver descriptor; PM freeze/restore hooks are compiled out */
+static struct virtio_driver virtio_rotary_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .id_table = id_table,
+ .probe = virtio_rotary_probe,
+ .remove = virtio_rotary_remove,
+#if 0
+#ifdef CONFIG_PM
+ .freeze = virtio_rotary_freeze,
+ .restore = virtio_rotary_restore,
+#endif
+#endif
+};
+
+/* module entry point: register the driver with the virtio bus */
+static int __init virtio_rotary_init(void)
+{
+ VR_LOG(KERN_INFO, "driver is initialized\n");
+ return register_virtio_driver(&virtio_rotary_driver);
+}
+
+/* module exit point: unregister from the virtio bus */
+static void __exit virtio_rotary_exit(void)
+{
+ VR_LOG(KERN_INFO, "driver is destroyed\n");
+ unregister_virtio_driver(&virtio_rotary_driver);
+}
+
+module_init(virtio_rotary_init);
+module_exit(virtio_rotary_exit);
--- /dev/null
+/*
+ * Maru Virtio Tablet Device Driver
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Jo <jinhyung.jo@samsung.com>
+ * SeokYeon Hwang <syeon.hwang@samsung.com>
+ * Sungmin Ha <sungmin82.ha@samsung.com>
+ * Sangho Park <sangho.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/input.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/kthread.h>
+
+/* "GPL2" is not a recognised licence ident and would taint the
+ * kernel; "GPL v2" is the correct string (include/linux/module.h). */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sungmin Ha <sungmin82.ha@samsung.com>");
+MODULE_DESCRIPTION("Emulator Virtio Tablet driver");
+
+#define DEVICE_NAME "virtio-tablet"
+#define MAX_BUF_COUNT 25
+
+/* This structure must match the qemu definitions */
+typedef struct EmulTabletEvent {
+ uint8_t event_type;
+ uint32_t x;
+ uint32_t y;
+ uint32_t btn;
+ uint32_t btn_status;
+} EmulTabletEvent;
+
+#define MAX_EVENT_BUF_SIZE (MAX_BUF_COUNT * sizeof(EmulTabletEvent))
+
+typedef struct virtio_tablet
+{
+ struct virtio_device *vdev;
+ struct virtqueue *vq;
+ struct input_dev *idev;
+
+ struct scatterlist sg[1];
+ struct EmulTabletEvent vbuf[MAX_BUF_COUNT];
+
+ struct mutex event_mutex;
+} virtio_tablet;
+
+virtio_tablet *vtb;
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_TABLET, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+/* keep it consistent with emulator-skin definition */
+enum {
+ INPUT_MOVE = 1,
+ INPUT_BTN = 2,
+};
+
+typedef enum InputButton
+{
+ INPUT_BUTTON_LEFT = 0,
+ INPUT_BUTTON_MIDDLE = 1,
+ INPUT_BUTTON_RIGHT = 2,
+ INPUT_BUTTON_WHEEL_UP = 3,
+ INPUT_BUTTON_WHEEL_DOWN = 4,
+ INPUT_BUTTON_MAX = 5,
+} InputButton;
+
+static int err = 0;
+static unsigned int index = 0;
+
+/**
+ * @brief Receive-virtqueue callback: translate queued tablet events
+ * into input-core button/absolute events, then clear and repost the
+ * shared receive buffer.
+ */
+static void vq_tablet_callback(struct virtqueue *vq)
+{
+ struct EmulTabletEvent *token = NULL;
+ unsigned int len = 0;
+ size_t i, num_event;
+
+ token = (struct EmulTabletEvent *)virtqueue_get_buf(vq, &len);
+ if (!token) {
+ printk(KERN_ERR "there is no available buffer\n");
+ return;
+ }
+
+ /* len = bytes the host wrote; convert to an event count */
+ num_event = (size_t)len / sizeof(struct EmulTabletEvent);
+
+ for (i = 0; i < num_event; i++) {
+ struct EmulTabletEvent event;
+
+ memcpy(&event, &token[i], sizeof(event));
+ /* event_type 0 marks an unused slot in the fixed-size buffer */
+ if (event.event_type == 0)
+ continue;
+
+ if (event.event_type == INPUT_BTN) {
+ /* TODO: Implementation for
+ * the remaining events are required. */
+ if (event.btn == INPUT_BUTTON_LEFT) {
+ /* 0x90001 is scan code.
+ * (logitech left click) */
+ input_event(vtb->idev, EV_MSC, MSC_SCAN,
+ 0x90001);
+ input_event(vtb->idev, EV_KEY, BTN_LEFT,
+ event.btn_status);
+ input_sync(vtb->idev);
+ }
+ } else if (event.event_type == INPUT_MOVE) {
+ input_event(vtb->idev, EV_ABS, ABS_X,
+ event.x);
+ input_event(vtb->idev, EV_ABS, ABS_Y,
+ event.y);
+ input_sync(vtb->idev);
+ } else {
+ printk(KERN_ERR "Unknown event type\n");
+ break;
+ }
+ }
+ /* clear and repost the receive buffer for the next batch */
+ memset(vtb->vbuf, 0x00, MAX_EVENT_BUF_SIZE);
+
+ err = virtqueue_add_inbuf(vtb->vq, vtb->sg, 1,
+ (void *)vtb->vbuf, GFP_ATOMIC);
+ if (err < 0) {
+ printk(KERN_ERR "failed to add buffer to virtqueue\n");
+ return;
+ }
+
+ virtqueue_kick(vtb->vq);
+}
+
+/*
+ * Device probe: allocate state, set up the single virtqueue, prime the
+ * receive buffer and register the tablet input device.
+ */
+static int virtio_tablet_probe(struct virtio_device *vdev)
+{
+	int ret = 0;
+
+	printk(KERN_INFO "virtio tablet driver is probed\n");
+
+	/* init virtio */
+	vdev->priv = vtb = kmalloc(sizeof(*vtb), GFP_KERNEL);
+	if (!vtb) {
+		return -ENOMEM;
+	}
+
+	memset(&vtb->vbuf, 0x00, sizeof(vtb->vbuf));
+	vtb->vdev = vdev;
+	vtb->vq = virtio_find_single_vq(vtb->vdev,
+		vq_tablet_callback, "virtio-tablet-vq");
+	if (IS_ERR(vtb->vq)) {
+		ret = PTR_ERR(vtb->vq);
+		kfree(vtb);
+		/* clear the global so the callback can never see freed memory */
+		vtb = NULL;
+		vdev->priv = NULL;
+		return ret;
+	}
+
+	/* enable callback */
+	virtqueue_enable_cb(vtb->vq);
+
+	/* prepare the buffers */
+	sg_init_one(vtb->sg, vtb->vbuf, MAX_EVENT_BUF_SIZE);
+
+	err = virtqueue_add_inbuf(vtb->vq, vtb->sg, 1,
+		(void *)vtb->vbuf, GFP_ATOMIC);
+	if (err < 0) {
+		/* the old code ignored this failure and carried on with an
+		 * unarmed receive queue */
+		printk(KERN_ERR "failed to add buffer to virtqueue\n");
+		kfree(vtb);
+		vtb = NULL;
+		vdev->priv = NULL;
+		return err;
+	}
+
+	/* register for input device */
+	vtb->idev = input_allocate_device();
+	if (!vtb->idev) {
+		printk(KERN_ERR "failed to allocate a input tablet device\n");
+		kfree(vtb);
+		vtb = NULL;
+		vdev->priv = NULL;
+		return -ENOMEM;
+	}
+
+	vtb->idev->name = "Maru VirtIO Tablet";
+	vtb->idev->dev.parent = &(vdev->dev);
+
+	input_set_drvdata(vtb->idev, vtb);
+
+	vtb->idev->evbit[0] = BIT_MASK(EV_KEY)
+		| BIT_MASK(EV_REL)
+		| BIT_MASK(EV_ABS)
+		| BIT_MASK(EV_MSC);
+
+	/* 32767 is max size of usbdevice tablet. */
+	input_abs_set_max(vtb->idev, ABS_X, 32767);
+	input_abs_set_max(vtb->idev, ABS_Y, 32767);
+
+	set_bit(BTN_LEFT, vtb->idev->keybit);
+	set_bit(BTN_RIGHT, vtb->idev->keybit);
+	set_bit(BTN_MIDDLE, vtb->idev->keybit);
+	set_bit(REL_WHEEL, vtb->idev->relbit);
+	set_bit(ABS_X, vtb->idev->absbit);
+	set_bit(ABS_Y, vtb->idev->absbit);
+	set_bit(MSC_SCAN, vtb->idev->mscbit);
+
+	ret = input_register_device(vtb->idev);
+	if (ret) {
+		printk(KERN_ERR "input tablet driver cannot registered\n");
+		/* propagate the real errno instead of overwriting it with -1 */
+		input_free_device(vtb->idev);
+		kfree(vtb);
+		vtb = NULL;
+		vdev->priv = NULL;
+		return ret;
+	}
+
+	virtqueue_kick(vtb->vq);
+	index = 0;
+
+	return 0;
+}
+
+/*
+ * Device teardown: stop the device, delete its virtqueues and
+ * unregister the input device (input_unregister_device also drops the
+ * final reference to it) before releasing the driver state.
+ * NOTE(review): the global vtb is left pointing at freed memory here;
+ * confirm no further callbacks can run after del_vqs().
+ */
+static void virtio_tablet_remove(struct virtio_device *vdev)
+{
+ virtio_tablet *vtk = NULL;
+
+ printk(KERN_INFO "virtio tablet driver is removed\n");
+
+ vtk = vdev->priv;
+
+ vdev->config->reset(vdev); /* reset device */
+ vdev->config->del_vqs(vdev); /* clean up the queues */
+
+ input_unregister_device(vtk->idev);
+
+ kfree(vtk);
+}
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+/* virtio driver descriptor for the emulated tablet device */
+static struct virtio_driver virtio_tablet_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtio_tablet_probe,
+ .remove = virtio_tablet_remove,
+};
+
+/* module entry point: register the driver with the virtio bus */
+static int __init virtio_tablet_init(void)
+{
+ printk(KERN_INFO "virtio tablet device is initialized\n");
+ return register_virtio_driver(&virtio_tablet_driver);
+}
+
+/* module exit point: unregister from the virtio bus */
+static void __exit virtio_tablet_exit(void)
+{
+ printk(KERN_INFO "virtio tablet device is destroyed\n");
+ unregister_virtio_driver(&virtio_tablet_driver);
+}
+
+module_init(virtio_tablet_init);
+module_exit(virtio_tablet_exit);
--- /dev/null
+/*
+ * Maru Virtio Touchscreen Device Driver
+ *
+ * Copyright (c) 2012 - 2013 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * GiWoong Kim <giwoong.kim@samsung.com>
+ * SeokYeon Hwang <syeon.hwang@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/kthread.h>
+
+/* "GPL2" is not a recognised licence ident and would taint the
+ * kernel; "GPL v2" is the correct string (include/linux/module.h). */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("GiWoong Kim <giwoong.kim@samsung.com>");
+MODULE_DESCRIPTION("Emulator Virtio Touchscreen driver");
+
+
+#define DEVICE_NAME "virtio-touchscreen"
+
+/* This structure must match the qemu definitions */
+typedef struct EmulTouchEvent {
+ uint16_t x, y, z;
+ uint8_t state;
+} EmulTouchEvent;
+EmulTouchEvent *event;
+
+typedef struct virtio_touchscreen
+{
+ struct virtio_device *vdev;
+ struct virtqueue *vq;
+ struct input_dev *idev;
+
+ /* The thread servicing the touchscreen */
+ struct task_struct *thread;
+} virtio_touchscreen;
+virtio_touchscreen *vt;
+
+
+#define MAX_TRKID 10
+#define TOUCHSCREEN_RESOLUTION_X 5040
+#define TOUCHSCREEN_RESOLUTION_Y 3780
+#define ABS_PRESSURE_MAX 255
+
+#define MAX_BUF_COUNT MAX_TRKID
+struct scatterlist sg[MAX_BUF_COUNT];
+EmulTouchEvent vbuf[MAX_BUF_COUNT];
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_TOUCHSCREEN, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+
+#if 0
+/**
+ * @brief : event polling
+ */
+static int run_touchscreen(void *_vtouchscreen)
+{
+ virtio_touchscreen *vt = NULL;
+ int err = 0;
+ unsigned int len = 0; /* not used */
+ unsigned int index = 0;
+ unsigned int recv_index = 0;
+ unsigned int id = 0; /* finger id */
+
+ struct input_dev *input_dev = NULL;
+ EmulTouchEvent *event = NULL;
+
+ vt = (virtio_touchscreen *)_vtouchscreen;
+ input_dev = vt->idev;
+
+ sg_init_table(sg, MAX_BUF_COUNT);
+
+ for (index = 0; index < MAX_BUF_COUNT; index++) {
+ sg_set_buf(&sg[index], &vbuf[index], sizeof(EmulTouchEvent));
+
+ err = virtqueue_add_inbuf(vt->vq, sg, index + 1, (void *)index + 1, GFP_ATOMIC);
+ if (err < 0) {
+ printk(KERN_ERR "failed to add buf\n");
+ }
+ }
+ virtqueue_kick(vt->vq);
+
+ index = 0;
+
+ while (!kthread_should_stop())
+ {
+ while ((recv_index = (unsigned int)virtqueue_get_buf(vt->vq, &len)) == 0) {
+ cpu_relax();
+ }
+
+ do {
+ event = &vbuf[recv_index - 1];
+#if 0
+ printk(KERN_INFO "touch x=%d, y=%d, z=%d, state=%d, recv_index=%d\n",
+ event->x, event->y, event->z, event->state, recv_index);
+#endif
+
+ id = event->z;
+
+ /* Multi-touch Protocol is B */
+ if (event->state != 0)
+ { /* pressed */
+ input_mt_slot(input_dev, id);
+ input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, true);
+ input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, 10);
+ input_report_abs(input_dev, ABS_MT_POSITION_X, event->x);
+ input_report_abs(input_dev, ABS_MT_POSITION_Y, event->y);
+ }
+ else
+ { /* released */
+ input_mt_slot(input_dev, id);
+ input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, false);
+ }
+
+ input_sync(input_dev);
+
+ /* expose buffer to other end */
+ err = virtqueue_add_inbuf(vt->vq, sg, recv_index, (void *)recv_index, GFP_ATOMIC);
+ if (err < 0) {
+ printk(KERN_ERR "failed to add buf\n");
+ }
+
+ recv_index = (unsigned int)virtqueue_get_buf(vt->vq, &len);
+ if (recv_index == 0) {
+ break;
+ }
+ } while(true);
+
+ virtqueue_kick(vt->vq);
+ }
+
+ printk(KERN_INFO "virtio touchscreen thread is stopped\n");
+
+ return 0;
+}
+#endif
+
+
+/*
+ * File-scope scratch state shared between probe and the vq callback.
+ * NOTE(review): plain globals with no locking; this is only safe if a
+ * single device instance exists and callbacks are serialized — confirm
+ * against the virtio core's callback guarantees.
+ */
+int err;
+unsigned int len; /* not used */
+size_t buf_index;
+size_t recv_index;
+unsigned int finger_id; /* finger id */
+
+/**
+ * @brief Receive-virtqueue callback.
+ * The buffer token is (index + 1) into the global vbuf[] array, so
+ * recv_index - 1 addresses the event the host just filled; a token of
+ * 0 means "no buffer available".
+ */
+static void vq_touchscreen_callback(struct virtqueue *vq)
+{
+#if 0
+ printk(KERN_INFO "vq touchscreen callback\n");
+#endif
+
+ recv_index = (size_t)virtqueue_get_buf(vt->vq, &len);
+ if (recv_index == 0) {
+ printk(KERN_ERR "failed to get buffer\n");
+ return;
+ }
+
+ do {
+ event = &vbuf[recv_index - 1];
+
+#if 0
+ printk(KERN_INFO "touch x=%d, y=%d, z=%d, state=%d, recv_index=%d\n",
+ event->x, event->y, event->z, event->state, recv_index);
+#endif
+
+ /* the z field carries the finger (slot) id, not pressure */
+ finger_id = event->z;
+
+ if (finger_id < MAX_TRKID) {
+ /* Multi-touch Protocol is B */
+
+ if (event->state != 0)
+ { /* pressed */
+ input_mt_slot(vt->idev, finger_id);
+ input_mt_report_slot_state(vt->idev, MT_TOOL_FINGER, true);
+ input_report_abs(vt->idev, ABS_MT_TOUCH_MAJOR, 10);
+ input_report_abs(vt->idev, ABS_MT_POSITION_X, event->x);
+ input_report_abs(vt->idev, ABS_MT_POSITION_Y, event->y);
+ }
+ else
+ { /* released */
+ input_mt_slot(vt->idev, finger_id);
+ input_mt_report_slot_state(vt->idev, MT_TOOL_FINGER, false);
+ }
+
+ input_sync(vt->idev);
+ } else {
+ printk(KERN_ERR "%d is an invalid finger id!\n", finger_id);
+ }
+
+ /* expose buffer to other end */
+ err = virtqueue_add_inbuf(vt->vq, sg,
+ (unsigned int)recv_index, (void *)recv_index, GFP_ATOMIC);
+
+ if (err < 0) {
+ printk(KERN_ERR "failed to add buffer!\n");
+ }
+
+ recv_index = (size_t)virtqueue_get_buf(vt->vq, &len);
+ if (recv_index == 0) {
+ break;
+ }
+ } while(true);
+
+ virtqueue_kick(vt->vq);
+}
+
+/* char-device open hook: log only */
+static int virtio_touchscreen_open(struct inode *inode, struct file *file)
+{
+ printk(KERN_INFO "virtio touchscreen device is opened\n");
+ return 0;
+}
+
+/* char-device release hook: log only */
+static int virtio_touchscreen_release(struct inode *inode, struct file *file)
+{
+ printk(KERN_INFO "virtio touchscreen device is closed\n");
+ return 0;
+}
+
+/* input-core open hook: no per-open setup needed */
+static int input_touchscreen_open(struct input_dev *dev)
+{
+ printk(KERN_INFO "input touchscreen device is opened\n");
+ return 0;
+}
+
+/* input-core close hook: nothing to release */
+static void input_touchscreen_close(struct input_dev *dev)
+{
+ printk(KERN_INFO "input touchscreen device is closed\n");
+}
+
+/* file operations for the touchscreen char device */
+struct file_operations virtio_touchscreen_fops = {
+ .owner = THIS_MODULE,
+ .open = virtio_touchscreen_open,
+ .release = virtio_touchscreen_release,
+};
+
+extern char *saved_command_line;
+#define VM_RESOLUTION_KEY "vm_resolution="
+
+/*
+ * Device probe: allocate state, set up the virtqueue and its receive
+ * buffers, read the VM resolution from the kernel command line and
+ * register the multitouch (protocol B) input device.
+ */
+static int virtio_touchscreen_probe(struct virtio_device *vdev)
+{
+	unsigned long width = 0;
+	unsigned long height = 0;
+	char *cmdline = NULL;
+	char *value = NULL;
+	char *tmp = NULL;
+	int err = 0;
+	int ret = 0;
+
+	printk(KERN_INFO "virtio touchscreen driver is probed\n");
+
+	/*
+	 * init virtio.  kzalloc (the old code used kmalloc) so fields such
+	 * as vt->thread start out NULL — remove() must be able to tell that
+	 * the (compiled-out) polling thread was never started.
+	 */
+	vdev->priv = vt = kzalloc(sizeof(*vt), GFP_KERNEL);
+	if (!vt) {
+		return -ENOMEM;
+	}
+
+	vt->vdev = vdev;
+
+	vt->vq = virtio_find_single_vq(vt->vdev,
+		vq_touchscreen_callback, "virtio-touchscreen-vq");
+	if (IS_ERR(vt->vq)) {
+		ret = PTR_ERR(vt->vq);
+
+		kfree(vt);
+		vt = NULL;
+		vdev->priv = NULL;
+		return ret;
+	}
+
+	/* enable callback */
+	virtqueue_enable_cb(vt->vq);
+
+	sg_init_table(sg, MAX_BUF_COUNT);
+
+	/* prepare the buffers; token i+1 maps to vbuf[i] (0 is reserved
+	 * for "no buffer") */
+	for (buf_index = 0; buf_index < MAX_BUF_COUNT; buf_index++) {
+		sg_set_buf(&sg[buf_index], &vbuf[buf_index], sizeof(EmulTouchEvent));
+
+		err = virtqueue_add_inbuf(vt->vq, sg,
+			buf_index + 1, (void *)buf_index + 1, GFP_ATOMIC);
+
+		if (err < 0) {
+			printk(KERN_ERR "failed to add buffer\n");
+
+			kfree(vt);
+			vt = NULL;
+			vdev->priv = NULL;
+			/*
+			 * The old code returned ret here, which was still 0:
+			 * probe reported success with vt already freed.
+			 * Return the real error instead.
+			 */
+			return err;
+		}
+	}
+
+	cmdline = kzalloc(strlen(saved_command_line) + 1, GFP_KERNEL);
+	if (cmdline) {
+		/* get VM resolution ("vm_resolution=WxH ...") */
+		strcpy(cmdline, saved_command_line);
+		tmp = strstr(cmdline, VM_RESOLUTION_KEY);
+
+		if (tmp != NULL) {
+			tmp += strlen(VM_RESOLUTION_KEY);
+
+			value = strsep(&tmp, "x");
+			err = kstrtoul(value, 10, &width);
+			if (err) {
+				printk(KERN_WARNING "vm width option is not defined\n");
+				width = 0;
+			}
+
+			value = strsep(&tmp, " ");
+			err = kstrtoul(value, 10, &height);
+			if (err) {
+				printk(KERN_WARNING "vm height option is not defined\n");
+				height = 0;
+			}
+		}
+
+		kfree(cmdline);
+	}
+
+	if (width != 0 && height != 0) {
+		printk(KERN_INFO "emul resolution : %lux%lu\n", width, height);
+	}
+
+	/* register for input device.
+	 * NOTE(review): if the command line lacks vm_resolution=, the ABS
+	 * ranges below end up 0..0 — confirm the emulator always passes it. */
+	vt->idev = input_allocate_device();
+	if (!vt->idev) {
+		printk(KERN_ERR "failed to allocate a input touchscreen device\n");
+		kfree(vt);
+		vt = NULL;
+		vdev->priv = NULL;
+		return -ENOMEM;
+	}
+
+	vt->idev->name = "Maru Virtio Touchscreen";
+	vt->idev->dev.parent = &(vdev->dev);
+
+	input_set_drvdata(vt->idev, vt);
+	vt->idev->open = input_touchscreen_open;
+	vt->idev->close = input_touchscreen_close;
+
+	vt->idev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+	vt->idev->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC);
+	vt->idev->keybit[BIT_WORD(BTN_TOUCH)] |= BIT_MASK(BTN_TOUCH);
+
+	input_mt_init_slots(vt->idev, MAX_TRKID, 0);
+
+	input_set_abs_params(vt->idev, ABS_X, 0,
+		width, 0, 0);
+	input_set_abs_params(vt->idev, ABS_Y, 0,
+		height, 0, 0);
+	input_set_abs_params(vt->idev, ABS_MT_TRACKING_ID, 0,
+		MAX_TRKID, 0, 0);
+	input_set_abs_params(vt->idev, ABS_MT_TOUCH_MAJOR, 0,
+		ABS_PRESSURE_MAX, 0, 0);
+	input_set_abs_params(vt->idev, ABS_MT_POSITION_X, 0,
+		width, 0, 0);
+	input_set_abs_params(vt->idev, ABS_MT_POSITION_Y, 0,
+		height, 0, 0);
+
+	ret = input_register_device(vt->idev);
+	if (ret) {
+		printk(KERN_ERR "input touchscreen driver cannot registered\n");
+		/* propagate the real errno instead of overwriting it with -1 */
+		input_mt_destroy_slots(vt->idev);
+		input_free_device(vt->idev);
+		kfree(vt);
+		vt = NULL;
+		vdev->priv = NULL;
+		return ret;
+	}
+
+#if 0 /* using a thread */
+
+	/* Responses from the hypervisor occur through the get_buf function */
+	vt->thread = kthread_run(run_touchscreen, vt, "vtouchscreen");
+	if (IS_ERR(vt->thread)) {
+		printk(KERN_ERR "unable to start the virtio touchscreen thread\n");
+		ret = PTR_ERR(vt->thread);
+
+		input_mt_destroy_slots(vt->idev);
+		input_free_device(vt->idev);
+		kfree(vt);
+		vdev->priv = NULL;
+		return ret;
+	}
+#else /* using a callback */
+
+	virtqueue_kick(vt->vq);
+
+	buf_index = 0;
+
+#endif
+
+	return 0;
+}
+
+/*
+ * Device teardown: reset the device, delete its virtqueues, unregister
+ * the input device and release the driver state.
+ */
+static void virtio_touchscreen_remove(struct virtio_device *vdev)
+{
+	virtio_touchscreen *vts = NULL;
+
+	printk(KERN_INFO "virtio touchscreen driver is removed\n");
+
+	vts = vdev->priv;
+
+	/*
+	 * The polling-thread variant is compiled out (#if 0) and probe
+	 * never starts vts->thread, so the old kthread_stop(vts->thread)
+	 * call here dereferenced an uninitialised pointer.
+	 */
+
+	vdev->config->reset(vdev); /* reset device */
+	vdev->config->del_vqs(vdev); /* clean up the queues */
+
+	/*
+	 * input_unregister_device() drops the final reference and frees
+	 * the device, including its MT slot state; the old follow-up call
+	 * to input_mt_destroy_slots(vts->idev) was a use-after-free.
+	 */
+	input_unregister_device(vts->idev);
+
+	kfree(vts);
+}
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+/* virtio driver descriptor; optional feature/PM hooks are compiled out */
+static struct virtio_driver virtio_touchscreen_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtio_touchscreen_probe,
+ .remove = virtio_touchscreen_remove,
+#if 0
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .config_changed =
+#ifdef CONFIG_PM
+ .freeze =
+ .restore =
+#endif
+#endif
+};
+
+/* module entry point: register the driver with the virtio bus */
+static int __init virtio_touchscreen_init(void)
+{
+ printk(KERN_INFO "virtio touchscreen device is initialized\n");
+ return register_virtio_driver(&virtio_touchscreen_driver);
+}
+
+/* module exit point: unregister from the virtio bus */
+static void __exit virtio_touchscreen_exit(void)
+{
+ printk(KERN_INFO "virtio touchscreen device is destroyed\n");
+ unregister_virtio_driver(&virtio_touchscreen_driver);
+}
+
+module_init(virtio_touchscreen_init);
+module_exit(virtio_touchscreen_exit);
+
--- /dev/null
+/*
+ * Maru Virtio Virtual Modem Device Driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Sooyoung Ha <yoosah.ha@samsung.com>
+ * SeokYeon Hwang <syeon.hwang@samsung.com>
+ * Sangho Park <sangho1206.park@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/cdev.h>
+
+#define DRIVER_NAME "VMODEM"
+
+#define LOG(fmt, ...) \
+ printk(KERN_ERR "%s: " fmt, DRIVER_NAME, ##__VA_ARGS__)
+
+#define NUM_OF_VMODEM 2
+#define DEVICE_NAME "vmodem"
+
+/* "GPL2" is not a recognised licence ident and would taint the
+ * kernel; "GPL v2" is the correct string (include/linux/module.h). */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sooyoung Ha <yoosah.ha@samsung.com>");
+MODULE_DESCRIPTION("Emulator Virtio Virtual Modem Device Driver");
+
+#define __MAX_BUF_SIZE 1024
+
+enum
+{
+ route_qemu = 0,
+ route_control_server = 1,
+ route_monitor = 2
+};
+
+typedef unsigned int CSCliSN;
+
+struct msg_info {
+ char buf[__MAX_BUF_SIZE];
+
+ uint32_t route;
+ uint32_t use;
+ uint16_t count;
+ uint16_t index;
+
+ CSCliSN cclisn;
+};
+
+#define SIZEOF_MSG_INFO sizeof(struct msg_info)
+
+struct msg_buf {
+ struct msg_info msg;
+ struct list_head list;
+};
+
+#define SIZEOF_MSG_BUF sizeof(struct msg_buf)
+
+enum {
+ EVID_READ = 0, EVID_WRITE = 1
+};
+
+struct virtvmodem_info {
+
+ wait_queue_head_t waitqueue;
+ spinlock_t inbuf_lock;
+ spinlock_t outvq_lock;
+
+ struct cdev cdev;
+ char name[10];
+
+ int index;
+ bool guest_connected;
+
+} *pvmodem_info[NUM_OF_VMODEM];
+
+struct virtio_vmodem {
+ struct virtio_device* vdev;
+ struct virtqueue* rvq;
+ struct virtqueue* svq;
+
+ struct msg_info read_msginfo;
+ struct msg_info send_msginfo;
+
+ struct list_head read_list;
+ struct list_head write_list;
+
+ struct scatterlist sg_read[2];
+ struct scatterlist sg_send[2];
+};
+struct virtio_vmodem *vvmodem;
+
+static struct virtio_device_id id_table[] = { { VIRTIO_ID_VMODEM,
+ VIRTIO_DEV_ANY_ID }, { 0 }, };
+
+static dev_t vmodem_dev_number;
+static struct class* vmodem_class;
+
+
+/* Thin kmalloc(GFP_KERNEL) wrapper; returns NULL on allocation
+ * failure, exactly as kmalloc itself does. */
+static void* __xmalloc(size_t size)
+{
+	return kmalloc(size, GFP_KERNEL);
+}
+
+/*
+ * Clear the shared read_msginfo and expose it to the host on the
+ * receive queue, then kick.
+ * NOTE(review): relies on vvmodem->sg_read having been initialized
+ * during probe (not visible in this chunk) — confirm.
+ * Returns 0 on success or the negative virtqueue_add_inbuf() error.
+ */
+int make_vmodem_buf_and_kick(void)
+{
+ int ret;
+ memset(&vvmodem->read_msginfo, 0x00, sizeof(vvmodem->read_msginfo));
+ ret = virtqueue_add_inbuf(vvmodem->rvq, vvmodem->sg_read, 1, &vvmodem->read_msginfo,
+ GFP_ATOMIC );
+ if (ret < 0) {
+ LOG("failed to add buffer to virtqueue.(%d)\n", ret);
+ return ret;
+ }
+
+ virtqueue_kick(vvmodem->rvq);
+
+ return 0;
+}
+
+/*
+ * Map @msg as a one-entry receive buffer on @vq and kick the host.
+ * Returns the virtqueue_add_inbuf() status (< 0 on failure).
+ */
+static int add_inbuf(struct virtqueue *vq, struct msg_info *msg)
+{
+ struct scatterlist sg[1];
+ int ret;
+
+ sg_init_one(sg, msg, sizeof(struct msg_info));
+
+ ret = virtqueue_add_inbuf(vq, sg, 1, msg, GFP_ATOMIC);
+ virtqueue_kick(vq);
+ return ret;
+}
+
+/*
+ * Return true when at least one message is queued on the shared
+ * read_list.  The list is sampled under inbuf_lock to pair with the
+ * producer in vmodem_recv_done().
+ */
+static bool has_readdata(struct virtvmodem_info *vvinfo)
+{
+	unsigned long flags;
+	bool nonempty;
+
+	spin_lock_irqsave(&vvinfo->inbuf_lock, flags);
+	nonempty = !list_empty(&vvmodem->read_list);
+	spin_unlock_irqrestore(&vvinfo->inbuf_lock, flags);
+
+	return nonempty;
+}
+
+
+/*
+ * Char-device open: match the inode's cdev against the per-device
+ * table, mark the slot connected and prime the receive virtqueue.
+ */
+static int vmodem_open(struct inode* inode, struct file* filp)
+{
+	int i, ret;
+	struct virtvmodem_info* vmodem_info;
+	struct cdev *cdev = inode->i_cdev;
+
+	vmodem_info = NULL;
+	LOG("vmodem_open\n");
+
+	for (i = 0; i < NUM_OF_VMODEM; i++) {
+		LOG("vmodem info index = %d, cdev dev = %d, inode dev = %d\n",
+				i, pvmodem_info[i]->cdev.dev, cdev->dev);
+
+		if (pvmodem_info[i]->cdev.dev == cdev->dev) {
+			vmodem_info = pvmodem_info[i];
+			break;
+		}
+	}
+
+	/* the old code dereferenced vmodem_info unconditionally and would
+	 * oops if no entry matched this minor */
+	if (!vmodem_info)
+		return -ENODEV;
+
+	filp->private_data = vmodem_info;
+
+	vmodem_info->guest_connected = true;
+
+	ret = make_vmodem_buf_and_kick();
+	if (ret < 0) {
+		return ret;
+	}
+
+	LOG("vmodem is opened\n");
+	return 0;
+}
+
+/* Char-device release: just flag the slot as disconnected. */
+static int vmodem_close(struct inode* i, struct file* filp)
+{
+	struct virtvmodem_info *info = filp->private_data;
+
+	info->guest_connected = false;
+
+	LOG("vmodem is closed\n");
+	return 0;
+}
+
+/*
+ * Char-device read: hand the oldest queued message to userspace and
+ * re-arm the receive virtqueue.
+ *
+ * Fixes over the original: the copy length is clamped to one msg_info
+ * (the old code passed the raw user @len to copy_to_user and could
+ * leak kernel memory beyond the message); the list entry is detached
+ * under the producer's inbuf_lock so it cannot race with
+ * vmodem_recv_done(); and the copy_to_user result is tested correctly
+ * (it returns the number of bytes NOT copied, never a negative value,
+ * so the old "ret < 0" check could never fire).
+ */
+static ssize_t vmodem_read(struct file *filp, char __user *ubuf, size_t len,
+		loff_t *f_pos)
+{
+	struct virtvmodem_info *vvinfo;
+	unsigned long not_copied;
+	struct msg_buf* next;
+	unsigned long flags;
+
+	vvinfo = filp->private_data;
+
+	if (!has_readdata(vvinfo)) {
+		if (filp->f_flags & O_NONBLOCK) {
+			LOG("list is empty, return EAGAIN\n");
+			return -EAGAIN;
+		}
+		return -EFAULT;
+	}
+
+	/* never copy more than one message out of the fixed-size buffer */
+	if (len > sizeof(next->msg))
+		len = sizeof(next->msg);
+
+	/* pop under the same lock the producer (vmodem_recv_done) uses */
+	spin_lock_irqsave(&pvmodem_info[EVID_READ]->inbuf_lock, flags);
+	next = list_first_entry_or_null(&vvmodem->read_list,
+			struct msg_buf, list);
+	if (next)
+		list_del(&next->list);
+	spin_unlock_irqrestore(&pvmodem_info[EVID_READ]->inbuf_lock, flags);
+
+	if (next == NULL) {
+		LOG("invliad list entry\n");
+		return -EFAULT;
+	}
+
+	not_copied = copy_to_user(ubuf, &next->msg, len);
+	kfree(next);
+
+	spin_lock_irqsave(&pvmodem_info[EVID_READ]->inbuf_lock, flags);
+
+	if (add_inbuf(vvmodem->rvq, &vvmodem->read_msginfo) < 0) {
+		LOG("failed add_buf\n");
+	}
+
+	spin_unlock_irqrestore(&pvmodem_info[EVID_READ]->inbuf_lock, flags);
+
+	if (not_copied != 0) {
+		return -EFAULT;
+	}
+
+	*f_pos += len;
+
+	return len;
+}
+
+/*
+ * Char-device write: copy one fixed-size msg_info from userspace and
+ * queue it on the send virtqueue.  Exactly sizeof(send_msginfo) bytes
+ * are read regardless of @len, and @len is echoed back on success.
+ * NOTE(review): the single send_msginfo buffer is reused with no lock
+ * or completion tracking; a concurrent writer could clobber an
+ * in-flight message — confirm single-writer usage.
+ */
+static ssize_t vmodem_write(struct file *f, const char __user *ubuf, size_t len,
+ loff_t* f_pos)
+{
+ int err = 0;
+ ssize_t ret = 0;
+
+ if (vvmodem == NULL) {
+ LOG("invalid vmodem handle\n");
+ return 0;
+ }
+
+ memset(&vvmodem->send_msginfo, 0, sizeof(vvmodem->send_msginfo));
+ ret = copy_from_user(&vvmodem->send_msginfo, ubuf, sizeof(vvmodem->send_msginfo));
+
+ if (ret) {
+ LOG("vmodem's copy_from_user is failed\n");
+ ret = -EFAULT;
+ return ret;
+ }
+
+
+ err = virtqueue_add_outbuf(vvmodem->svq, vvmodem->sg_send, 1,
+ &vvmodem->send_msginfo, GFP_ATOMIC);
+
+ if (err < 0) {
+ LOG("failed to add buffer to virtqueue (err = %d)\n", err);
+ return 0;
+ }
+
+ virtqueue_kick(vvmodem->svq);
+ LOG("vmodem kick the data to ecs\n");
+
+ return len;
+}
+
+/*
+ * poll/select hook: report POLLHUP once the slot is disconnected,
+ * POLLIN | POLLRDNORM whenever the read list has queued messages.
+ */
+static unsigned int vmodem_poll(struct file *filp, poll_table *wait)
+{
+ struct virtvmodem_info *vvinfo;
+ unsigned int ret;
+
+ vvinfo = filp->private_data;
+ poll_wait(filp, &vvinfo->waitqueue, wait);
+
+ if (!vvinfo->guest_connected) {
+ return POLLHUP;
+ }
+
+ ret = 0;
+
+ if (has_readdata(vvinfo)) {
+ LOG("POLLIN | POLLRDNORM\n");
+ ret |= POLLIN | POLLRDNORM;
+ }
+
+ return ret;
+}
+
/* Character-device callbacks for the vmodem nodes created in
 * init_vmodem_device(); read/write exchange struct msg_info with the host. */
static struct file_operations vmodem_fops = {
	.owner = THIS_MODULE,
	.open = vmodem_open,
	.release = vmodem_close,
	.read = vmodem_read,
	.write = vmodem_write,
	.poll = vmodem_poll,
};
+
+
+
+static void vmodem_recv_done(struct virtqueue *rvq) {
+
+ unsigned int len;
+ unsigned long flags;
+ struct msg_info* _msg;
+ struct msg_buf* msgbuf;
+
+ _msg = (struct msg_info*) virtqueue_get_buf(vvmodem->rvq, &len);
+ if (_msg == NULL ) {
+ LOG("failed to virtqueue_get_buf\n");
+ return;
+ }
+
+ do {
+ msgbuf = (struct msg_buf*) __xmalloc(SIZEOF_MSG_BUF);
+ memset(msgbuf, 0x00, sizeof(*msgbuf));
+ memcpy(&(msgbuf->msg), _msg, sizeof(*_msg));
+
+ spin_lock_irqsave(&pvmodem_info[EVID_READ]->inbuf_lock, flags);
+
+ list_add_tail(&msgbuf->list, &vvmodem->read_list);
+
+ spin_unlock_irqrestore(&pvmodem_info[EVID_READ]->inbuf_lock, flags);
+
+ wake_up_interruptible(&pvmodem_info[EVID_READ]->waitqueue);
+
+ _msg = (struct msg_info*) virtqueue_get_buf(vvmodem->rvq, &len);
+ if (_msg == NULL) {
+ break;
+ }
+
+ } while (true);
+}
+
/* TX virtqueue callback: reclaim the completed outbound buffer. */
static void vmodem_send_done(struct virtqueue *svq) {
	unsigned int out_len = 0;

	LOG("vmodem send done\n");
	virtqueue_get_buf(svq, &out_len);
}
+
+static int init_vqs(struct virtio_vmodem *v_vmodem) {
+ struct virtqueue *vqs[2];
+ vq_callback_t *callbacks[] = { vmodem_recv_done, vmodem_send_done };
+ const char *names[] = { "vmodem_input", "vmodem_output" };
+ int err;
+
+ err = v_vmodem->vdev->config->find_vqs(v_vmodem->vdev, 2, vqs, callbacks, names);
+ if (err < 0) {
+ LOG("find_vqs of vmodem device is failed\n");
+ return err;
+ }
+
+ v_vmodem->rvq = vqs[0];
+ v_vmodem->svq = vqs[1];
+
+ return 0;
+}
+
+int init_vmodem_device(void)
+{
+ int i, ret;
+
+ if (alloc_chrdev_region(&vmodem_dev_number, 0, NUM_OF_VMODEM, DEVICE_NAME) < 0) {
+ LOG("fail to alloc_chrdev_region\n");
+ return -1;
+ }
+
+ vmodem_class = class_create(THIS_MODULE, DEVICE_NAME);
+
+ if (vmodem_class == NULL ) {
+ unregister_chrdev_region(vmodem_dev_number, NUM_OF_VMODEM);
+ return -1;
+ }
+
+ for (i = 0; i < NUM_OF_VMODEM; i++) {
+ pvmodem_info[i] = kmalloc(sizeof(struct virtvmodem_info), GFP_KERNEL);
+ if (!pvmodem_info[i]) {
+ LOG("pvmodem_info malloc is failed\n");
+ return -ENOMEM;
+ }
+
+ sprintf(pvmodem_info[i]->name, "%s%d", DEVICE_NAME, i);
+
+ pvmodem_info[i]->index = i;
+ pvmodem_info[i]->guest_connected = false;
+
+ cdev_init(&pvmodem_info[i]->cdev, &vmodem_fops);
+ pvmodem_info[i]->cdev.owner = THIS_MODULE;
+ ret = cdev_add(&pvmodem_info[i]->cdev, (vmodem_dev_number + i), 1);
+
+ init_waitqueue_head(&pvmodem_info[i]->waitqueue);
+ spin_lock_init(&pvmodem_info[i]->inbuf_lock);
+ spin_lock_init(&pvmodem_info[i]->outvq_lock);
+
+ if (ret == -1) {
+ LOG("cdev_add(%d) is failed\n", i);
+ return ret;
+ }
+
+ device_create(vmodem_class, NULL, (vmodem_dev_number + i), NULL, "%s%d",
+ DEVICE_NAME, i);
+ }
+
+ return 0;
+}
+
+
+static int vmodem_probe(struct virtio_device* dev) {
+ int ret;
+
+ vvmodem = kmalloc(sizeof(struct virtio_vmodem), GFP_KERNEL);
+
+ INIT_LIST_HEAD(&vvmodem->read_list);
+
+ vvmodem->vdev = dev;
+ dev->priv = vvmodem;
+
+ ret = init_vmodem_device();
+ if (ret) {
+ LOG("failed to init_vmodem_device\n");
+ return ret;
+ }
+ ret = init_vqs(vvmodem);
+ if (ret) {
+ dev->config->del_vqs(dev);
+ kfree(vvmodem);
+ dev->priv = NULL;
+
+ LOG("failed to init_vqs\n");
+ return ret;
+ }
+
+ virtqueue_enable_cb(vvmodem->rvq);
+ virtqueue_enable_cb(vvmodem->svq);
+
+ memset(&vvmodem->read_msginfo, 0x00, sizeof(vvmodem->read_msginfo));
+ sg_set_buf(vvmodem->sg_read, &vvmodem->read_msginfo, sizeof(struct msg_info));
+
+ memset(&vvmodem->send_msginfo, 0x00, sizeof(vvmodem->send_msginfo));
+ sg_set_buf(vvmodem->sg_send, &vvmodem->send_msginfo, sizeof(struct msg_info));
+
+ sg_init_one(vvmodem->sg_read, &vvmodem->read_msginfo, sizeof(vvmodem->read_msginfo));
+ sg_init_one(vvmodem->sg_send, &vvmodem->send_msginfo, sizeof(vvmodem->send_msginfo));
+
+ LOG("vmodem is probed");
+ return 0;
+}
+
+static void vmodem_remove(struct virtio_device* dev)
+{
+ struct virtio_vmodem* _vmodem = dev->priv;
+ if (!_vmodem) {
+ LOG("vmodem is NULL\n");
+ return;
+ }
+
+ dev->config->reset(dev);
+ dev->config->del_vqs(dev);
+
+ kfree(_vmodem);
+
+ LOG("driver is removed.\n");
+}
+
MODULE_DEVICE_TABLE(virtio, id_table);

/* Virtio driver glue: probe/remove wire this module to the vmodem device. */
static struct virtio_driver virtio_vmodem_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE ,
	},
	.id_table = id_table,
	.probe = vmodem_probe,
	.remove = vmodem_remove,
};
+
+static int __init vmodem_init(void)
+{
+ LOG("VMODEM driver initialized.\n");
+
+ return register_virtio_driver(&virtio_vmodem_driver);
+}
+
+static void __exit vmodem_exit(void)
+{
+ int i;
+
+ unregister_chrdev_region(vmodem_dev_number, NUM_OF_VMODEM);
+
+ for (i = 0; i < NUM_OF_VMODEM; i++) {
+ device_destroy(vmodem_class, MKDEV(MAJOR(vmodem_dev_number), i));
+ cdev_del(&pvmodem_info[i]->cdev);
+ kfree(pvmodem_info[i]);
+ }
+
+ class_destroy(vmodem_class);
+
+ unregister_virtio_driver(&virtio_vmodem_driver);
+
+ LOG("VMODEM driver is destroyed.\n");
+}
+
/* Module load/unload hooks. */
module_init(vmodem_init);
module_exit(vmodem_exit);
+
--- /dev/null
# Build the maru virtio sensor core plus one object per emulated sensor.
obj-$(CONFIG_MARU_VIRTIO_SENSOR) += maru_virtio_sensor.o \
		maru_accel.o \
		maru_geo.o \
		maru_gyro.o \
		maru_light.o \
		maru_proxi.o \
		maru_rotation_vector.o \
		maru_haptic.o \
		maru_pressure.o \
		maru_uv.o \
		maru_hrm.o
--- /dev/null
+/*
+ * Maru Virtio Accelerometer Sensor Device Driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Choi <jinhyung2.choi@samsung.com>
+ * Sangho Park <sangho1206.park@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "maru_virtio_sensor.h"
+
/* Per-driver state for the virtio accelerometer. */
struct maru_accel_data {
	struct input_dev *input_data;
	struct delayed_work work;

	struct virtio_sensor* vs;

	atomic_t enable;	/* 0/1 — whether the polling worker runs */
	atomic_t poll_delay;	/* poll interval in ns (fed to nsecs_to_jiffies) */
};

static struct device *accel_sensor_device;

#ifdef SUPPORT_LEGACY_SENSOR
static struct device *l_accel_sensor_device;
#endif
+
#define GRAVITY_CHANGE_UNIT 15322
/* Map a raw accelerometer reading into the signed-short output range:
 * (n / 64) * (SHRT_MAX / 2) / GRAVITY_CHANGE_UNIT, integer arithmetic. */
static short sensor_convert_data(int number)
{
	int scaled = (number / 64) * (SHRT_MAX / 2);

	return (short)(scaled / GRAVITY_CHANGE_UNIT);
}
+
+static void maru_accel_input_work_func(struct work_struct *work) {
+
+ int poll_time = 200000000;
+ int enable = 0;
+ int ret = 0;
+ int accel_x, accel_y, accel_z;
+ short raw_x, raw_y, raw_z;
+ char sensor_data[__MAX_BUF_SENSOR];
+ struct maru_accel_data *data = container_of((struct delayed_work *)work,
+ struct maru_accel_data, work);
+
+ LOG(1, "maru_accel_input_work_func starts");
+
+ enable = atomic_read(&data->enable);
+ poll_time = atomic_read(&data->poll_delay);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+
+ if (enable) {
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_accel, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (!ret) {
+ sscanf(sensor_data, "%d,%d,%d", &accel_x, &accel_y, &accel_z);
+ LOG(1, "accel_set act %d, %d, %d", accel_x, accel_y, accel_z);
+ raw_x = sensor_convert_data(accel_x);
+ raw_y = sensor_convert_data(accel_y);
+ raw_z = sensor_convert_data(accel_z);
+ LOG(1, "accel_set raw %d, %d, %d", raw_x, raw_y, raw_z);
+
+ if (raw_x == 0) {
+ raw_x = 1;
+ }
+
+ if (raw_y == 0) {
+ raw_y = 1;
+ }
+
+ if (raw_z == 0) {
+ raw_z = 1;
+ }
+
+ input_report_rel(data->input_data, REL_X, raw_x);
+ input_report_rel(data->input_data, REL_Y, raw_y);
+ input_report_rel(data->input_data, REL_Z, raw_z);
+ input_sync(data->input_data);
+ }
+ }
+
+ enable = atomic_read(&data->enable);
+
+ LOG(1, "enable: %d, poll_time: %d", enable, poll_time);
+ if (enable) {
+ if (poll_time > 0) {
+ schedule_delayed_work(&data->work, nsecs_to_jiffies(poll_time));
+ } else {
+ schedule_delayed_work(&data->work, 0);
+ }
+ }
+
+ LOG(1, "maru_accel_input_work_func ends");
+
+}
+
+static ssize_t maru_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", MARU_ACCEL_DEVICE_NAME);
+}
+
+static ssize_t maru_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", MARU_SENSOR_DEVICE_VENDOR);
+}
+
+static ssize_t maru_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char sensor_data[__MAX_BUF_SENSOR];
+ int ret;
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_accel_data *data = input_get_drvdata(input_data);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_accel_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", sensor_data);
+}
+
+static ssize_t maru_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_accel_data *data = input_get_drvdata(input_data);
+ int value = simple_strtoul(buf, NULL, 10);
+
+ if (value != 0 && value != 1)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_accel_enable, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (value) {
+ if (atomic_read(&data->enable) != 1) {
+ atomic_set(&data->enable, 1);
+ schedule_delayed_work(&data->work, 0);
+
+ }
+ } else {
+ if (atomic_read(&data->enable) != 0) {
+ atomic_set(&data->enable, 0);
+ cancel_delayed_work(&data->work);
+ }
+ }
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+static ssize_t maru_poll_delay_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char sensor_data[__MAX_BUF_SENSOR];
+ int ret;
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_accel_data *data = input_get_drvdata(input_data);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_accel_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", sensor_data);
+}
+
+static ssize_t maru_poll_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_accel_data *data = input_get_drvdata(input_data);
+ int value = simple_strtoul(buf, NULL, 10);
+
+ if (value < __MIN_DELAY_SENSOR)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_accel_delay, buf);
+ mutex_unlock(&data->vs->vqlock);
+ atomic_set(&data->poll_delay, value);
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
/* name/vendor attributes attached to the class device at registration. */
static struct device_attribute dev_attr_sensor_name =
		__ATTR(name, S_IRUGO, maru_name_show, NULL);

static struct device_attribute dev_attr_sensor_vendor =
		__ATTR(vendor, S_IRUGO, maru_vendor_show, NULL);

static struct device_attribute *accel_sensor_attrs [] = {
	&dev_attr_sensor_name,
	&dev_attr_sensor_vendor,
	NULL,
};
+
#ifdef SUPPORT_LEGACY_SENSOR
/* Legacy sysfs interface ("accel_sim" with an "xyz" attribute) kept for
 * older userspace. */
#define ACCEL_NAME_STR "accel_sim"

/* Legacy "name" attribute. */
static ssize_t accel_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s", ACCEL_NAME_STR);
}

/* Legacy raw-xyz read: passes the host string through unchanged. */
static ssize_t xyz_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	char sensor_data[__MAX_BUF_SENSOR];
	int ret;
	struct input_dev *input_data = to_input_dev(dev);
	struct maru_accel_data *data = input_get_drvdata(input_data);

	memset(sensor_data, 0, __MAX_BUF_SENSOR);
	mutex_lock(&data->vs->vqlock);
	ret = get_sensor_data(sensor_type_accel, sensor_data);
	mutex_unlock(&data->vs->vqlock);
	if (ret)
		return sprintf(buf, "%d", -1);

	return sprintf(buf, "%s", sensor_data);
}

/* Legacy raw-xyz write: forwards the user string to the host unparsed. */
static ssize_t xyz_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct input_dev *input_data = to_input_dev(dev);
	struct maru_accel_data *data = input_get_drvdata(input_data);
	mutex_lock(&data->vs->vqlock);
	set_sensor_data(sensor_type_accel, buf);
	mutex_unlock(&data->vs->vqlock);
	return strnlen(buf, __MAX_BUF_SENSOR);
}

static struct device_attribute dev_attr_l_sensor_name =
		__ATTR(name, S_IRUGO, accel_name_show, NULL);

static DEVICE_ATTR(xyz, 0644, xyz_show, xyz_store);

static struct device_attribute *l_accel_sensor_attrs [] = {
	&dev_attr_l_sensor_name,
	&dev_attr_xyz,
	NULL,
};
#endif
+
/* enable/poll_delay attributes exposed on the input device's sysfs node. */
static struct device_attribute attr_accel [] =
{
	MARU_ATTR_RW(enable),
	MARU_ATTR_RW(poll_delay),
};

static struct attribute *maru_accel_attribute[] = {
	&attr_accel[0].attr,
	&attr_accel[1].attr,
	NULL
};

static struct attribute_group maru_accel_attribute_group = {
	.attrs = maru_accel_attribute
};
+
+static void accel_clear(struct maru_accel_data *data) {
+ if (data == NULL)
+ return;
+
+ if (data->input_data) {
+ sysfs_remove_group(&data->input_data->dev.kobj,
+ &maru_accel_attribute_group);
+ input_free_device(data->input_data);
+ }
+
+ kfree(data);
+ data = NULL;
+}
+
+static int set_initial_value(struct maru_accel_data *data)
+{
+ int delay = 0;
+ int ret = 0;
+ char sensor_data [__MAX_BUF_SENSOR];
+
+ memset(sensor_data, 0, sizeof(sensor_data));
+
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_accel_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret) {
+ ERR("failed to get initial delay time");
+ return ret;
+ }
+
+ delay = sensor_atoi(sensor_data);
+ if (delay < 0) {
+ ERR("weird value is set initial delay");
+ return ret;
+ }
+
+ atomic_set(&data->poll_delay, delay);
+
+ memset(sensor_data, 0, sizeof(sensor_data));
+ sensor_data[0] = '0';
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_accel_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ atomic_set(&data->enable, 0);
+
+ return ret;
+}
+
+static int create_input_device(struct maru_accel_data *data)
+{
+ int ret = 0;
+ struct input_dev *input_data = NULL;
+
+ input_data = input_allocate_device();
+ if (input_data == NULL) {
+ ERR("failed initialing input handler");
+ accel_clear(data);
+ return -ENOMEM;
+ }
+
+ input_data->name = SENSOR_ACCEL_INPUT_NAME;
+ input_data->id.bustype = BUS_I2C;
+
+ set_bit(EV_REL, input_data->evbit);
+ set_bit(EV_SYN, input_data->evbit);
+ input_set_capability(input_data, EV_REL, REL_X);
+ input_set_capability(input_data, EV_REL, REL_Y);
+ input_set_capability(input_data, EV_REL, REL_Z);
+
+ data->input_data = input_data;
+
+ ret = input_register_device(input_data);
+ if (ret) {
+ ERR("failed to register input data");
+ accel_clear(data);
+ return ret;
+ }
+
+ input_set_drvdata(input_data, data);
+
+ ret = sysfs_create_group(&input_data->dev.kobj,
+ &maru_accel_attribute_group);
+ if (ret) {
+ accel_clear(data);
+ ERR("failed initialing devices");
+ return ret;
+ }
+
+ return ret;
+}
+
+int maru_accel_init(struct virtio_sensor *vs) {
+ int ret = 0;
+ struct maru_accel_data *data = NULL;
+
+ INFO("maru_accel device init starts.");
+
+ data = kmalloc(sizeof(struct maru_accel_data), GFP_KERNEL);
+ if (data == NULL) {
+ ERR("failed to create accel data.");
+ return -ENOMEM;
+ }
+
+ vs->accel_handle = data;
+ data->vs = vs;
+
+ INIT_DELAYED_WORK(&data->work, maru_accel_input_work_func);
+
+ // create name & vendor
+ ret = register_sensor_device(accel_sensor_device, vs,
+ accel_sensor_attrs, DRIVER_ACCEL_NAME);
+ if (ret) {
+ ERR("failed to register accel device");
+ accel_clear(data);
+ return -1;
+ }
+
+#ifdef SUPPORT_LEGACY_SENSOR
+ ret = l_register_sensor_device(l_accel_sensor_device, vs,
+ l_accel_sensor_attrs, DRIVER_ACCEL_NAME);
+ if (ret) {
+ ERR("failed to register legacy accel device");
+ accel_clear(data);
+ return -1;
+ }
+#endif
+
+ // create input
+ ret = create_input_device(data);
+ if (ret) {
+ ERR("failed to create input device");
+ return ret;
+ }
+
+ // set initial delay & enable
+ ret = set_initial_value(data);
+ if (ret) {
+ ERR("failed to set initial value");
+ return ret;
+ }
+
+ INFO("maru_accel device init ends.");
+
+ return ret;
+}
+
+int maru_accel_exit(struct virtio_sensor *vs) {
+ struct maru_accel_data *data = NULL;
+
+ data = (struct maru_accel_data *)vs->accel_handle;
+ accel_clear(data);
+ INFO("maru_accel device exit ends.");
+ return 0;
+}
--- /dev/null
+/*
+ * Maru Virtio Geomagnetic Sensor Device Driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Choi <jinhyung2.choi@samsung.com>
+ * Sangho Park <sangho1206.park@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "maru_virtio_sensor.h"
+
/* Per-driver state for the virtio geomagnetic sensor. */
struct maru_geo_data {
	struct input_dev *input_data;
	struct delayed_work work;

	struct virtio_sensor* vs;

	atomic_t enable;	/* 0/1 — whether the polling worker runs */
	atomic_t poll_delay;	/* poll interval in ns (fed to nsecs_to_jiffies) */
};

static struct device *geo_sensor_device;

#ifdef SUPPORT_LEGACY_SENSOR
static struct device *l_geo_sensor_device;
#endif
+
/* Scale a raw magnetometer value by 5/3 (i.e. divide by 0.6) using
 * integer arithmetic. */
static short sensor_convert_data(int number)
{
	int scaled = number * 5;

	return (short)(scaled / 3);
}
+
+static void maru_geo_input_work_func(struct work_struct *work) {
+
+ int poll_time = 200000000;
+ int enable = 0;
+ int ret = 0;
+ int geo_x, geo_y, geo_z, hdst = 4;
+ short raw_x, raw_y, raw_z;
+ char sensor_data[__MAX_BUF_SENSOR];
+ struct maru_geo_data *data = container_of((struct delayed_work *)work,
+ struct maru_geo_data, work);
+
+ LOG(1, "maru_geo_input_work_func starts");
+
+ enable = atomic_read(&data->enable);
+ poll_time = atomic_read(&data->poll_delay);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+
+ if (enable) {
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_mag, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (!ret) {
+ sscanf(sensor_data, "%d %d %d", &geo_x, &geo_y, &geo_z);
+ LOG(1, "geo_set act %d, %d, %d", geo_x, geo_y, geo_z);
+ raw_x = sensor_convert_data(geo_x);
+ raw_y = sensor_convert_data(geo_y);
+ raw_z = sensor_convert_data(geo_z);
+ LOG(1, "geo_set raw %d, %d, %d, %d", raw_x, raw_y, raw_z, hdst);
+
+ if (raw_x == 0) {
+ raw_x = 1;
+ }
+
+ if (raw_y == 0) {
+ raw_y = 1;
+ }
+
+ if (raw_z == 0) {
+ raw_z = 1;
+ }
+
+ input_report_rel(data->input_data, REL_RX, raw_x);
+ input_report_rel(data->input_data, REL_RY, raw_y);
+ input_report_rel(data->input_data, REL_RZ, raw_z);
+ input_report_rel(data->input_data, REL_HWHEEL, hdst);
+ input_sync(data->input_data);
+ }
+ }
+
+ enable = atomic_read(&data->enable);
+
+ LOG(1, "enable: %d, poll_time: %d", enable, poll_time);
+ if (enable) {
+ if (poll_time > 0) {
+ schedule_delayed_work(&data->work, nsecs_to_jiffies(poll_time));
+ } else {
+ schedule_delayed_work(&data->work, 0);
+ }
+ }
+
+ LOG(1, "maru_geo_input_work_func ends");
+
+}
+
+static ssize_t maru_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", MARU_GEO_DEVICE_NAME);
+}
+
+static ssize_t maru_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", MARU_SENSOR_DEVICE_VENDOR);
+}
+
+static ssize_t maru_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char sensor_data[__MAX_BUF_SENSOR];
+ int ret;
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_geo_data *data = input_get_drvdata(input_data);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_geo_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", sensor_data);
+}
+
+static ssize_t maru_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_geo_data *data = input_get_drvdata(input_data);
+ int value = simple_strtoul(buf, NULL, 10);
+
+ if (value != 0 && value != 1)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_geo_enable, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (value) {
+ if (atomic_read(&data->enable) != 1) {
+ atomic_set(&data->enable, 1);
+ schedule_delayed_work(&data->work, 0);
+ }
+ } else {
+ if (atomic_read(&data->enable) != 0) {
+ atomic_set(&data->enable, 0);
+ cancel_delayed_work(&data->work);
+ }
+ }
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+static ssize_t maru_poll_delay_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char sensor_data[__MAX_BUF_SENSOR];
+ int ret;
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_geo_data *data = input_get_drvdata(input_data);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_geo_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", sensor_data);
+}
+
+static ssize_t maru_poll_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_geo_data *data = input_get_drvdata(input_data);
+ int value = simple_strtoul(buf, NULL, 10);
+
+ if (value < __MIN_DELAY_SENSOR)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_geo_delay, buf);
+ mutex_unlock(&data->vs->vqlock);
+ atomic_set(&data->poll_delay, value);
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
/* name/vendor attributes attached to the class device at registration. */
static struct device_attribute dev_attr_sensor_name =
		__ATTR(name, S_IRUGO, maru_name_show, NULL);

static struct device_attribute dev_attr_sensor_vendor =
		__ATTR(vendor, S_IRUGO, maru_vendor_show, NULL);

static struct device_attribute *geo_sensor_attrs [] = {
	&dev_attr_sensor_name,
	&dev_attr_sensor_vendor,
	NULL,
};
+
#ifdef SUPPORT_LEGACY_SENSOR
/* Legacy sysfs interface ("geo_sim" with "raw" and "tesla" attributes)
 * kept for older userspace. */
#define GEO_NAME_STR "geo_sim"

/* Legacy "name" attribute. */
static ssize_t geo_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s", GEO_NAME_STR);
}

/* Legacy tilt ("raw") read: passes the host string through unchanged. */
static ssize_t raw_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	char sensor_data[__MAX_BUF_SENSOR];
	int ret;
	struct input_dev *input_data = to_input_dev(dev);
	struct maru_geo_data *data = input_get_drvdata(input_data);

	memset(sensor_data, 0, __MAX_BUF_SENSOR);
	mutex_lock(&data->vs->vqlock);
	ret = get_sensor_data(sensor_type_tilt, sensor_data);
	mutex_unlock(&data->vs->vqlock);
	if (ret)
		return sprintf(buf, "%d", -1);

	return sprintf(buf, "%s", sensor_data);
}

/* Legacy tilt ("raw") write: forwards the user string to the host. */
static ssize_t raw_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct input_dev *input_data = to_input_dev(dev);
	struct maru_geo_data *data = input_get_drvdata(input_data);
	mutex_lock(&data->vs->vqlock);
	set_sensor_data(sensor_type_tilt, buf);
	mutex_unlock(&data->vs->vqlock);
	return strnlen(buf, __MAX_BUF_SENSOR);
}

/* Legacy magnetometer ("tesla") read. */
static ssize_t tesla_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	char sensor_data[__MAX_BUF_SENSOR];
	int ret;
	struct input_dev *input_data = to_input_dev(dev);
	struct maru_geo_data *data = input_get_drvdata(input_data);

	memset(sensor_data, 0, __MAX_BUF_SENSOR);
	mutex_lock(&data->vs->vqlock);
	ret = get_sensor_data(sensor_type_mag, sensor_data);
	mutex_unlock(&data->vs->vqlock);
	if (ret)
		return sprintf(buf, "%d", -1);

	return sprintf(buf, "%s", sensor_data);
}

/* Legacy magnetometer ("tesla") write. */
static ssize_t tesla_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct input_dev *input_data = to_input_dev(dev);
	struct maru_geo_data *data = input_get_drvdata(input_data);
	mutex_lock(&data->vs->vqlock);
	set_sensor_data(sensor_type_mag, buf);
	mutex_unlock(&data->vs->vqlock);
	return strnlen(buf, __MAX_BUF_SENSOR);
}

static struct device_attribute dev_attr_l_sensor_name =
		__ATTR(name, S_IRUGO, geo_name_show, NULL);

static DEVICE_ATTR(raw, 0644, raw_show, raw_store);
static DEVICE_ATTR(tesla, 0644, tesla_show, tesla_store);

static struct device_attribute *l_geo_sensor_attrs [] = {
	&dev_attr_l_sensor_name,
	&dev_attr_raw,
	&dev_attr_tesla,
	NULL,
};
#endif
+
/* enable/poll_delay attributes exposed on the input device's sysfs node. */
static struct device_attribute attr_geo [] =
{
	MARU_ATTR_RW(enable),
	MARU_ATTR_RW(poll_delay),
};

static struct attribute *maru_geo_attribute[] = {
	&attr_geo[0].attr,
	&attr_geo[1].attr,
	NULL
};

static struct attribute_group maru_geo_attribute_group = {
	.attrs = maru_geo_attribute
};
+
+static void geo_clear(struct maru_geo_data *data) {
+ if (data == NULL)
+ return;
+
+ if (data->input_data) {
+ sysfs_remove_group(&data->input_data->dev.kobj,
+ &maru_geo_attribute_group);
+ input_free_device(data->input_data);
+ }
+
+ kfree(data);
+ data = NULL;
+}
+
+static int set_initial_value(struct maru_geo_data *data)
+{
+ int delay = 0;
+ int ret = 0;
+ char sensor_data [__MAX_BUF_SENSOR];
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_geo_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret) {
+ ERR("failed to get initial delay time");
+ return ret;
+ }
+
+ delay = sensor_atoi(sensor_data);
+ if (delay < 0) {
+ ERR("weird value is set initial delay");
+ return ret;
+ }
+
+ atomic_set(&data->poll_delay, delay);
+
+ memset(sensor_data, 0, sizeof(sensor_data));
+ sensor_data[0] = '0';
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_geo_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ atomic_set(&data->enable, 0);
+
+ return ret;
+}
+
+static int create_input_device(struct maru_geo_data *data)
+{
+ int ret = 0;
+ struct input_dev *input_data = NULL;
+
+ input_data = input_allocate_device();
+ if (input_data == NULL) {
+ ERR("failed initialing input handler");
+ geo_clear(data);
+ return -ENOMEM;
+ }
+
+ input_data->name = SENSOR_GEO_INPUT_NAME;
+ input_data->id.bustype = BUS_I2C;
+
+ set_bit(EV_REL, input_data->evbit);
+ set_bit(EV_SYN, input_data->evbit);
+ input_set_capability(input_data, EV_REL, REL_RX);
+ input_set_capability(input_data, EV_REL, REL_RY);
+ input_set_capability(input_data, EV_REL, REL_RZ);
+ input_set_capability(input_data, EV_REL, REL_HWHEEL);
+
+ data->input_data = input_data;
+
+ ret = input_register_device(input_data);
+ if (ret) {
+ ERR("failed to register input data");
+ geo_clear(data);
+ return ret;
+ }
+
+ input_set_drvdata(input_data, data);
+
+ ret = sysfs_create_group(&input_data->dev.kobj,
+ &maru_geo_attribute_group);
+ if (ret) {
+ geo_clear(data);
+ ERR("failed initialing devices");
+ return ret;
+ }
+
+ return ret;
+}
+
+int maru_geo_init(struct virtio_sensor *vs) {
+ int ret = 0;
+ struct maru_geo_data *data = NULL;
+
+ INFO("maru_geo device init starts.");
+
+ data = kmalloc(sizeof(struct maru_geo_data), GFP_KERNEL);
+ if (data == NULL) {
+ ERR("failed to create geo data.");
+ return -ENOMEM;
+ }
+
+ vs->geo_handle = data;
+ data->vs = vs;
+
+ INIT_DELAYED_WORK(&data->work, maru_geo_input_work_func);
+
+ // create name & vendor
+ ret = register_sensor_device(geo_sensor_device, vs,
+ geo_sensor_attrs, DRIVER_GEO_NAME);
+ if (ret) {
+ ERR("failed to register geo device");
+ geo_clear(data);
+ return -1;
+ }
+
+#ifdef SUPPORT_LEGACY_SENSOR
+ ret = l_register_sensor_device(l_geo_sensor_device, vs,
+ l_geo_sensor_attrs, DRIVER_GEO_NAME);
+ if (ret) {
+ ERR("failed to register legacy geo device");
+ geo_clear(data);
+ return -1;
+ }
+#endif
+
+ // create input
+ ret = create_input_device(data);
+ if (ret) {
+ ERR("failed to create input device");
+ return ret;
+ }
+
+ // set initial delay & enable
+ ret = set_initial_value(data);
+ if (ret) {
+ ERR("failed to set initial value");
+ return ret;
+ }
+
+ INFO("maru_geo device init ends.");
+
+ return ret;
+}
+
+int maru_geo_exit(struct virtio_sensor *vs) {
+ struct maru_geo_data *data = NULL;
+
+ data = (struct maru_geo_data *)vs->geo_handle;
+ geo_clear(data);
+ INFO("maru_geo device exit ends.");
+ return 0;
+}
--- /dev/null
+/*
+ * Maru Virtio Gyroscope Sensor Device Driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Choi <jinhyung2.choi@samsung.com>
+ * Sangho Park <sangho1206.park@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "maru_virtio_sensor.h"
+
/* Per-driver state for the virtio gyroscope. */
struct maru_gyro_data {
	struct input_dev *input_data;
	struct delayed_work work;

	struct virtio_sensor* vs;

	atomic_t enable;	/* 0/1 — whether the polling worker runs */
	atomic_t poll_delay;	/* poll interval in ns (fed to nsecs_to_jiffies) */
};

static struct device *gyro_sensor_device;

#ifdef SUPPORT_LEGACY_SENSOR
static struct device *l_gyro_sensor_device;
#endif

/* Sign-alternating substitutes reported whenever an axis reads 0
 * (see maru_gyro_input_work_func). */
static int gyro_adjust_x = 1;
static int gyro_adjust_y = 1;
static int gyro_adjust_z = 1;
+
+/*
+ * Delayed-work handler: while the sensor is enabled, polls the host for a
+ * "x,y,z" reading, forwards it as REL_RX/RY/RZ events, then re-arms itself
+ * after the configured poll delay.
+ */
+static void maru_gyro_input_work_func(struct work_struct *work) {
+
+ int poll_time = 200000000;
+ int enable = 0;
+ int ret = 0;
+ /* Fix: initialized to 0 -- a short or failed sscanf() parse below used
+  * to feed uninitialized stack values into input_report_rel(). */
+ int gyro_x = 0, gyro_y = 0, gyro_z = 0;
+ char sensor_data[__MAX_BUF_SENSOR];
+ struct maru_gyro_data *data = container_of((struct delayed_work *)work,
+ struct maru_gyro_data, work);
+
+ LOG(1, "maru_gyro_input_work_func starts");
+
+ enable = atomic_read(&data->enable);
+ poll_time = atomic_read(&data->poll_delay);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+
+ if (enable) {
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_gyro, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (!ret) {
+ sscanf(sensor_data, "%d,%d,%d", &gyro_x, &gyro_y, &gyro_z);
+ LOG(1, "gyro_set act %d, %d, %d", gyro_x, gyro_y, gyro_z);
+
+ /* Replace exact zeroes with an alternating +/-1 so the axis
+  * still produces an observable event stream. */
+ if (gyro_x == 0) {
+ gyro_x = gyro_adjust_x;
+ gyro_adjust_x *= -1;
+ }
+
+ if (gyro_y == 0) {
+ gyro_y = gyro_adjust_y;
+ gyro_adjust_y *= -1;
+ }
+
+ if (gyro_z == 0) {
+ gyro_z = gyro_adjust_z;
+ gyro_adjust_z *= -1;
+ }
+
+ input_report_rel(data->input_data, REL_RX, gyro_x);
+ input_report_rel(data->input_data, REL_RY, gyro_y);
+ input_report_rel(data->input_data, REL_RZ, gyro_z);
+ input_sync(data->input_data);
+ }
+ }
+
+ /* Re-read: enable may have been cleared while the virtqueue was busy. */
+ enable = atomic_read(&data->enable);
+
+ LOG(1, "enable: %d, poll_time: %d", enable, poll_time);
+ if (enable) {
+ if (poll_time > 0) {
+ schedule_delayed_work(&data->work, nsecs_to_jiffies(poll_time));
+ } else {
+ schedule_delayed_work(&data->work, 0);
+ }
+ }
+
+ LOG(1, "maru_gyro_input_work_func ends");
+}
+
+/* sysfs "name" attribute: reports the emulated gyro model string. */
+static ssize_t maru_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ const char *name = MARU_GYRO_DEVICE_NAME;
+
+ return sprintf(buf, "%s", name);
+}
+
+/* sysfs "vendor" attribute: reports the emulated sensor vendor string. */
+static ssize_t maru_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ const char *vendor = MARU_SENSOR_DEVICE_VENDOR;
+
+ return sprintf(buf, "%s", vendor);
+}
+
+/* Query the current enable state from the host and report it via sysfs. */
+static ssize_t maru_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct maru_gyro_data *data = input_get_drvdata(to_input_dev(dev));
+ char value[__MAX_BUF_SENSOR] = { 0, };
+ int err;
+
+ mutex_lock(&data->vs->vqlock);
+ err = get_sensor_data(sensor_type_gyro_enable, value);
+ mutex_unlock(&data->vs->vqlock);
+
+ /* On transport failure report "-1", matching the sibling attributes. */
+ if (err)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", value);
+}
+
+/* sysfs "enable" writer: pushes 0/1 to the host and (re)arms the poll work. */
+static ssize_t maru_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct maru_gyro_data *data = input_get_drvdata(to_input_dev(dev));
+ int request = simple_strtoul(buf, NULL, 10);
+
+ /* Only "0" and "1" are meaningful; anything else is silently ignored. */
+ if (request != 0 && request != 1)
+ return count;
+
+ /* Tell the host first ... */
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_gyro_enable, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ /* ... then start or stop the local polling work on a state change. */
+ if (request == 0) {
+ if (atomic_read(&data->enable) != 0) {
+ atomic_set(&data->enable, 0);
+ cancel_delayed_work(&data->work);
+ }
+ } else if (atomic_read(&data->enable) != 1) {
+ atomic_set(&data->enable, 1);
+ schedule_delayed_work(&data->work, 0);
+ }
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+/* Query the current poll delay from the host and report it via sysfs. */
+static ssize_t maru_poll_delay_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct maru_gyro_data *data = input_get_drvdata(to_input_dev(dev));
+ char value[__MAX_BUF_SENSOR] = { 0, };
+ int err;
+
+ mutex_lock(&data->vs->vqlock);
+ err = get_sensor_data(sensor_type_gyro_delay, value);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (err)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", value);
+}
+
+/* sysfs "poll_delay" writer: forwards the value to the host, then caches it. */
+static ssize_t maru_poll_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct maru_gyro_data *data = input_get_drvdata(to_input_dev(dev));
+ int delay = simple_strtoul(buf, NULL, 10);
+
+ if (delay < __MIN_DELAY_SENSOR)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_gyro_delay, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ atomic_set(&data->poll_delay, delay);
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+/* Read-only identification attributes hung off the class device. */
+static struct device_attribute dev_attr_sensor_name =
+ __ATTR(name, S_IRUGO, maru_name_show, NULL);
+
+static struct device_attribute dev_attr_sensor_vendor =
+ __ATTR(vendor, S_IRUGO, maru_vendor_show, NULL);
+
+/* NULL-terminated list consumed by register_sensor_device(). */
+static struct device_attribute *gyro_sensor_attrs [] = {
+ &dev_attr_sensor_name,
+ &dev_attr_sensor_vendor,
+ NULL,
+};
+
+#ifdef SUPPORT_LEGACY_SENSOR
+/* Device name reported by the legacy (pre-virtio) gyro interface. */
+#define GYRO_NAME_STR "gyro_sim"
+
+/* Legacy sysfs "name" attribute. */
+static ssize_t gyro_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", GYRO_NAME_STR);
+}
+
+/* Legacy attribute: read the raw X axis value from the host. */
+static ssize_t gyro_x_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct maru_gyro_data *data = input_get_drvdata(to_input_dev(dev));
+ char value[__MAX_BUF_SENSOR] = { 0, };
+ int err;
+
+ mutex_lock(&data->vs->vqlock);
+ err = get_sensor_data(sensor_type_gyro_x, value);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (err)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", value);
+}
+
+/* Legacy attribute: push a raw X axis value to the host. */
+static ssize_t gyro_x_raw_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct maru_gyro_data *data = input_get_drvdata(to_input_dev(dev));
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_gyro_x, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+/* Legacy attribute: read the raw Y axis value from the host. */
+static ssize_t gyro_y_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct maru_gyro_data *data = input_get_drvdata(to_input_dev(dev));
+ char value[__MAX_BUF_SENSOR] = { 0, };
+ int err;
+
+ mutex_lock(&data->vs->vqlock);
+ err = get_sensor_data(sensor_type_gyro_y, value);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (err)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", value);
+}
+
+/* Legacy attribute: push a raw Y axis value to the host. */
+static ssize_t gyro_y_raw_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct maru_gyro_data *data = input_get_drvdata(to_input_dev(dev));
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_gyro_y, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+/* Legacy attribute: read the raw Z axis value from the host. */
+static ssize_t gyro_z_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct maru_gyro_data *data = input_get_drvdata(to_input_dev(dev));
+ char value[__MAX_BUF_SENSOR] = { 0, };
+ int err;
+
+ mutex_lock(&data->vs->vqlock);
+ err = get_sensor_data(sensor_type_gyro_z, value);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (err)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", value);
+}
+
+/* Legacy attribute: push a raw Z axis value to the host. */
+static ssize_t gyro_z_raw_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct maru_gyro_data *data = input_get_drvdata(to_input_dev(dev));
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_gyro_z, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+/* Legacy class-device attribute set (name + per-axis raw values). */
+static struct device_attribute dev_attr_l_sensor_name =
+ __ATTR(name, S_IRUGO, gyro_name_show, NULL);
+
+static DEVICE_ATTR(gyro_x_raw, 0644, gyro_x_raw_show, gyro_x_raw_store);
+static DEVICE_ATTR(gyro_y_raw, 0644, gyro_y_raw_show, gyro_y_raw_store);
+static DEVICE_ATTR(gyro_z_raw, 0644, gyro_z_raw_show, gyro_z_raw_store);
+
+/* NULL-terminated list consumed by l_register_sensor_device(). */
+static struct device_attribute *l_gyro_sensor_attrs [] = {
+ &dev_attr_l_sensor_name,
+ &dev_attr_gyro_x_raw,
+ &dev_attr_gyro_y_raw,
+ &dev_attr_gyro_z_raw,
+ NULL,
+};
+#endif
+
+/* "enable" / "poll_delay" attributes attached to the input device. */
+static struct device_attribute attr_gyro [] =
+{
+ MARU_ATTR_RW(enable),
+ MARU_ATTR_RW(poll_delay),
+};
+
+/* Indices must track the order of attr_gyro[] above. */
+static struct attribute *maru_gyro_attribute[] = {
+ &attr_gyro[0].attr,
+ &attr_gyro[1].attr,
+ NULL
+};
+
+static struct attribute_group maru_gyro_attribute_group = {
+ .attrs = maru_gyro_attribute
+};
+
+/*
+ * Release everything owned by a maru_gyro_data instance; safe on NULL.
+ * NOTE(review): input_free_device() is only correct for devices that were
+ * never (successfully) registered; once input_register_device() succeeds,
+ * input_unregister_device() should be used instead -- confirm the callers'
+ * error paths before relying on this for teardown of a live device.
+ */
+static void gyro_clear(struct maru_gyro_data *data) {
+ if (data == NULL)
+ return;
+
+ if (data->input_data) {
+ sysfs_remove_group(&data->input_data->dev.kobj,
+ &maru_gyro_attribute_group);
+ input_free_device(data->input_data);
+ }
+
+ kfree(data);
+ data = NULL; /* no effect outside this function: 'data' is a local copy */
+}
+
+/*
+ * Fetch the initial poll delay from the host and force the sensor into the
+ * disabled state. Returns 0 on success or a negative errno.
+ */
+static int set_initial_value(struct maru_gyro_data *data)
+{
+ int delay = 0;
+ int ret = 0;
+ char sensor_data [__MAX_BUF_SENSOR];
+
+ memset(sensor_data, 0, sizeof(sensor_data));
+
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_gyro_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret) {
+ ERR("failed to get initial delay time");
+ return ret;
+ }
+
+ delay = sensor_atoi(sensor_data);
+ if (delay < 0) {
+ ERR("weird value is set initial delay");
+ /* Fix: was 'return ret' with ret == 0 here, which reported
+  * success while leaving poll_delay unset. */
+ return -EINVAL;
+ }
+
+ atomic_set(&data->poll_delay, delay);
+
+ /* Report "disabled" to the host and cache that state locally. */
+ memset(sensor_data, 0, sizeof(sensor_data));
+ sensor_data[0] = '0';
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_gyro_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ atomic_set(&data->enable, 0);
+
+ return ret;
+}
+
+/*
+ * Allocate, configure and register the gyro input device, then attach the
+ * enable/poll_delay attribute group to it.
+ * NOTE(review): if sysfs_create_group() fails, gyro_clear() ends up calling
+ * input_free_device() on an already *registered* device; the usual contract
+ * is to unregister it instead -- confirm before reworking error paths.
+ */
+static int create_input_device(struct maru_gyro_data *data)
+{
+ int ret = 0;
+ struct input_dev *input_data = NULL;
+
+ input_data = input_allocate_device();
+ if (input_data == NULL) {
+ ERR("failed initialing input handler");
+ gyro_clear(data);
+ return -ENOMEM;
+ }
+
+ input_data->name = SENSOR_GYRO_INPUT_NAME;
+ input_data->id.bustype = BUS_I2C;
+
+ /* Readings are delivered as relative rotation events. */
+ set_bit(EV_REL, input_data->evbit);
+ set_bit(EV_SYN, input_data->evbit);
+ input_set_capability(input_data, EV_REL, REL_RX);
+ input_set_capability(input_data, EV_REL, REL_RY);
+ input_set_capability(input_data, EV_REL, REL_RZ);
+
+ data->input_data = input_data;
+
+ ret = input_register_device(input_data);
+ if (ret) {
+ ERR("failed to register input data");
+ gyro_clear(data);
+ return ret;
+ }
+
+ input_set_drvdata(input_data, data);
+
+ ret = sysfs_create_group(&input_data->dev.kobj,
+ &maru_gyro_attribute_group);
+ if (ret) {
+ gyro_clear(data);
+ ERR("failed initialing devices");
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Probe-time setup for the emulated gyroscope: allocate state, register the
+ * sysfs class device(s) and the input device, then apply the initial
+ * delay/enable values. Returns 0 on success, negative on failure.
+ */
+int maru_gyro_init(struct virtio_sensor *vs) {
+ int ret = 0;
+ struct maru_gyro_data *data = NULL;
+
+ INFO("maru_gyro device init starts.");
+
+ /* Fix: kzalloc instead of kmalloc -- the error paths below call
+  * gyro_clear(), which inspects data->input_data, and the work func
+  * reads the atomics; with kmalloc those fields held garbage until
+  * create_input_device()/set_initial_value() ran. */
+ data = kzalloc(sizeof(struct maru_gyro_data), GFP_KERNEL);
+ if (data == NULL) {
+ ERR("failed to create gyro data.");
+ return -ENOMEM;
+ }
+
+ vs->gyro_handle = data;
+ data->vs = vs;
+
+ INIT_DELAYED_WORK(&data->work, maru_gyro_input_work_func);
+
+ // create name & vendor
+ ret = register_sensor_device(gyro_sensor_device, vs,
+ gyro_sensor_attrs, DRIVER_GYRO_NAME);
+ if (ret) {
+ ERR("failed to register gyro device");
+ gyro_clear(data);
+ return -1;
+ }
+
+#ifdef SUPPORT_LEGACY_SENSOR
+ ret = l_register_sensor_device(l_gyro_sensor_device, vs,
+ l_gyro_sensor_attrs, DRIVER_GYRO_NAME);
+ if (ret) {
+ ERR("failed to register legacy gyro device");
+ gyro_clear(data);
+ return -1;
+ }
+#endif
+
+ // create input
+ ret = create_input_device(data);
+ if (ret) {
+ ERR("failed to create input device");
+ return ret;
+ }
+
+ // set initial delay & enable
+ ret = set_initial_value(data);
+ if (ret) {
+ ERR("failed to set initial value");
+ return ret;
+ }
+
+ INFO("maru_gyro device init ends.");
+
+ return ret;
+}
+
+/* Tear down the gyroscope emulation and release its state. */
+int maru_gyro_exit(struct virtio_sensor *vs) {
+ struct maru_gyro_data *data = (struct maru_gyro_data *)vs->gyro_handle;
+
+ gyro_clear(data);
+ INFO("maru_gyro device exit ends.");
+ return 0;
+}
--- /dev/null
+/*
+ * Maru Virtio Haptic Sensor Device Driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Choi <jinhyung2.choi@samsung.com>
+ * Sangho Park <sangho1206.park@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "maru_virtio_sensor.h"
+
+/*
+ * Per-device state for the emulated haptic (force-feedback) device.
+ */
+struct maru_haptic_data {
+ struct input_dev *input_data; /* force-feedback input device */
+
+ struct virtio_sensor* vs; /* owning virtio transport */
+};
+
+/*
+ * Release everything owned by a maru_haptic_data instance; safe on NULL.
+ * NOTE(review): input_free_device() is only correct for never-registered
+ * devices; for a registered one, input_unregister_device() is expected --
+ * confirm callers' error/teardown paths.
+ */
+static void haptic_clear(struct maru_haptic_data *data) {
+ if (data == NULL)
+ return;
+
+ if (data->input_data) {
+ input_ff_destroy(data->input_data);
+ input_free_device(data->input_data);
+ }
+
+ kfree(data);
+ data = NULL; /* no effect outside this function: 'data' is a local copy */
+}
+
+/* ff_device->upload stub: effects are accepted but never stored. */
+static int maru_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old)
+{
+ INFO("called maru_upload_effect. No work to do.");
+ return 0;
+}
+
+/* ff_device->erase stub: nothing was stored, so nothing to erase. */
+static int maru_erase_effect(struct input_dev *dev, int effect_id)
+{
+ INFO("called maru_erase_effect. No work to do.");
+ return 0;
+}
+
+/* ff_device->set_gain stub: gain is ignored by the emulated device. */
+static void maru_set_gain(struct input_dev *dev, u16 gain)
+{
+}
+
+/* ff_device->set_autocenter stub: autocenter is ignored by the emulation. */
+static void maru_set_autocenter(struct input_dev *dev, u16 magnitude)
+{
+}
+
+/* ff_device->playback stub: playback requests are acknowledged only. */
+static int maru_playback(struct input_dev *dev, int effect_id, int value)
+{
+ INFO("called maru_playback. No work to do.");
+ return 0;
+}
+
+/*
+ * Allocate the force-feedback input device, install the ff stub callbacks
+ * and register it. Returns 0 on success, negative errno on failure.
+ */
+static int create_input_device(struct maru_haptic_data *data)
+{
+ int ret = 0;
+ struct ff_device *ff;
+ struct input_dev *input_data = NULL;
+
+ input_data = input_allocate_device();
+ if (input_data == NULL) {
+ ERR("failed initialing input handler");
+ haptic_clear(data);
+ return -ENOMEM;
+ }
+
+ input_data->name = SENSOR_HAPTIC_INPUT_NAME;
+ input_data->id.bustype = BUS_I2C;
+
+ set_bit(EV_FF, input_data->evbit);
+ input_set_capability(input_data, EV_FF, FF_PERIODIC);
+
+ data->input_data = input_data;
+
+ input_set_drvdata(input_data, data);
+
+ ret = input_ff_create(input_data, 16);
+ if (ret) {
+ /* Fix: previously returned without releasing 'data' or the
+  * allocated input device (leak, unlike every other error path
+  * here). input_ff_destroy() inside haptic_clear() is a no-op
+  * when no ff device was created, so this is safe. */
+ haptic_clear(data);
+ return ret;
+ }
+
+ set_bit(FF_SQUARE, input_data->ffbit);
+ ff = input_data->ff;
+ ff->upload = maru_upload_effect;
+ ff->erase = maru_erase_effect;
+ ff->set_gain = maru_set_gain;
+ ff->set_autocenter = maru_set_autocenter;
+ ff->playback = maru_playback;
+
+ ret = input_register_device(input_data);
+ if (ret) {
+ ERR("failed to register input data");
+ haptic_clear(data);
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Probe-time setup for the emulated haptic device. Returns 0 on success.
+ */
+int maru_haptic_init(struct virtio_sensor *vs) {
+ int ret = 0;
+ struct maru_haptic_data *data = NULL;
+
+ INFO("maru_haptic device init starts.");
+
+ /* Fix: kzalloc instead of kmalloc -- haptic_clear() on the error path
+  * inspects data->input_data, which kmalloc left uninitialized. */
+ data = kzalloc(sizeof(struct maru_haptic_data), GFP_KERNEL);
+ if (data == NULL) {
+ ERR("failed to create haptic data.");
+ return -ENOMEM;
+ }
+
+ data->vs = vs;
+ vs->haptic_handle = data;
+
+ // create input
+ ret = create_input_device(data);
+ if (ret) {
+ ERR("failed to create input device");
+ return ret;
+ }
+
+ INFO("maru_haptic device init ends.");
+
+ return ret;
+}
+
+/* Tear down the haptic emulation and release its state. */
+int maru_haptic_exit(struct virtio_sensor *vs) {
+ struct maru_haptic_data *data = (struct maru_haptic_data *)vs->haptic_handle;
+
+ haptic_clear(data);
+ INFO("maru_haptic device exit ends.");
+
+ return 0;
+}
--- /dev/null
+/*
+ * Maru Virtio HRM(HeartBeat Rate Monitor) Sensor Device Driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Choi <jinhyung2.choi@samsung.com>
+ * Sangho Park <sangho1206.park@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "maru_virtio_sensor.h"
+
+/*
+ * Per-device state for the emulated heart-rate monitor (HRM).
+ */
+struct maru_hrm_data {
+ struct input_dev *input_data; /* delivers hrm/rri readings as REL_X/Y/Z events */
+ struct delayed_work work; /* periodic host-polling work (maru_hrm_input_work_func) */
+
+ struct virtio_sensor* vs; /* owning transport; vs->vqlock guards virtqueue access */
+
+ atomic_t enable; /* 1 while polling is active, 0 otherwise */
+ atomic_t poll_delay; /* polling period, fed to nsecs_to_jiffies() */
+};
+
+/* sysfs class device created by register_sensor_device() (name/vendor). */
+static struct device *hrm_sensor_device;
+
+/*
+ * Delayed-work handler: while enabled, polls the host for a "hrm, rri"
+ * reading, forwards it as REL_X/REL_Y events (REL_Z is a constant marker),
+ * then re-arms itself after the configured poll delay.
+ */
+static void maru_hrm_input_work_func(struct work_struct *work) {
+
+ int poll_time = 200000000;
+ int enable = 0;
+ int ret = 0;
+ int hrm = 0, rri = 0;
+ char sensor_data[__MAX_BUF_SENSOR];
+ struct maru_hrm_data *data = container_of((struct delayed_work *)work,
+ struct maru_hrm_data, work);
+
+ LOG(1, "maru_hrm_input_work_func starts");
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ poll_time = atomic_read(&data->poll_delay);
+
+ enable = atomic_read(&data->enable);
+
+ if (enable) {
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_hrm, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (!ret) {
+ sscanf(sensor_data, "%d, %d", &hrm, &rri);
+ LOG(1, "hrm_set %d %d", hrm, rri);
+
+ /* NOTE(review): the +1 offsets look like a device-protocol
+  * convention (avoid reporting relative 0) -- confirm that the
+  * userspace consumer subtracts them back out. */
+ input_report_rel(data->input_data, REL_X, hrm + 1);
+ input_report_rel(data->input_data, REL_Y, rri + 1);
+ input_report_rel(data->input_data, REL_Z, 1);
+ input_sync(data->input_data);
+ }
+ }
+
+ /* Re-read: enable may have been cleared while the virtqueue was busy. */
+ enable = atomic_read(&data->enable);
+
+ LOG(1, "enable: %d, poll_time: %d", enable, poll_time);
+ if (enable) {
+ if (poll_time > 0) {
+ schedule_delayed_work(&data->work, nsecs_to_jiffies(poll_time));
+ } else {
+ schedule_delayed_work(&data->work, 0);
+ }
+ }
+
+ LOG(1, "maru_hrm_input_work_func ends");
+
+}
+
+/* sysfs "name" attribute: reports the emulated HRM model string. */
+static ssize_t maru_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ const char *name = MARU_HRM_DEVICE_NAME;
+
+ return sprintf(buf, "%s", name);
+}
+
+/* sysfs "vendor" attribute: reports the emulated sensor vendor string. */
+static ssize_t maru_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ const char *vendor = MARU_SENSOR_DEVICE_VENDOR;
+
+ return sprintf(buf, "%s", vendor);
+}
+
+/* Query the current enable state from the host and report it via sysfs. */
+static ssize_t maru_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct maru_hrm_data *data = input_get_drvdata(to_input_dev(dev));
+ char value[__MAX_BUF_SENSOR] = { 0, };
+ int err;
+
+ mutex_lock(&data->vs->vqlock);
+ err = get_sensor_data(sensor_type_hrm_enable, value);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (err)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", value);
+}
+
+/* sysfs "enable" writer: pushes 0/1 to the host and (re)arms the poll work. */
+static ssize_t maru_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct maru_hrm_data *data = input_get_drvdata(to_input_dev(dev));
+ int request = simple_strtoul(buf, NULL, 10);
+
+ /* Only "0" and "1" are meaningful; anything else is silently ignored. */
+ if (request != 0 && request != 1)
+ return count;
+
+ /* Tell the host first ... */
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_hrm_enable, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ /* ... then start or stop the local polling work on a state change. */
+ if (request == 0) {
+ if (atomic_read(&data->enable) != 0) {
+ atomic_set(&data->enable, 0);
+ cancel_delayed_work(&data->work);
+ }
+ } else if (atomic_read(&data->enable) != 1) {
+ atomic_set(&data->enable, 1);
+ schedule_delayed_work(&data->work, 0);
+ }
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+/* Query the current poll delay from the host and report it via sysfs. */
+static ssize_t maru_poll_delay_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct maru_hrm_data *data = input_get_drvdata(to_input_dev(dev));
+ char value[__MAX_BUF_SENSOR] = { 0, };
+ int err;
+
+ mutex_lock(&data->vs->vqlock);
+ err = get_sensor_data(sensor_type_hrm_delay, value);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (err)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", value);
+}
+
+/* sysfs "poll_delay" writer: forwards the value to the host, then caches it. */
+static ssize_t maru_poll_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct maru_hrm_data *data = input_get_drvdata(to_input_dev(dev));
+ int delay = simple_strtoul(buf, NULL, 10);
+
+ if (delay < __MIN_DELAY_SENSOR)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_hrm_delay, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ atomic_set(&data->poll_delay, delay);
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+/* Read-only identification attributes hung off the class device. */
+static struct device_attribute dev_attr_sensor_name =
+ __ATTR(name, S_IRUGO, maru_name_show, NULL);
+
+static struct device_attribute dev_attr_sensor_vendor =
+ __ATTR(vendor, S_IRUGO, maru_vendor_show, NULL);
+
+/* NULL-terminated list consumed by register_sensor_device(). */
+static struct device_attribute *hrm_sensor_attrs [] = {
+ &dev_attr_sensor_name,
+ &dev_attr_sensor_vendor,
+ NULL,
+};
+
+/* "enable" / "poll_delay" attributes attached to the input device. */
+static struct device_attribute attr_hrm [] =
+{
+ MARU_ATTR_RW(enable),
+ MARU_ATTR_RW(poll_delay),
+};
+
+/* Indices must track the order of attr_hrm[] above. */
+static struct attribute *maru_hrm_attribute[] = {
+ &attr_hrm[0].attr,
+ &attr_hrm[1].attr,
+ NULL
+};
+
+static struct attribute_group maru_hrm_attribute_group = {
+ .attrs = maru_hrm_attribute
+};
+
+/*
+ * Release everything owned by a maru_hrm_data instance; safe on NULL.
+ * NOTE(review): input_free_device() is only correct for never-registered
+ * devices -- confirm callers' error/teardown paths (see gyro_clear note).
+ */
+static void hrm_clear(struct maru_hrm_data *data) {
+ if (data == NULL)
+ return;
+
+ if (data->input_data) {
+ sysfs_remove_group(&data->input_data->dev.kobj,
+ &maru_hrm_attribute_group);
+ input_free_device(data->input_data);
+ }
+
+ kfree(data);
+ data = NULL; /* no effect outside this function: 'data' is a local copy */
+}
+
+/*
+ * Fetch the initial poll delay and enable state from the host; start the
+ * polling work immediately if the host reports the sensor enabled.
+ * Returns 0 on success or a negative errno.
+ */
+static int set_initial_value(struct maru_hrm_data *data)
+{
+ int delay = 0;
+ int ret = 0;
+ int enable = 0;
+ char sensor_data [__MAX_BUF_SENSOR];
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_hrm_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret) {
+ ERR("failed to get initial delay time");
+ return ret;
+ }
+
+ delay = sensor_atoi(sensor_data);
+
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_hrm_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret) {
+ ERR("failed to get initial enable");
+ return ret;
+ }
+
+ enable = sensor_atoi(sensor_data);
+
+ if (delay < 0) {
+ ERR("weird value is set initial delay");
+ /* Fix: was 'return ret' with ret == 0 here, silently reporting
+  * success while leaving poll_delay unset. */
+ return -EINVAL;
+ }
+
+ atomic_set(&data->poll_delay, delay);
+
+ if (enable) {
+ atomic_set(&data->enable, 1);
+ schedule_delayed_work(&data->work, 0);
+ }
+
+ return ret;
+}
+
+/*
+ * Allocate, configure and register the HRM input device, then attach the
+ * enable/poll_delay attribute group to it.
+ * NOTE(review): same free-vs-unregister concern as the gyro variant if
+ * sysfs_create_group() fails after registration -- confirm before rework.
+ */
+static int create_input_device(struct maru_hrm_data *data)
+{
+ int ret = 0;
+ struct input_dev *input_data = NULL;
+
+ input_data = input_allocate_device();
+ if (input_data == NULL) {
+ ERR("failed initialing input handler");
+ hrm_clear(data);
+ return -ENOMEM;
+ }
+
+ input_data->name = SENSOR_HRM_INPUT_NAME;
+ input_data->id.bustype = BUS_I2C;
+
+ /* Readings are delivered as relative events on X/Y/Z. */
+ set_bit(EV_REL, input_data->evbit);
+ input_set_capability(input_data, EV_REL, REL_X);
+ input_set_capability(input_data, EV_REL, REL_Y);
+ input_set_capability(input_data, EV_REL, REL_Z);
+
+ data->input_data = input_data;
+
+ ret = input_register_device(input_data);
+ if (ret) {
+ ERR("failed to register input data");
+ hrm_clear(data);
+ return ret;
+ }
+
+ input_set_drvdata(input_data, data);
+
+ ret = sysfs_create_group(&input_data->dev.kobj,
+ &maru_hrm_attribute_group);
+ if (ret) {
+ hrm_clear(data);
+ ERR("failed initialing devices");
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Probe-time setup for the emulated HRM: allocate state, register the
+ * class device and the input device, then apply initial delay/enable.
+ * Returns 0 on success, negative on failure.
+ */
+int maru_hrm_init(struct virtio_sensor *vs) {
+ int ret = 0;
+ struct maru_hrm_data *data = NULL;
+
+ INFO("maru_hrm device init starts.");
+
+ /* Fix: kzalloc instead of kmalloc -- hrm_clear() on the error path
+  * inspects data->input_data and the work func reads the atomics;
+  * kmalloc left those fields uninitialized. */
+ data = kzalloc(sizeof(struct maru_hrm_data), GFP_KERNEL);
+ if (data == NULL) {
+ ERR("failed to create hrm data.");
+ return -ENOMEM;
+ }
+
+ vs->hrm_handle = data;
+ data->vs = vs;
+
+ INIT_DELAYED_WORK(&data->work, maru_hrm_input_work_func);
+
+ // create name & vendor
+ ret = register_sensor_device(hrm_sensor_device, vs,
+ hrm_sensor_attrs, DRIVER_HRM_NAME);
+ if (ret) {
+ ERR("failed to register hrm device");
+ hrm_clear(data);
+ return -1;
+ }
+
+ // create input
+ ret = create_input_device(data);
+ if (ret) {
+ ERR("failed to create input device");
+ return ret;
+ }
+
+ // set initial delay & enable
+ ret = set_initial_value(data);
+ if (ret) {
+ ERR("failed to set initial value");
+ return ret;
+ }
+
+ INFO("maru_hrm device init ends.");
+
+ return ret;
+}
+
+/* Tear down the HRM emulation and release its state. */
+int maru_hrm_exit(struct virtio_sensor *vs) {
+ struct maru_hrm_data *data = (struct maru_hrm_data *)vs->hrm_handle;
+
+ hrm_clear(data);
+ INFO("maru_hrm device exit ends.");
+ return 0;
+}
--- /dev/null
+/*
+ * Maru Virtio Light Sensor Device Driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Choi <jinhyung2.choi@samsung.com>
+ * Sangho Park <sangho1206.park@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "maru_virtio_sensor.h"
+
+/*
+ * Per-device state for the emulated ambient-light sensor.
+ */
+struct maru_light_data {
+ struct input_dev *input_data; /* delivers readings as REL_* events */
+ struct delayed_work work; /* periodic host-polling work (maru_light_input_work_func) */
+
+ struct virtio_sensor* vs; /* owning transport; vs->vqlock guards virtqueue access */
+
+ atomic_t enable; /* 1 while polling is active, 0 otherwise */
+ atomic_t poll_delay; /* polling period, fed to nsecs_to_jiffies() */
+};
+
+/* sysfs class device created by register_sensor_device() (name/vendor). */
+static struct device *light_sensor_device;
+
+#ifdef SUPPORT_LEGACY_SENSOR
+/* Extra device exposing the legacy "light_sim" attribute set. */
+static struct device *l_light_sensor_device;
+#endif
+
+/*
+ * Delayed-work handler: while enabled, polls the host for a lux reading,
+ * forwards it (offset by +1) as a REL_RX event with zeroed RGBW channels,
+ * then re-arms itself after the configured poll delay.
+ */
+static void maru_light_input_work_func(struct work_struct *work) {
+
+ int poll_time = 200000000;
+ int enable = 0;
+ int ret = 0;
+ int light = 0;
+ char sensor_data[__MAX_BUF_SENSOR];
+ struct maru_light_data *data = container_of((struct delayed_work *)work,
+ struct maru_light_data, work);
+
+ LOG(1, "maru_light_input_work_func starts");
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ poll_time = atomic_read(&data->poll_delay);
+
+ enable = atomic_read(&data->enable);
+
+ if (enable) {
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_light, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (!ret) {
+ sscanf(sensor_data, "%d", &light);
+ LOG(1, "light_set %d", light);
+
+ /* NOTE(review): the +1 offset presumably avoids reporting a
+  * relative value of 0 -- confirm the consumer undoes it. */
+ input_report_rel(data->input_data, REL_RX, (light + 1)); // LUX
+ input_report_rel(data->input_data, REL_HWHEEL, 0); // red
+ input_report_rel(data->input_data, REL_DIAL, 0); // green
+ input_report_rel(data->input_data, REL_WHEEL, 0); // blue
+ input_report_rel(data->input_data, REL_MISC, 0); // white
+ input_sync(data->input_data);
+ }
+ }
+
+ /* Re-read: enable may have been cleared while the virtqueue was busy. */
+ enable = atomic_read(&data->enable);
+
+ LOG(1, "enable: %d, poll_time: %d", enable, poll_time);
+ if (enable) {
+ if (poll_time > 0) {
+ schedule_delayed_work(&data->work, nsecs_to_jiffies(poll_time));
+ } else {
+ schedule_delayed_work(&data->work, 0);
+ }
+ }
+
+ LOG(1, "maru_light_input_work_func ends");
+
+}
+
+/* sysfs "name" attribute: reports the emulated light-sensor model string. */
+static ssize_t maru_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ const char *name = MARU_LIGHT_DEVICE_NAME;
+
+ return sprintf(buf, "%s", name);
+}
+
+/* sysfs "vendor" attribute: reports the emulated sensor vendor string. */
+static ssize_t maru_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ const char *vendor = MARU_SENSOR_DEVICE_VENDOR;
+
+ return sprintf(buf, "%s", vendor);
+}
+
+/* Query the current enable state from the host and report it via sysfs. */
+static ssize_t maru_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct maru_light_data *data = input_get_drvdata(to_input_dev(dev));
+ char value[__MAX_BUF_SENSOR] = { 0, };
+ int err;
+
+ mutex_lock(&data->vs->vqlock);
+ err = get_sensor_data(sensor_type_light_enable, value);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (err)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", value);
+}
+
+/* sysfs "enable" writer: pushes 0/1 to the host and (re)arms the poll work. */
+static ssize_t maru_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct maru_light_data *data = input_get_drvdata(to_input_dev(dev));
+ int request = simple_strtoul(buf, NULL, 10);
+
+ /* Only "0" and "1" are meaningful; anything else is silently ignored. */
+ if (request != 0 && request != 1)
+ return count;
+
+ /* Tell the host first ... */
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_light_enable, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ /* ... then start or stop the local polling work on a state change. */
+ if (request == 0) {
+ if (atomic_read(&data->enable) != 0) {
+ atomic_set(&data->enable, 0);
+ cancel_delayed_work(&data->work);
+ }
+ } else if (atomic_read(&data->enable) != 1) {
+ atomic_set(&data->enable, 1);
+ schedule_delayed_work(&data->work, 0);
+ }
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+/* Query the current poll delay from the host and report it via sysfs. */
+static ssize_t maru_poll_delay_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct maru_light_data *data = input_get_drvdata(to_input_dev(dev));
+ char value[__MAX_BUF_SENSOR] = { 0, };
+ int err;
+
+ mutex_lock(&data->vs->vqlock);
+ err = get_sensor_data(sensor_type_light_delay, value);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (err)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", value);
+}
+
+/* sysfs "poll_delay" writer: forwards the value to the host, then caches it. */
+static ssize_t maru_poll_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct maru_light_data *data = input_get_drvdata(to_input_dev(dev));
+ int delay = simple_strtoul(buf, NULL, 10);
+
+ if (delay < __MIN_DELAY_SENSOR)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_light_delay, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ atomic_set(&data->poll_delay, delay);
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+/* Read-only identification attributes hung off the class device. */
+static struct device_attribute dev_attr_sensor_name =
+ __ATTR(name, S_IRUGO, maru_name_show, NULL);
+
+static struct device_attribute dev_attr_sensor_vendor =
+ __ATTR(vendor, S_IRUGO, maru_vendor_show, NULL);
+
+/* NULL-terminated list consumed by register_sensor_device(). */
+static struct device_attribute *light_sensor_attrs [] = {
+ &dev_attr_sensor_name,
+ &dev_attr_sensor_vendor,
+ NULL,
+};
+
+#ifdef SUPPORT_LEGACY_SENSOR
+/* Device name reported by the legacy (pre-virtio) light interface. */
+#define LIGHT_NAME_STR "light_sim"
+
+/* Legacy sysfs "name" attribute. */
+static ssize_t light_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", LIGHT_NAME_STR);
+}
+
+/* Legacy attribute: read the raw ADC value from the host. */
+static ssize_t adc_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct maru_light_data *data = input_get_drvdata(to_input_dev(dev));
+ char value[__MAX_BUF_SENSOR] = { 0, };
+ int err;
+
+ mutex_lock(&data->vs->vqlock);
+ err = get_sensor_data(sensor_type_light_adc, value);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (err)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", value);
+}
+
+/* Legacy attribute: push a raw ADC value to the host. */
+static ssize_t adc_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct maru_light_data *data = input_get_drvdata(to_input_dev(dev));
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_light_adc, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+/* Legacy attribute: read the brightness level from the host. */
+static ssize_t level_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct maru_light_data *data = input_get_drvdata(to_input_dev(dev));
+ char value[__MAX_BUF_SENSOR] = { 0, };
+ int err;
+
+ mutex_lock(&data->vs->vqlock);
+ err = get_sensor_data(sensor_type_light_level, value);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (err)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", value);
+}
+
+/* Legacy attribute: push a brightness level to the host. */
+static ssize_t level_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct maru_light_data *data = input_get_drvdata(to_input_dev(dev));
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_light_level, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+/* Legacy class-device attribute set (name + adc + level). */
+static struct device_attribute dev_attr_l_sensor_name =
+ __ATTR(name, S_IRUGO, light_name_show, NULL);
+
+static DEVICE_ATTR(adc, 0644, adc_show, adc_store);
+static DEVICE_ATTR(level, 0644, level_show, level_store);
+
+/* NULL-terminated list consumed by l_register_sensor_device(). */
+static struct device_attribute *l_light_sensor_attrs [] = {
+ &dev_attr_l_sensor_name,
+ &dev_attr_adc,
+ &dev_attr_level,
+ NULL,
+};
+#endif
+
/* "enable" and "poll_delay" attributes for the input device; MARU_ATTR_RW
 * presumably expands to the maru_enable_show/store and
 * maru_poll_delay_show/store handlers (macro lives in
 * maru_virtio_sensor.h) — TODO confirm. */
static struct device_attribute attr_light [] =
{
	MARU_ATTR_RW(enable),
	MARU_ATTR_RW(poll_delay),
};

static struct attribute *maru_light_attribute[] = {
	&attr_light[0].attr,
	&attr_light[1].attr,
	NULL
};

/* Group created on the input device's kobject in create_input_device(). */
static struct attribute_group maru_light_attribute_group = {
	.attrs = maru_light_attribute
};
+
+static void light_clear(struct maru_light_data *data) {
+ if (data == NULL)
+ return;
+
+ if (data->input_data) {
+ sysfs_remove_group(&data->input_data->dev.kobj,
+ &maru_light_attribute_group);
+ input_free_device(data->input_data);
+ }
+
+ kfree(data);
+ data = NULL;
+}
+
+static int set_initial_value(struct maru_light_data *data)
+{
+ int delay = 0;
+ int ret = 0;
+ int enable = 0;
+ char sensor_data [__MAX_BUF_SENSOR];
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_light_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret) {
+ ERR("failed to get initial delay time");
+ return ret;
+ }
+
+ delay = sensor_atoi(sensor_data);
+
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_light_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret) {
+ ERR("failed to get initial enable");
+ return ret;
+ }
+
+ enable = sensor_atoi(sensor_data);
+
+ if (delay < 0) {
+ ERR("weird value is set initial delay");
+ return ret;
+ }
+
+ atomic_set(&data->poll_delay, delay);
+
+ if (enable) {
+ atomic_set(&data->enable, 1);
+ schedule_delayed_work(&data->work, 0);
+ }
+
+ return ret;
+}
+
+static int create_input_device(struct maru_light_data *data)
+{
+ int ret = 0;
+ struct input_dev *input_data = NULL;
+
+ input_data = input_allocate_device();
+ if (input_data == NULL) {
+ ERR("failed initialing input handler");
+ light_clear(data);
+ return -ENOMEM;
+ }
+
+ input_data->name = SENSOR_LIGHT_INPUT_NAME;
+ input_data->id.bustype = BUS_I2C;
+
+ set_bit(EV_REL, input_data->evbit);
+ input_set_capability(input_data, EV_REL, REL_RX);
+ input_set_capability(input_data, EV_REL, REL_HWHEEL);
+ input_set_capability(input_data, EV_REL, REL_DIAL);
+ input_set_capability(input_data, EV_REL, REL_WHEEL);
+ input_set_capability(input_data, EV_REL, REL_MISC);
+
+ data->input_data = input_data;
+
+ ret = input_register_device(input_data);
+ if (ret) {
+ ERR("failed to register input data");
+ light_clear(data);
+ return ret;
+ }
+
+ input_set_drvdata(input_data, data);
+
+ ret = sysfs_create_group(&input_data->dev.kobj,
+ &maru_light_attribute_group);
+ if (ret) {
+ light_clear(data);
+ ERR("failed initialing devices");
+ return ret;
+ }
+
+ return ret;
+}
+
+int maru_light_init(struct virtio_sensor *vs) {
+ int ret = 0;
+ struct maru_light_data *data = NULL;
+
+ INFO("maru_light device init starts.");
+
+ data = kmalloc(sizeof(struct maru_light_data), GFP_KERNEL);
+ if (data == NULL) {
+ ERR("failed to create light data.");
+ return -ENOMEM;
+ }
+
+ vs->light_handle = data;
+ data->vs = vs;
+
+ INIT_DELAYED_WORK(&data->work, maru_light_input_work_func);
+
+ // create name & vendor
+ ret = register_sensor_device(light_sensor_device, vs,
+ light_sensor_attrs, DRIVER_LIGHT_NAME);
+ if (ret) {
+ ERR("failed to register light device");
+ light_clear(data);
+ return -1;
+ }
+
+#ifdef SUPPORT_LEGACY_SENSOR
+ ret = l_register_sensor_device(l_light_sensor_device, vs,
+ l_light_sensor_attrs, DRIVER_LIGHT_NAME);
+ if (ret) {
+ ERR("failed to register legacy light device");
+ light_clear(data);
+ return -1;
+ }
+#endif
+
+ // create input
+ ret = create_input_device(data);
+ if (ret) {
+ ERR("failed to create input device");
+ return ret;
+ }
+
+ // set initial delay & enable
+ ret = set_initial_value(data);
+ if (ret) {
+ ERR("failed to set initial value");
+ return ret;
+ }
+
+ INFO("maru_light device init ends.");
+
+ return ret;
+}
+
+int maru_light_exit(struct virtio_sensor *vs) {
+ struct maru_light_data *data = NULL;
+
+ data = (struct maru_light_data *)vs->light_handle;
+ light_clear(data);
+ INFO("maru_light device exit ends.");
+ return 0;
+}
--- /dev/null
+/*
+ * Maru Virtio Pressure Sensor Device Driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Choi <jinhyung2.choi@samsung.com>
+ * Sangho Park <sangho1206.park@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "maru_virtio_sensor.h"
+
/* Per-device state for the virtio pressure sensor. */
struct maru_pressure_data {
	struct input_dev *input_data;	/* input device used to report samples */
	struct delayed_work work;	/* periodic polling work item */

	struct virtio_sensor* vs;	/* owning virtio transport (holds vqlock) */

	atomic_t enable;	/* 1 while polling is active */
	atomic_t poll_delay;	/* poll period, fed to nsecs_to_jiffies() */
};

/* Class device created by register_sensor_device() for name/vendor attrs. */
static struct device *pressure_sensor_device;
+
#define PRESSURE_ADJUST 193
/* Scale a raw pressure reading: value * 10000 / PRESSURE_ADJUST,
 * using truncating integer division. */
static int pressure_convert_data(int number)
{
	int scaled = number * 10000;

	return scaled / PRESSURE_ADJUST;
}
+
#define TEMP_ADJUST 2
/* Scale a raw temperature reading by TEMP_ADJUST and narrow to short. */
static short temp_convert_data(int number)
{
	return (short)(number * TEMP_ADJUST);
}
+
/*
 * Delayed-work handler: polls the host for a "pressure, temperature"
 * sample, converts both values and reports them as REL events, then
 * re-arms itself for as long as the sensor stays enabled.
 */
static void maru_pressure_input_work_func(struct work_struct *work) {

	int poll_time = 200000000;	/* default; overwritten from poll_delay below */
	int enable = 0;
	int ret = 0;
	int pressure = 0;
	int temperature = 0;
	int raw_pressure;
	short raw_temp;
	char sensor_data[__MAX_BUF_SENSOR];
	struct maru_pressure_data *data = container_of((struct delayed_work *)work,
		struct maru_pressure_data, work);

	LOG(1, "maru_pressure_input_work_func starts");

	memset(sensor_data, 0, sizeof(sensor_data));
	poll_time = atomic_read(&data->poll_delay);

	enable = atomic_read(&data->enable);

	if (enable) {
		mutex_lock(&data->vs->vqlock);
		ret = get_sensor_data(sensor_type_pressure, sensor_data);
		mutex_unlock(&data->vs->vqlock);
		if (!ret) {
			sscanf(sensor_data, "%d, %d", &pressure, &temperature);
			LOG(1, "pressure_set %d, %d", pressure, temperature);

			raw_pressure = pressure_convert_data(pressure);
			/* avoid reporting 0 — presumably because the input
			 * core drops REL events with value 0; confirm. */
			if (temperature == 0) {
				temperature = 1;
			}
			raw_temp = temp_convert_data(temperature);

			LOG(1, "pressure raw pressure %d, temp %d.\n", raw_pressure, raw_temp);

			input_report_rel(data->input_data, REL_HWHEEL, raw_pressure);
			/* 101325 is standard atmospheric pressure in Pa,
			 * reported as a constant on REL_DIAL — confirm the
			 * intended semantics with userspace consumers. */
			input_report_rel(data->input_data, REL_DIAL, 101325);
			input_report_rel(data->input_data, REL_WHEEL, raw_temp);
			input_sync(data->input_data);
		}
	}

	/* re-read: enable may have been cleared while we polled the host */
	enable = atomic_read(&data->enable);

	LOG(1, "enable: %d, poll_time: %d", enable, poll_time);
	if (enable) {
		if (poll_time > 0) {
			schedule_delayed_work(&data->work, nsecs_to_jiffies(poll_time));
		} else {
			schedule_delayed_work(&data->work, 0);
		}
	}

	LOG(1, "maru_pressure_input_work_func ends");

}
+
+static ssize_t maru_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", MARU_PRESSURE_DEVICE_NAME);
+}
+
+static ssize_t maru_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", MARU_SENSOR_DEVICE_VENDOR);
+}
+
+static ssize_t maru_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char sensor_data[__MAX_BUF_SENSOR];
+ int ret;
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_pressure_data *data = input_get_drvdata(input_data);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_pressure_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", sensor_data);
+}
+
+static ssize_t maru_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_pressure_data *data = input_get_drvdata(input_data);
+ int value = simple_strtoul(buf, NULL, 10);
+
+ if (value != 0 && value != 1)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_pressure_enable, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (value) {
+ if (atomic_read(&data->enable) != 1) {
+ atomic_set(&data->enable, 1);
+ schedule_delayed_work(&data->work, 0);
+
+ }
+ } else {
+ if (atomic_read(&data->enable) != 0) {
+ atomic_set(&data->enable, 0);
+ cancel_delayed_work(&data->work);
+ }
+ }
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+static ssize_t maru_poll_delay_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char sensor_data[__MAX_BUF_SENSOR];
+ int ret;
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_pressure_data *data = input_get_drvdata(input_data);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_pressure_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", sensor_data);
+}
+
+static ssize_t maru_poll_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_pressure_data *data = input_get_drvdata(input_data);
+ int value = simple_strtoul(buf, NULL, 10);
+
+ if (value < __MIN_DELAY_SENSOR)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_pressure_delay, buf);
+ mutex_unlock(&data->vs->vqlock);
+ atomic_set(&data->poll_delay, value);
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
/* Read-only "name" and "vendor" attributes for the sensor class device. */
static struct device_attribute dev_attr_sensor_name =
	__ATTR(name, S_IRUGO, maru_name_show, NULL);

static struct device_attribute dev_attr_sensor_vendor =
	__ATTR(vendor, S_IRUGO, maru_vendor_show, NULL);

/* NULL-terminated attribute list passed to register_sensor_device(). */
static struct device_attribute *pressure_sensor_attrs [] = {
	&dev_attr_sensor_name,
	&dev_attr_sensor_vendor,
	NULL,
};
+
/* "enable" and "poll_delay" attributes for the input device; MARU_ATTR_RW
 * presumably binds the maru_enable_show/store and maru_poll_delay_show/store
 * handlers (macro lives in maru_virtio_sensor.h) — TODO confirm. */
static struct device_attribute attr_pressure [] =
{
	MARU_ATTR_RW(enable),
	MARU_ATTR_RW(poll_delay),
};

static struct attribute *maru_pressure_attribute[] = {
	&attr_pressure[0].attr,
	&attr_pressure[1].attr,
	NULL
};

/* Group created on the input device's kobject in create_input_device(). */
static struct attribute_group maru_pressure_attribute_group = {
	.attrs = maru_pressure_attribute
};
+
+static void pressure_clear(struct maru_pressure_data *data) {
+ if (data == NULL)
+ return;
+
+ if (data->input_data) {
+ sysfs_remove_group(&data->input_data->dev.kobj,
+ &maru_pressure_attribute_group);
+ input_free_device(data->input_data);
+ }
+
+ kfree(data);
+ data = NULL;
+}
+
+static int set_initial_value(struct maru_pressure_data *data)
+{
+ int delay = 0;
+ int ret = 0;
+ int enable = 0;
+ char sensor_data [__MAX_BUF_SENSOR];
+
+ memset(sensor_data, 0, sizeof(sensor_data));
+
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_pressure_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret) {
+ ERR("failed to get initial delay time");
+ return ret;
+ }
+
+ delay = sensor_atoi(sensor_data);
+
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_pressure_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret) {
+ ERR("failed to get initial enable");
+ return ret;
+ }
+
+ enable = sensor_atoi(sensor_data);
+
+ if (delay < 0) {
+ ERR("weird value is set initial delay");
+ return ret;
+ }
+
+ atomic_set(&data->poll_delay, delay);
+
+ if (enable) {
+ atomic_set(&data->enable, 1);
+ schedule_delayed_work(&data->work, 0);
+ }
+
+ return ret;
+}
+
+static int create_input_device(struct maru_pressure_data *data)
+{
+ int ret = 0;
+ struct input_dev *input_data = NULL;
+
+ input_data = input_allocate_device();
+ if (input_data == NULL) {
+ ERR("failed initialing input handler");
+ pressure_clear(data);
+ return -ENOMEM;
+ }
+
+ input_data->name = SENSOR_PRESSURE_INPUT_NAME;
+ input_data->id.bustype = BUS_I2C;
+
+ set_bit(EV_REL, input_data->evbit);
+ input_set_capability(input_data, EV_REL, REL_HWHEEL);
+ input_set_capability(input_data, EV_REL, REL_DIAL);
+ input_set_capability(input_data, EV_REL, REL_WHEEL);
+
+ data->input_data = input_data;
+
+ ret = input_register_device(input_data);
+ if (ret) {
+ ERR("failed to register input data");
+ pressure_clear(data);
+ return ret;
+ }
+
+ input_set_drvdata(input_data, data);
+
+ ret = sysfs_create_group(&input_data->dev.kobj,
+ &maru_pressure_attribute_group);
+ if (ret) {
+ pressure_clear(data);
+ ERR("failed initialing devices");
+ return ret;
+ }
+
+ return ret;
+}
+
+int maru_pressure_init(struct virtio_sensor *vs) {
+ int ret = 0;
+ struct maru_pressure_data *data = NULL;
+
+ INFO("maru_pressure device init starts.");
+
+ data = kmalloc(sizeof(struct maru_pressure_data), GFP_KERNEL);
+ if (data == NULL) {
+ ERR("failed to create pressure data.");
+ return -ENOMEM;
+ }
+
+ vs->pressure_handle = data;
+ data->vs = vs;
+
+ INIT_DELAYED_WORK(&data->work, maru_pressure_input_work_func);
+
+ // create name & vendor
+ ret = register_sensor_device(pressure_sensor_device, vs,
+ pressure_sensor_attrs, DRIVER_PRESSURE_NAME);
+ if (ret) {
+ ERR("failed to register pressure device");
+ pressure_clear(data);
+ return -1;
+ }
+
+ // create input
+ ret = create_input_device(data);
+ if (ret) {
+ ERR("failed to create input device");
+ return ret;
+ }
+
+ // set initial delay & enable
+ ret = set_initial_value(data);
+ if (ret) {
+ ERR("failed to set initial value");
+ return ret;
+ }
+
+ INFO("maru_pressure device init ends.");
+
+ return ret;
+}
+
+int maru_pressure_exit(struct virtio_sensor *vs) {
+ struct maru_pressure_data *data = NULL;
+
+ data = (struct maru_pressure_data *)vs->pressure_handle;
+ pressure_clear(data);
+ INFO("maru_pressure device exit ends.");
+ return 0;
+}
--- /dev/null
+/*
+ * Maru Virtio Proximity Sensor Device Driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Choi <jinhyung2.choi@samsung.com>
+ * Sangho Park <sangho1206.park@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "maru_virtio_sensor.h"
+
/* Per-device state for the virtio proximity sensor. */
struct maru_proxi_data {
	struct input_dev *input_data;	/* input device used to report samples */
	struct delayed_work work;	/* periodic polling work item */

	struct virtio_sensor* vs;	/* owning virtio transport (holds vqlock) */

	atomic_t enable;	/* 1 while polling is active */
	atomic_t poll_delay;	/* poll period, fed to nsecs_to_jiffies() */
};

/* Class device created by register_sensor_device() for name/vendor attrs. */
static struct device *proxi_sensor_device;

#ifdef SUPPORT_LEGACY_SENSOR
/* Legacy class device exposing the old proxi_sim attribute set. */
static struct device *l_proxi_sensor_device;
#endif
+
/*
 * Delayed-work handler: polls the host for the proximity value, clamps
 * it to 0/1 and reports it on ABS_DISTANCE, then re-arms itself for as
 * long as the sensor stays enabled.
 */
static void maru_proxi_input_work_func(struct work_struct *work) {

	int poll_time = 200000000;	/* default; overwritten from poll_delay below */
	int enable = 0;
	int ret = 0;
	int proxi = 0;
	char sensor_data[__MAX_BUF_SENSOR];
	struct maru_proxi_data *data = container_of((struct delayed_work *)work,
		struct maru_proxi_data, work);

	LOG(1, "maru_proxi_input_work_func starts");

	memset(sensor_data, 0, __MAX_BUF_SENSOR);
	poll_time = atomic_read(&data->poll_delay);

	enable = atomic_read(&data->enable);

	if (enable) {
		mutex_lock(&data->vs->vqlock);
		ret = get_sensor_data(sensor_type_proxi, sensor_data);
		mutex_unlock(&data->vs->vqlock);
		if (!ret) {
			sscanf(sensor_data, "%d", &proxi);
			/* collapse any non-zero reading to 1 (near/far) */
			if (proxi)
				proxi = 1;

			LOG(1, "proxi_set %d", proxi);

			input_report_abs(data->input_data, ABS_DISTANCE, proxi);
			input_sync(data->input_data);
		}
	}

	/* re-read: enable may have been cleared while we polled the host */
	enable = atomic_read(&data->enable);

	LOG(1, "enable: %d, poll_time: %d", enable, poll_time);
	if (enable) {
		if (poll_time > 0) {
			schedule_delayed_work(&data->work, nsecs_to_jiffies(poll_time));
		} else {
			schedule_delayed_work(&data->work, 0);
		}
	}

	LOG(1, "maru_proxi_input_work_func ends");

}
+
+static ssize_t maru_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", MARU_PROXI_DEVICE_NAME);
+}
+
+static ssize_t maru_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", MARU_SENSOR_DEVICE_VENDOR);
+}
+
+static ssize_t maru_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char sensor_data[__MAX_BUF_SENSOR];
+ int ret;
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_proxi_data *data = input_get_drvdata(input_data);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_proxi_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", sensor_data);
+}
+
+static ssize_t maru_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_proxi_data *data = input_get_drvdata(input_data);
+ int value = simple_strtoul(buf, NULL, 10);
+
+ if (value != 0 && value != 1)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_proxi_enable, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (value) {
+ if (atomic_read(&data->enable) != 1) {
+ atomic_set(&data->enable, 1);
+ schedule_delayed_work(&data->work, 0);
+
+ }
+ } else {
+ if (atomic_read(&data->enable) != 0) {
+ atomic_set(&data->enable, 0);
+ cancel_delayed_work(&data->work);
+ }
+ }
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+static ssize_t maru_poll_delay_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char sensor_data[__MAX_BUF_SENSOR];
+ int ret;
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_proxi_data *data = input_get_drvdata(input_data);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_proxi_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", sensor_data);
+}
+
+static ssize_t maru_poll_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_proxi_data *data = input_get_drvdata(input_data);
+ int value = simple_strtoul(buf, NULL, 10);
+
+ if (value < __MIN_DELAY_SENSOR)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_proxi_delay, buf);
+ mutex_unlock(&data->vs->vqlock);
+ atomic_set(&data->poll_delay, value);
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
/* Read-only "name" and "vendor" attributes for the sensor class device. */
static struct device_attribute dev_attr_sensor_name =
	__ATTR(name, S_IRUGO, maru_name_show, NULL);

static struct device_attribute dev_attr_sensor_vendor =
	__ATTR(vendor, S_IRUGO, maru_vendor_show, NULL);

/* NULL-terminated attribute list passed to register_sensor_device(). */
static struct device_attribute *proxi_sensor_attrs [] = {
	&dev_attr_sensor_name,
	&dev_attr_sensor_vendor,
	NULL,
};
+
+#ifdef SUPPORT_LEGACY_SENSOR
+#define PROXI_NAME_STR "proxi_sim"
+
+static ssize_t proxi_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", PROXI_NAME_STR);
+}
+
+static ssize_t enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char sensor_data[__MAX_BUF_SENSOR];
+ int ret;
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_proxi_data *data = input_get_drvdata(input_data);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_proxi_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", sensor_data);
+}
+
+static ssize_t enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_proxi_data *data = input_get_drvdata(input_data);
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_proxi_enable, buf);
+ mutex_unlock(&data->vs->vqlock);
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+static ssize_t vo_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char sensor_data[__MAX_BUF_SENSOR];
+ int ret;
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_proxi_data *data = input_get_drvdata(input_data);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_proxi, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", sensor_data);
+}
+
+static ssize_t vo_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_proxi_data *data = input_get_drvdata(input_data);
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_proxi, buf);
+ mutex_unlock(&data->vs->vqlock);
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
/* Legacy read-only "name" attribute, answered by proxi_name_show(). */
static struct device_attribute dev_attr_l_sensor_name =
	__ATTR(name, S_IRUGO, proxi_name_show, NULL);

/* Legacy read/write attributes for the enable flag and raw output. */
static DEVICE_ATTR(enable, 0644, enable_show, enable_store);
static DEVICE_ATTR(vo, 0644, vo_show, vo_store);

/* NULL-terminated attribute list handed to l_register_sensor_device(). */
static struct device_attribute *l_proxi_sensor_attrs [] = {
	&dev_attr_l_sensor_name,
	&dev_attr_enable,
	&dev_attr_vo,
	NULL,
};
+#endif
+
/* "enable" and "poll_delay" attributes for the input device; MARU_ATTR_RW
 * presumably binds the maru_enable_show/store and maru_poll_delay_show/store
 * handlers (macro lives in maru_virtio_sensor.h) — TODO confirm. */
static struct device_attribute attr_proxi [] =
{
	MARU_ATTR_RW(enable),
	MARU_ATTR_RW(poll_delay),
};

static struct attribute *maru_proxi_attribute[] = {
	&attr_proxi[0].attr,
	&attr_proxi[1].attr,
	NULL
};

/* Group created on the input device's kobject in create_input_device(). */
static struct attribute_group maru_proxi_attribute_group = {
	.attrs = maru_proxi_attribute
};
+
+static void proxi_clear(struct maru_proxi_data *data) {
+ if (data == NULL)
+ return;
+
+ if (data->input_data) {
+ sysfs_remove_group(&data->input_data->dev.kobj,
+ &maru_proxi_attribute_group);
+ input_free_device(data->input_data);
+ }
+
+ kfree(data);
+ data = NULL;
+}
+
+static int set_initial_value(struct maru_proxi_data *data)
+{
+ int delay = 0;
+ int ret = 0;
+ int enable = 0;
+ char sensor_data [__MAX_BUF_SENSOR];
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_proxi_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret) {
+ ERR("failed to get initial delay time");
+ return ret;
+ }
+
+ delay = sensor_atoi(sensor_data);
+
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_proxi_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret) {
+ ERR("failed to get initial enable");
+ return ret;
+ }
+
+ enable = sensor_atoi(sensor_data);
+
+ if (delay < 0) {
+ ERR("weird value is set initial delay");
+ return ret;
+ }
+
+ atomic_set(&data->poll_delay, delay);
+
+ if (enable) {
+ atomic_set(&data->enable, 1);
+ schedule_delayed_work(&data->work, 0);
+ }
+
+ return ret;
+}
+
+static int create_input_device(struct maru_proxi_data *data)
+{
+ int ret = 0;
+ struct input_dev *input_data = NULL;
+
+ input_data = input_allocate_device();
+ if (input_data == NULL) {
+ ERR("failed initialing input handler");
+ proxi_clear(data);
+ return -ENOMEM;
+ }
+
+ input_data->name = SENSOR_PROXI_INPUT_NAME;
+
+ input_set_drvdata(input_data, data);
+
+ set_bit(EV_ABS, input_data->evbit);
+ input_set_capability(input_data, EV_ABS, ABS_DISTANCE);
+ input_set_abs_params(input_data, ABS_DISTANCE, 0, 1, 0, 0);
+
+ data->input_data = input_data;
+
+ ret = input_register_device(input_data);
+ if (ret) {
+ ERR("failed to register input data");
+ proxi_clear(data);
+ return ret;
+ }
+
+ ret = sysfs_create_group(&input_data->dev.kobj,
+ &maru_proxi_attribute_group);
+ if (ret) {
+ proxi_clear(data);
+ ERR("failed initialing devices");
+ return ret;
+ }
+
+ return ret;
+}
+
+int maru_proxi_init(struct virtio_sensor *vs) {
+ int ret = 0;
+ struct maru_proxi_data *data = NULL;
+
+ INFO("maru_proxi device init starts.");
+
+ data = kmalloc(sizeof(struct maru_proxi_data), GFP_KERNEL);
+ if (data == NULL) {
+ ERR("failed to create proxi data.");
+ return -ENOMEM;
+ }
+
+ vs->proxi_handle = data;
+ data->vs = vs;
+
+ INIT_DELAYED_WORK(&data->work, maru_proxi_input_work_func);
+
+ // create name & vendor
+ ret = register_sensor_device(proxi_sensor_device, vs,
+ proxi_sensor_attrs, DRIVER_PROXI_NAME);
+ if (ret) {
+ ERR("failed to register proxi device");
+ proxi_clear(data);
+ return -1;
+ }
+
+#ifdef SUPPORT_LEGACY_SENSOR
+ ret = l_register_sensor_device(l_proxi_sensor_device, vs,
+ l_proxi_sensor_attrs, DRIVER_PROXI_NAME);
+ if (ret) {
+ ERR("failed to register legacy proxi device");
+ proxi_clear(data);
+ return -1;
+ }
+#endif
+
+ // create input
+ ret = create_input_device(data);
+ if (ret) {
+ ERR("failed to create input device");
+ return ret;
+ }
+
+ // set initial delay & enable
+ ret = set_initial_value(data);
+ if (ret) {
+ ERR("failed to set initial value");
+ return ret;
+ }
+
+ INFO("maru_proxi device init ends.");
+
+ return ret;
+}
+
+int maru_proxi_exit(struct virtio_sensor *vs) {
+ struct maru_proxi_data *data = NULL;
+
+ data = (struct maru_proxi_data *)vs->proxi_handle;
+ proxi_clear(data);
+ INFO("maru_proxi device exit ends.");
+ return 0;
+}
--- /dev/null
+/*
+ * Maru Virtio Rotation Vector Sensor Device Driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Choi <jinhyung2.choi@samsung.com>
+ * Sangho Park <sangho1206.park@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "maru_virtio_sensor.h"
+
/* Per-device state for the virtio rotation-vector sensor. */
struct maru_rotation_vector_data {
	struct input_dev *input_data;	/* input device used to report samples */
	struct delayed_work work;	/* periodic polling work item */

	struct virtio_sensor* vs;	/* owning virtio transport (holds vqlock) */

	atomic_t enable;	/* 1 while polling is active */
	atomic_t poll_delay;	/* poll period, fed to nsecs_to_jiffies() */
};

/* Class device created by register_sensor_device() for name/vendor attrs. */
static struct device *rotation_vector_sensor_device;
+
/*
 * Delayed-work handler: polls the host for a quaternion + accuracy
 * sample ("a,b,c,d,accuracy"), reports the five values as REL events,
 * then re-arms itself for as long as the sensor stays enabled.
 */
static void maru_rotation_vector_input_work_func(struct work_struct *work) {

	int poll_time = 200000000;	/* default; overwritten from poll_delay below */
	int enable = 0;
	int ret = 0;
	/* NOTE(review): left uninitialized — if sscanf matches fewer than
	 * five fields, stale stack values get reported; confirm host
	 * always sends five comma-separated integers. */
	int quad_a, quad_b, quad_c, quad_d, accuracy;
	char sensor_data[__MAX_BUF_SENSOR];
	struct maru_rotation_vector_data *data = container_of((struct delayed_work *)work,
		struct maru_rotation_vector_data, work);

	LOG(1, "maru_rotation_vector_input_work_func starts");

	memset(sensor_data, 0, __MAX_BUF_SENSOR);
	poll_time = atomic_read(&data->poll_delay);

	enable = atomic_read(&data->enable);

	if (enable) {
		mutex_lock(&data->vs->vqlock);
		ret = get_sensor_data(sensor_type_rotation_vector, sensor_data);
		mutex_unlock(&data->vs->vqlock);
		if (!ret) {
			sscanf(sensor_data, "%d,%d,%d,%d,%d", &quad_a, &quad_b, &quad_c, &quad_d, &accuracy);
			LOG(1, "rotation_vector_set %d,%d,%d,%d,%d", quad_a, quad_b, quad_c, quad_d, accuracy);

			input_report_rel(data->input_data, REL_X, quad_a);
			input_report_rel(data->input_data, REL_Y, quad_b);
			input_report_rel(data->input_data, REL_Z, quad_c);
			input_report_rel(data->input_data, REL_RX, quad_d);
			input_report_rel(data->input_data, REL_RY, accuracy);
			input_sync(data->input_data);
		}
	}

	/* re-read: enable may have been cleared while we polled the host */
	enable = atomic_read(&data->enable);

	LOG(1, "enable: %d, poll_time: %d", enable, poll_time);
	if (enable) {
		if (poll_time > 0) {
			schedule_delayed_work(&data->work, nsecs_to_jiffies(poll_time));
		} else {
			schedule_delayed_work(&data->work, 0);
		}
	}

	LOG(1, "maru_rotation_vector_input_work_func ends");

}
+
+static ssize_t maru_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", MARU_ROTATION_DEVICE_NAME);
+}
+
+static ssize_t maru_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", MARU_SENSOR_DEVICE_VENDOR);
+}
+
+static ssize_t maru_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char sensor_data[__MAX_BUF_SENSOR];
+ int ret;
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_rotation_vector_data *data = input_get_drvdata(input_data);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_rotation_vector_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", sensor_data);
+}
+
+static ssize_t maru_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_rotation_vector_data *data = input_get_drvdata(input_data);
+ int value = simple_strtoul(buf, NULL, 10);
+
+ if (value != 0 && value != 1)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_rotation_vector_enable, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (value) {
+ if (atomic_read(&data->enable) != 1) {
+ atomic_set(&data->enable, 1);
+ schedule_delayed_work(&data->work, 0);
+
+ }
+ } else {
+ if (atomic_read(&data->enable) != 0) {
+ atomic_set(&data->enable, 0);
+ cancel_delayed_work(&data->work);
+ }
+ }
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+static ssize_t maru_poll_delay_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char sensor_data[__MAX_BUF_SENSOR];
+ int ret;
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_rotation_vector_data *data = input_get_drvdata(input_data);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_rotation_vector_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", sensor_data);
+}
+
+static ssize_t maru_poll_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_rotation_vector_data *data = input_get_drvdata(input_data);
+ int value = simple_strtoul(buf, NULL, 10);
+
+ if (value < __MIN_DELAY_SENSOR)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_rotation_vector_delay, buf);
+ mutex_unlock(&data->vs->vqlock);
+ atomic_set(&data->poll_delay, value);
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
/* Read-only "name" and "vendor" attributes for the sensor class device. */
static struct device_attribute dev_attr_sensor_name =
	__ATTR(name, S_IRUGO, maru_name_show, NULL);

static struct device_attribute dev_attr_sensor_vendor =
	__ATTR(vendor, S_IRUGO, maru_vendor_show, NULL);

/* NULL-terminated attribute list passed to register_sensor_device(). */
static struct device_attribute *rotation_vector_sensor_attrs [] = {
	&dev_attr_sensor_name,
	&dev_attr_sensor_vendor,
	NULL,
};
+
+static struct device_attribute attr_rotation_vector [] =
+{
+ MARU_ATTR_RW(enable),
+ MARU_ATTR_RW(poll_delay),
+};
+
+static struct attribute *maru_rotation_vector_attribute[] = {
+ &attr_rotation_vector[0].attr,
+ &attr_rotation_vector[1].attr,
+ NULL
+};
+
+static struct attribute_group maru_rotation_vector_attribute_group = {
+ .attrs = maru_rotation_vector_attribute
+};
+
+/* Tear down the sysfs group and input device, then free the state struct. */
+static void rotation_vector_clear(struct maru_rotation_vector_data *data) {
+ if (data == NULL)
+ return;
+
+ if (data->input_data) {
+ sysfs_remove_group(&data->input_data->dev.kobj,
+ &maru_rotation_vector_attribute_group);
+ /* NOTE(review): if the device was already registered this should be
+  * input_unregister_device() instead - confirm against callers */
+ input_free_device(data->input_data);
+ }
+
+ kfree(data);
+ data = NULL; /* no-op: only clears the local copy of the pointer */
+}
+
+/*
+ * Fetch the host's initial poll delay, cache it, and force the enable state
+ * to "off" on both sides. Returns 0 on success, negative errno on failure.
+ */
+static int set_initial_value(struct maru_rotation_vector_data *data)
+{
+ int delay = 0;
+ int ret = 0;
+ char sensor_data [__MAX_BUF_SENSOR];
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_rotation_vector_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret) {
+ ERR("failed to get initial delay time");
+ return ret;
+ }
+
+ delay = sensor_atoi(sensor_data);
+ if (delay < 0) {
+ ERR("weird value is set initial delay");
+ /* was 'return ret' (== 0): reported success while leaving
+  * poll_delay/enable unset */
+ return -EINVAL;
+ }
+
+ atomic_set(&data->poll_delay, delay);
+
+ /* push "disabled" to the host so guest and host state start in sync */
+ memset(sensor_data, 0, sizeof(sensor_data));
+ sensor_data[0] = '0';
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_rotation_vector_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ atomic_set(&data->enable, 0);
+
+ return ret;
+}
+
+/*
+ * Allocate and register the rotation-vector input device (REL_X/Y/Z/RX/RY
+ * carry the vector components) and attach the enable/poll_delay sysfs group.
+ * On any failure the whole per-sensor state is torn down via
+ * rotation_vector_clear(). Returns 0 on success.
+ */
+static int create_input_device(struct maru_rotation_vector_data *data)
+{
+ int ret = 0;
+ struct input_dev *input_data = NULL;
+
+ input_data = input_allocate_device();
+ if (input_data == NULL) {
+ ERR("failed initialing input handler");
+ rotation_vector_clear(data);
+ return -ENOMEM;
+ }
+
+ input_data->name = SENSOR_ROTATION_INPUT_NAME;
+ input_data->id.bustype = BUS_I2C;
+
+ set_bit(EV_REL, input_data->evbit);
+ set_bit(EV_SYN, input_data->evbit);
+ input_set_capability(input_data, EV_REL, REL_X);
+ input_set_capability(input_data, EV_REL, REL_Y);
+ input_set_capability(input_data, EV_REL, REL_Z);
+ input_set_capability(input_data, EV_REL, REL_RX);
+ input_set_capability(input_data, EV_REL, REL_RY);
+
+ data->input_data = input_data;
+
+ ret = input_register_device(input_data);
+ if (ret) {
+ ERR("failed to register input data");
+ rotation_vector_clear(data);
+ return ret;
+ }
+
+ input_set_drvdata(input_data, data);
+
+ ret = sysfs_create_group(&input_data->dev.kobj,
+ &maru_rotation_vector_attribute_group);
+ if (ret) {
+ /* NOTE(review): device is registered at this point, but clear()
+  * releases it with input_free_device(), not unregister - confirm */
+ rotation_vector_clear(data);
+ ERR("failed initialing devices");
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Probe-time init of the rotation-vector sensor: allocate state, register
+ * the name/vendor class device, create the input device, and prime the
+ * initial delay/enable values from the host. Returns 0 on success.
+ */
+int maru_rotation_vector_init(struct virtio_sensor *vs) {
+ int ret = 0;
+ struct maru_rotation_vector_data *data = NULL;
+
+ INFO("maru_rotation_vector device init starts.");
+
+ /* kzalloc (was kmalloc): rotation_vector_clear() inspects
+  * data->input_data on early error paths, so the structure must not
+  * start out as uninitialized garbage. */
+ data = kzalloc(sizeof(struct maru_rotation_vector_data), GFP_KERNEL);
+ if (data == NULL) {
+ ERR("failed to create rotation_vector data.");
+ return -ENOMEM;
+ }
+
+ vs->rotation_vector_handle = data;
+ data->vs = vs;
+
+ INIT_DELAYED_WORK(&data->work, maru_rotation_vector_input_work_func);
+
+ // create name & vendor
+ ret = register_sensor_device(rotation_vector_sensor_device, vs,
+ rotation_vector_sensor_attrs, DRIVER_ROTATION_NAME);
+ if (ret) {
+ ERR("failed to register rotation_vector device");
+ rotation_vector_clear(data);
+ return -1;
+ }
+
+ // create input (tears itself down on failure)
+ ret = create_input_device(data);
+ if (ret) {
+ ERR("failed to create input device");
+ return ret;
+ }
+
+ // set initial delay & enable
+ ret = set_initial_value(data);
+ if (ret) {
+ ERR("failed to set initial value");
+ return ret;
+ }
+
+ INFO("maru_rotation_vector device init ends.");
+
+ return ret;
+}
+
+/* Remove-time teardown: free everything allocated by maru_rotation_vector_init(). */
+int maru_rotation_vector_exit(struct virtio_sensor *vs) {
+ struct maru_rotation_vector_data *data = NULL;
+
+ data = (struct maru_rotation_vector_data *)vs->rotation_vector_handle;
+ rotation_vector_clear(data);
+ INFO("maru_rotation_vector device exit ends.");
+ return 0;
+}
--- /dev/null
+/*
+ * Maru Virtio UltraViolet Sensor Device Driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Choi <jinhyung2.choi@samsung.com>
+ * Sangho Park <sangho1206.park@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "maru_virtio_sensor.h"
+
+/* Per-device state for the virtual UV (ultraviolet) sensor. */
+struct maru_uv_data {
+ struct input_dev *input_data;
+ struct delayed_work work; /* periodic host-poll work item */
+
+ struct virtio_sensor* vs;
+
+ atomic_t enable; /* 1 while polling is active, else 0 */
+ atomic_t poll_delay; /* poll period in ns (fed to nsecs_to_jiffies) */
+};
+
+/* class device created by register_sensor_device() for name/vendor attrs */
+static struct device *uv_sensor_device;
+
+/*
+ * Delayed-work handler: while enabled, fetch one UV sample from the host,
+ * report it as a REL_MISC event, and re-arm itself after poll_delay ns.
+ */
+static void maru_uv_input_work_func(struct work_struct *work) {
+
+ int poll_time = 200000000;
+ int enable = 0;
+ int ret = 0;
+ int uv = 0;
+ char sensor_data[__MAX_BUF_SENSOR];
+ struct maru_uv_data *data = container_of((struct delayed_work *)work,
+ struct maru_uv_data, work);
+
+ LOG(1, "maru_uv_input_work_func starts");
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ poll_time = atomic_read(&data->poll_delay);
+
+ enable = atomic_read(&data->enable);
+
+ if (enable) {
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_uv, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (!ret) {
+ sscanf(sensor_data, "%d", &uv);
+ LOG(1, "uv_set %d", uv);
+
+ /* +1 offset: presumably keeps a 0 reading from being dropped
+  * as a no-change REL event - TODO confirm */
+ input_report_rel(data->input_data, REL_MISC, (uv + 1));
+ input_sync(data->input_data);
+ }
+ }
+
+ /* re-read: enable may have been cleared while the vq call slept */
+ enable = atomic_read(&data->enable);
+
+ LOG(1, "enable: %d, poll_time: %d", enable, poll_time);
+ if (enable) {
+ if (poll_time > 0) {
+ schedule_delayed_work(&data->work, nsecs_to_jiffies(poll_time));
+ } else {
+ schedule_delayed_work(&data->work, 0);
+ }
+ }
+
+ LOG(1, "maru_uv_input_work_func ends");
+
+}
+
+/* Sysfs 'name' attribute: reports the UV sensor device name string. */
+static ssize_t maru_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ ssize_t written;
+
+ written = sprintf(buf, "%s", MARU_UV_DEVICE_NAME);
+ return written;
+}
+
+/* Sysfs 'vendor' attribute: reports the sensor vendor string. */
+static ssize_t maru_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ ssize_t written;
+
+ written = sprintf(buf, "%s", MARU_SENSOR_DEVICE_VENDOR);
+ return written;
+}
+
+/* Sysfs 'enable' show: queries the host-side enable state; prints "-1" on failure. */
+static ssize_t maru_enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char sensor_data[__MAX_BUF_SENSOR];
+ int ret;
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_uv_data *data = input_get_drvdata(input_data);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_uv_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", sensor_data);
+}
+
+/*
+ * Sysfs 'enable' store: accepts "0"/"1", forwards the value to the host,
+ * and starts/stops the polling work accordingly. Other values are silently
+ * ignored (count is still consumed).
+ */
+static ssize_t maru_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_uv_data *data = input_get_drvdata(input_data);
+ int value = simple_strtoul(buf, NULL, 10);
+
+ if (value != 0 && value != 1)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_uv_enable, buf);
+ mutex_unlock(&data->vs->vqlock);
+
+ if (value) {
+ if (atomic_read(&data->enable) != 1) {
+ atomic_set(&data->enable, 1);
+ schedule_delayed_work(&data->work, 0);
+
+ }
+ } else {
+ if (atomic_read(&data->enable) != 0) {
+ atomic_set(&data->enable, 0);
+ cancel_delayed_work(&data->work);
+ }
+ }
+
+ /* NOTE(review): returns strnlen(buf, 32) rather than count - for longer
+  * writes the VFS will re-issue the remainder; confirm this is intended */
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+/* Sysfs 'poll_delay' show: reads the current delay from the host; prints "-1" on failure. */
+static ssize_t maru_poll_delay_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char sensor_data[__MAX_BUF_SENSOR];
+ int ret;
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_uv_data *data = input_get_drvdata(input_data);
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_uv_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret)
+ return sprintf(buf, "%d", -1);
+
+ return sprintf(buf, "%s", sensor_data);
+}
+
+/*
+ * Sysfs 'poll_delay' store: forwards the new delay to the host and caches it
+ * locally. Values below __MIN_DELAY_SENSOR are silently ignored.
+ */
+static ssize_t maru_poll_delay_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct input_dev *input_data = to_input_dev(dev);
+ struct maru_uv_data *data = input_get_drvdata(input_data);
+ int value = simple_strtoul(buf, NULL, 10);
+
+ if (value < __MIN_DELAY_SENSOR)
+ return count;
+
+ mutex_lock(&data->vs->vqlock);
+ set_sensor_data(sensor_type_uv_delay, buf);
+ mutex_unlock(&data->vs->vqlock);
+ atomic_set(&data->poll_delay, value);
+
+ return strnlen(buf, __MAX_BUF_SENSOR);
+}
+
+/* read-only name/vendor attributes for the class device */
+static struct device_attribute dev_attr_sensor_name =
+ __ATTR(name, S_IRUGO, maru_name_show, NULL);
+
+static struct device_attribute dev_attr_sensor_vendor =
+ __ATTR(vendor, S_IRUGO, maru_vendor_show, NULL);
+
+/* NULL-terminated list consumed by register_sensor_device() */
+static struct device_attribute *uv_sensor_attrs [] = {
+ &dev_attr_sensor_name,
+ &dev_attr_sensor_vendor,
+ NULL,
+};
+
+/* enable/poll_delay attributes exposed on the input device */
+static struct device_attribute attr_uv [] =
+{
+ MARU_ATTR_RW(enable),
+ MARU_ATTR_RW(poll_delay),
+};
+
+static struct attribute *maru_uv_attribute[] = {
+ &attr_uv[0].attr,
+ &attr_uv[1].attr,
+ NULL
+};
+
+static struct attribute_group maru_uv_attribute_group = {
+ .attrs = maru_uv_attribute
+};
+
+/* Tear down the sysfs group and input device, then free the state struct. */
+static void uv_clear(struct maru_uv_data *data) {
+ if (data == NULL)
+ return;
+
+ if (data->input_data) {
+ sysfs_remove_group(&data->input_data->dev.kobj,
+ &maru_uv_attribute_group);
+ /* NOTE(review): if the device was already registered this should be
+  * input_unregister_device() instead - confirm against callers */
+ input_free_device(data->input_data);
+ }
+
+ kfree(data);
+ data = NULL; /* no-op: only clears the local copy of the pointer */
+}
+
+/*
+ * Fetch the host's initial poll delay and enable state, cache them, and
+ * start polling if the host reports the sensor enabled.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int set_initial_value(struct maru_uv_data *data)
+{
+ int delay = 0;
+ int ret = 0;
+ int enable = 0;
+ char sensor_data [__MAX_BUF_SENSOR];
+
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_uv_delay, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret) {
+ ERR("failed to get initial delay time");
+ return ret;
+ }
+
+ delay = sensor_atoi(sensor_data);
+
+ /*
+ * Clear the buffer again: get_sensor_data() copies only the reply's
+ * strlen() bytes, so a shorter second reply would otherwise be
+ * corrupted by stale characters from the first read.
+ */
+ memset(sensor_data, 0, __MAX_BUF_SENSOR);
+ mutex_lock(&data->vs->vqlock);
+ ret = get_sensor_data(sensor_type_uv_enable, sensor_data);
+ mutex_unlock(&data->vs->vqlock);
+ if (ret) {
+ ERR("failed to get initial enable");
+ return ret;
+ }
+
+ enable = sensor_atoi(sensor_data);
+
+ if (delay < 0) {
+ ERR("weird value is set initial delay");
+ /* was 'return ret' (== 0): reported success with state unset */
+ return -EINVAL;
+ }
+
+ atomic_set(&data->poll_delay, delay);
+
+ if (enable) {
+ atomic_set(&data->enable, 1);
+ schedule_delayed_work(&data->work, 0);
+ }
+
+ return ret;
+}
+
+/*
+ * Allocate and register the UV input device (a single REL_MISC channel)
+ * and attach the enable/poll_delay sysfs group. On any failure the whole
+ * per-sensor state is torn down via uv_clear(). Returns 0 on success.
+ */
+static int create_input_device(struct maru_uv_data *data)
+{
+ int ret = 0;
+ struct input_dev *input_data = NULL;
+
+ input_data = input_allocate_device();
+ if (input_data == NULL) {
+ ERR("failed initialing input handler");
+ uv_clear(data);
+ return -ENOMEM;
+ }
+
+ input_data->name = SENSOR_UV_INPUT_NAME;
+ input_data->id.bustype = BUS_I2C;
+
+ set_bit(EV_REL, input_data->evbit);
+ input_set_capability(input_data, EV_REL, REL_MISC);
+
+ data->input_data = input_data;
+
+ ret = input_register_device(input_data);
+ if (ret) {
+ ERR("failed to register input data");
+ uv_clear(data);
+ return ret;
+ }
+
+ input_set_drvdata(input_data, data);
+
+ ret = sysfs_create_group(&input_data->dev.kobj,
+ &maru_uv_attribute_group);
+ if (ret) {
+ /* NOTE(review): device is registered at this point, but clear()
+  * releases it with input_free_device(), not unregister - confirm */
+ uv_clear(data);
+ ERR("failed initialing devices");
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Probe-time init of the UV sensor: allocate state, register the name/vendor
+ * class device, create the input device, and prime the initial delay/enable
+ * values from the host. Returns 0 on success.
+ */
+int maru_uv_init(struct virtio_sensor *vs) {
+ int ret = 0;
+ struct maru_uv_data *data = NULL;
+
+ INFO("maru_uv device init starts.");
+
+ /* kzalloc (was kmalloc): uv_clear() inspects data->input_data on early
+  * error paths, so the structure must not start out as garbage. */
+ data = kzalloc(sizeof(struct maru_uv_data), GFP_KERNEL);
+ if (data == NULL) {
+ ERR("failed to create uv data.");
+ return -ENOMEM;
+ }
+
+ vs->uv_handle = data;
+ data->vs = vs;
+
+ INIT_DELAYED_WORK(&data->work, maru_uv_input_work_func);
+
+ // create name & vendor
+ ret = register_sensor_device(uv_sensor_device, vs,
+ uv_sensor_attrs, DRIVER_UV_NAME);
+ if (ret) {
+ ERR("failed to register uv device");
+ uv_clear(data);
+ return -1;
+ }
+
+ // create input (tears itself down on failure)
+ ret = create_input_device(data);
+ if (ret) {
+ ERR("failed to create input device");
+ return ret;
+ }
+
+ // set initial delay & enable
+ ret = set_initial_value(data);
+ if (ret) {
+ ERR("failed to set initial value");
+ return ret;
+ }
+
+ INFO("maru_uv device init ends.");
+
+ return ret;
+}
+
+/* Remove-time teardown: free everything allocated by maru_uv_init(). */
+int maru_uv_exit(struct virtio_sensor *vs) {
+ struct maru_uv_data *data = NULL;
+
+ data = (struct maru_uv_data *)vs->uv_handle;
+ uv_clear(data);
+ INFO("maru_uv device exit ends.");
+ return 0;
+}
--- /dev/null
+/*
+ * Maru Virtio Sensor Device Driver
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Choi <jinhyung2.choi@samsung.com>
+ * Daiyoung Kim <daiyoung777.kim@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/cdev.h>
+
+#include "maru_virtio_sensor.h"
+
+/* debug verbosity; exposed at /sys/module/.../parameters/sensor_driver_debug */
+int sensor_driver_debug = 0;
+module_param(sensor_driver_debug, int, 0644);
+MODULE_PARM_DESC(sensor_driver_debug, "Turn on/off maru sensor debugging (default:off).");
+
+static struct virtio_device_id id_table[] = { { VIRTIO_ID_SENSOR,
+ VIRTIO_DEV_ANY_ID }, { 0 }, };
+
+/* last reply payload copied out of the virtqueue by sensor_vq_done() */
+static char sensor_data[__MAX_BUF_SENSOR];
+
+/* driver-wide state: a single virtio sensor device is assumed */
+struct virtio_sensor *vs;
+
+static DECLARE_WAIT_QUEUE_HEAD(wq);
+
+/*
+ * Parse the leading decimal digits of @value and return them as an int.
+ * Parsing stops at the first non-digit; no sign or overflow handling.
+ */
+int sensor_atoi(const char *value)
+{
+ int result = 0;
+
+ while (*value >= '0' && *value <= '9') {
+ result = result * 10 + (*value - '0');
+ value++;
+ }
+
+ return result;
+}
+
+/*
+ * Create a class device named @name under vs->sensor_class and attach the
+ * NULL-terminated @attributes list to it. Returns 0 on success, -1 on error.
+ */
+int register_sensor_device(struct device *dev, struct virtio_sensor *vs,
+ struct device_attribute *attributes[], const char* name)
+{
+ int i = 0, err = 0;
+
+ if (!vs->sensor_class) {
+ ERR("sensor class is not created before make device");
+ return -1;
+ }
+
+ INFO("device creation: %s.", name);
+
+ /*
+ * device_create() returns an ERR_PTR-encoded pointer on failure; the
+ * previous 'dev < 0' pointer comparison could never be true, so
+ * failures went undetected.
+ * NOTE(review): only the local 'dev' copy is updated - the caller's
+ * device pointer is never set; confirm callers do not rely on it.
+ */
+ dev = device_create(vs->sensor_class, NULL, MKDEV(0,0), NULL, "%s", name);
+ if (IS_ERR(dev)) {
+ ERR("register_device_create failed!");
+ return -1;
+ }
+
+ if (attributes == NULL) {
+ ERR("attributes is NULL.");
+ return -1;
+ }
+
+ for (i = 0; attributes[i] != NULL; i++) {
+ if ((err = device_create_file(dev, attributes[i])) < 0) {
+ ERR("failed to create device file with attribute[%d - %d]", i, err);
+ return -1;
+ }
+ }
+
+ INFO("register_sensor_device ends: %s.", name);
+
+ return 0;
+}
+
+#ifdef SUPPORT_LEGACY_SENSOR
+
+/*
+ * Legacy-class twin of register_sensor_device(): creates the device under
+ * vs->l_sensor_class instead. Returns 0 on success, -1 on error.
+ */
+int l_register_sensor_device(struct device *dev, struct virtio_sensor *vs,
+ struct device_attribute *attributes[], const char* name)
+{
+ int i = 0, err = 0;
+
+ if (!vs->l_sensor_class) {
+ ERR("l sensor class is not created before make device");
+ return -1;
+ }
+
+ /* device_create() reports failure via ERR_PTR, not a negative pointer;
+  * the old 'dev < 0' comparison could never fire */
+ dev = device_create(vs->l_sensor_class, NULL, MKDEV(0,0), NULL, "%s", name);
+ if (IS_ERR(dev)) {
+ ERR("legacy register_device_create failed!");
+ return -1;
+ }
+
+ if (attributes == NULL) {
+ ERR("l sensor attributes is NULL.");
+ return -1;
+ }
+
+ for (i = 0; attributes[i] != NULL; i++) {
+ if ((err = device_create_file(dev, attributes[i])) < 0) {
+ ERR("failed to create legacy device file with attribute[%d - %d]", i, err);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+#endif
+
+/*
+ * Virtqueue callback. For set-request completions (req != request_answer)
+ * it only wakes the waiter; for get answers it copies the payload into the
+ * file-scope sensor_data buffer first.
+ */
+static void sensor_vq_done(struct virtqueue *rvq) {
+ unsigned int len;
+ struct msg_info* msg;
+
+ msg = (struct msg_info*) virtqueue_get_buf(vs->vq, &len);
+ if (msg == NULL) {
+ ERR("failed to virtqueue_get_buf");
+ return;
+ }
+
+ if (msg->req != request_answer) {
+ LOG(1, "set_sensor_data callback.");
+ mutex_lock(&vs->lock);
+ vs->flags = 1;
+ mutex_unlock(&vs->lock);
+
+ wake_up_interruptible(&wq);
+ return;
+ }
+
+ if (msg->buf == NULL) {
+ ERR("receive queue- message from host is NULL.");
+ return;
+ }
+
+ mutex_lock(&vs->lock);
+ LOG(1, "msg buf: %s, req: %d, type: %d, vs->flags: %d", msg->buf, msg->req, msg->type, vs->flags);
+
+ memset(sensor_data, 0, sizeof(sensor_data));
+ /*
+ * Bounded copy (was strcpy): msg->buf is __MAX_BUF_SIZE (1024) bytes
+ * while sensor_data is only __MAX_BUF_SENSOR (32); an oversized host
+ * reply would overflow the global buffer.
+ */
+ strlcpy(sensor_data, msg->buf, sizeof(sensor_data));
+ vs->flags = 1;
+ mutex_unlock(&vs->lock);
+
+ wake_up_interruptible(&wq);
+}
+
+/*
+ * Send a set request of @type with payload @buf to the host and block
+ * (interruptibly) until the virtqueue callback signals completion.
+ * Callers serialize whole transactions via vs->vqlock.
+ */
+void set_sensor_data(int type, const char* buf)
+{
+ int err = 0;
+
+ if (buf == NULL) {
+ ERR("set_sensor buf is NULL.");
+ return;
+ }
+
+ if (vs == NULL) {
+ ERR("Invalid sensor handle");
+ return;
+ }
+
+ mutex_lock(&vs->lock);
+ memset(&vs->msginfo, 0, sizeof(vs->msginfo));
+
+ vs->msginfo.req = request_set;
+ vs->msginfo.type = type;
+ /* bounded copy (was strcpy): sysfs store buffers can exceed the
+  * 1024-byte msginfo.buf, which would overflow the message struct */
+ strlcpy(vs->msginfo.buf, buf, sizeof(vs->msginfo.buf));
+
+ LOG(1, "set_sensor_data type: %d, req: %d, buf: %s",
+ vs->msginfo.type, vs->msginfo.req, vs->msginfo.buf);
+
+ mutex_unlock(&vs->lock);
+
+ err = virtqueue_add_outbuf(vs->vq, vs->sg_svq, 1, &vs->msginfo, GFP_ATOMIC);
+ if (err < 0) {
+ ERR("failed to add_outbuf buffer to virtqueue (err = %d)", err);
+ return;
+ }
+
+ virtqueue_kick(vs->vq);
+
+ wait_event_interruptible(wq, vs->flags != 0);
+
+ mutex_lock(&vs->lock);
+ vs->flags = 0;
+ mutex_unlock(&vs->lock);
+}
+
+/*
+ * Send a get request of @type, block until the host answers, and copy the
+ * reply string into @data (which every caller sizes to __MAX_BUF_SENSOR).
+ * Returns 0 on success, negative on failure.
+ */
+int get_sensor_data(int type, char* data)
+{
+ struct scatterlist *sgs[2];
+ int err = 0;
+
+ if (vs == NULL || data == NULL) {
+ ERR("Invalid sensor handle or data is NULL.");
+ return -1;
+ }
+
+ mutex_lock(&vs->lock);
+ memset(&vs->msginfo, 0, sizeof(vs->msginfo));
+
+ vs->msginfo.req = request_get;
+ vs->msginfo.type = type;
+
+ LOG(1, "get_sensor_data start type: %d, req: %d",
+ vs->msginfo.type, vs->msginfo.req);
+
+ sgs[0] = &vs->sg_vq[0];
+ sgs[1] = &vs->sg_vq[1];
+ mutex_unlock(&vs->lock);
+
+ err = virtqueue_add_sgs(vs->vq, sgs, 1, 1, &vs->msginfo, GFP_ATOMIC);
+ if (err < 0) {
+ ERR("failed to add_sgs buffer to virtqueue (err = %d)", err);
+ return err;
+ }
+
+ virtqueue_kick(vs->vq);
+
+ wait_event_interruptible(wq, vs->flags != 0);
+
+ mutex_lock(&vs->lock);
+ vs->flags = 0;
+ /*
+ * Bounded, NUL-terminated copy (was memcpy of strlen() bytes, which
+ * left the caller's buffer unterminated unless it had been pre-zeroed).
+ */
+ strlcpy(data, sensor_data, __MAX_BUF_SENSOR);
+ mutex_unlock(&vs->lock);
+
+ LOG(1, "get_sensor_data end type: %d, data: %p", type, data);
+ return 0;
+}
+
+/*
+ * Initialize every sub-sensor advertised in vs->sensor_capability.
+ * A failing sub-sensor is recorded in the sensor_fail_init bitmask so that
+ * device_exit() skips it; initialization of the others continues.
+ */
+static void device_init(struct virtio_sensor *vs)
+{
+ int ret = 0;
+
+ if (vs->sensor_capability & sensor_cap_accel) {
+ ret = maru_accel_init(vs);
+ if (ret) {
+ vs->sensor_fail_init |= sensor_cap_accel;
+ ERR("failed to init accel with error %d", ret);
+ }
+ }
+
+ if (vs->sensor_capability & sensor_cap_geo) {
+ ret = maru_geo_init(vs);
+ if (ret) {
+ vs->sensor_fail_init |= sensor_cap_geo;
+ ERR("failed to init geo with error %d", ret);
+ }
+ }
+
+ if (vs->sensor_capability & sensor_cap_gyro) {
+ ret = maru_gyro_init(vs);
+ if (ret) {
+ vs->sensor_fail_init |= sensor_cap_gyro;
+ ERR("failed to init gyro with error %d", ret);
+ }
+ }
+
+ if (vs->sensor_capability & sensor_cap_light) {
+ ret = maru_light_init(vs);
+ if (ret) {
+ vs->sensor_fail_init |= sensor_cap_light;
+ ERR("failed to init light with error %d", ret);
+ }
+ }
+
+ if (vs->sensor_capability & sensor_cap_proxi) {
+ ret = maru_proxi_init(vs);
+ if (ret) {
+ vs->sensor_fail_init |= sensor_cap_proxi;
+ ERR("failed to init proxi with error %d", ret);
+ }
+ }
+
+ if (vs->sensor_capability & sensor_cap_rotation_vector) {
+ ret = maru_rotation_vector_init(vs);
+ if (ret) {
+ vs->sensor_fail_init |= sensor_cap_rotation_vector;
+ ERR("failed to init rotation vector with error %d", ret);
+ }
+ }
+
+ if (vs->sensor_capability & sensor_cap_haptic) {
+ ret = maru_haptic_init(vs);
+ if (ret) {
+ vs->sensor_fail_init |= sensor_cap_haptic;
+ ERR("failed to init haptic with error %d", ret);
+ }
+ }
+
+ if (vs->sensor_capability & sensor_cap_pressure) {
+ ret = maru_pressure_init(vs);
+ if (ret) {
+ vs->sensor_fail_init |= sensor_cap_pressure;
+ ERR("failed to init pressure with error %d", ret);
+ }
+ }
+
+ if (vs->sensor_capability & sensor_cap_uv) {
+ ret = maru_uv_init(vs);
+ if (ret) {
+ vs->sensor_fail_init |= sensor_cap_uv;
+ ERR("failed to init uv with error %d", ret);
+ }
+ }
+
+ if (vs->sensor_capability & sensor_cap_hrm) {
+ ret = maru_hrm_init(vs);
+ if (ret) {
+ vs->sensor_fail_init |= sensor_cap_hrm;
+ ERR("failed to init hrm with error %d", ret);
+ }
+ }
+}
+
+/*
+ * Tear down every sub-sensor that was advertised AND initialized
+ * successfully (i.e. its bit is not set in sensor_fail_init).
+ */
+static void device_exit(struct virtio_sensor *vs)
+{
+ if (vs->sensor_capability & sensor_cap_accel &&
+ !(vs->sensor_fail_init & sensor_cap_accel)) {
+ maru_accel_exit(vs);
+ }
+
+ if (vs->sensor_capability & sensor_cap_geo &&
+ !(vs->sensor_fail_init & sensor_cap_geo)) {
+ maru_geo_exit(vs);
+ }
+
+ if (vs->sensor_capability & sensor_cap_gyro &&
+ !(vs->sensor_fail_init & sensor_cap_gyro)) {
+ maru_gyro_exit(vs);
+ }
+
+ if (vs->sensor_capability & sensor_cap_light &&
+ !(vs->sensor_fail_init & sensor_cap_light)) {
+ maru_light_exit(vs);
+ }
+
+ if (vs->sensor_capability & sensor_cap_proxi &&
+ !(vs->sensor_fail_init & sensor_cap_proxi)) {
+ maru_proxi_exit(vs);
+ }
+
+ if (vs->sensor_capability & sensor_cap_rotation_vector &&
+ !(vs->sensor_fail_init & sensor_cap_rotation_vector)) {
+ maru_rotation_vector_exit(vs);
+ }
+
+ if (vs->sensor_capability & sensor_cap_haptic &&
+ !(vs->sensor_fail_init & sensor_cap_haptic)) {
+ maru_haptic_exit(vs);
+ }
+
+ if (vs->sensor_capability & sensor_cap_pressure &&
+ !(vs->sensor_fail_init & sensor_cap_pressure)) {
+ maru_pressure_exit(vs);
+ }
+
+ if (vs->sensor_capability & sensor_cap_uv &&
+ !(vs->sensor_fail_init & sensor_cap_uv)) {
+ maru_uv_exit(vs);
+ }
+
+ if (vs->sensor_capability & sensor_cap_hrm &&
+ !(vs->sensor_fail_init & sensor_cap_hrm)) {
+ maru_hrm_exit(vs);
+ }
+}
+
+/*
+ * Release all driver-wide resources: delete the virtqueues, destroy the
+ * sysfs class devices/classes, and free the global state.
+ * NOTE(review): devices were created with devt MKDEV(0,0); confirm a single
+ * device_destroy(MKDEV(0,0)) removes them all before class_destroy().
+ */
+static void cleanup(struct virtio_device* dev) {
+ dev->config->del_vqs(dev);
+
+ if (vs == NULL)
+ return;
+
+ if (vs->sensor_class) {
+ device_destroy(vs->sensor_class, MKDEV(0,0));
+ class_destroy(vs->sensor_class);
+ }
+
+#ifdef SUPPORT_LEGACY_SENSOR
+ if (vs->l_sensor_class) {
+ device_destroy(vs->l_sensor_class, MKDEV(0,0));
+ class_destroy(vs->l_sensor_class);
+ }
+#endif
+
+ kfree(vs);
+ vs = NULL;
+}
+
+/*
+ * Virtio probe: allocate driver state, create the sysfs classes, set up the
+ * single virtqueue, query the host capability bitmask, and initialize each
+ * advertised sub-sensor. Returns 0 on success or a negative errno.
+ */
+static int sensor_probe(struct virtio_device* dev)
+{
+ int ret = 0;
+ /* local capability buffer; renamed so it no longer shadows the
+  * file-scope sensor_data used by the vq callback */
+ char cap_data[__MAX_BUF_SENSOR];
+
+ INFO("Sensor probe starts");
+
+ /* kzalloc (was kmalloc): flags, sensor_fail_init and the per-sensor
+  * handles are read/OR-ed before ever being written explicitly */
+ vs = kzalloc(sizeof(struct virtio_sensor), GFP_KERNEL);
+ if (!vs) {
+ ERR("failed to allocate sensor structure.");
+ return -ENOMEM;
+ }
+
+ vs->vdev = dev;
+ dev->priv = vs;
+
+ vs->sensor_class = class_create(THIS_MODULE, SENSOR_CLASS_NAME);
+ if (IS_ERR(vs->sensor_class)) {
+ ERR("sensor class creation is failed.");
+ return PTR_ERR(vs->sensor_class);
+ }
+
+#ifdef SUPPORT_LEGACY_SENSOR
+ vs->l_sensor_class = class_create(THIS_MODULE, L_SENSOR_CLASS_NAME);
+ /* was re-checking vs->sensor_class (copy-paste); test the class just made */
+ if (IS_ERR(vs->l_sensor_class)) {
+ ERR("legacy sensor class creation is failed.");
+ return PTR_ERR(vs->l_sensor_class);
+ }
+#endif
+
+ vs->vq = virtio_find_single_vq(dev, sensor_vq_done, "sensor");
+ if (IS_ERR(vs->vq)) {
+ ret = PTR_ERR(vs->vq); /* was returning 0: bogus probe success */
+ cleanup(dev);
+ ERR("failed to init virt queue");
+ return ret;
+ }
+
+ virtqueue_enable_cb(vs->vq);
+
+ sg_init_one(&vs->sg_vq[0], &vs->msginfo, sizeof(vs->msginfo));
+ sg_init_one(&vs->sg_vq[1], &vs->msginfo, sizeof(vs->msginfo));
+
+ sg_init_one(vs->sg_svq, &vs->msginfo, sizeof(vs->msginfo));
+
+ mutex_init(&vs->lock);
+ mutex_init(&vs->vqlock);
+
+ memset(cap_data, 0, sizeof(cap_data));
+ mutex_lock(&vs->vqlock);
+ ret = get_sensor_data(sensor_type_list, cap_data);
+ mutex_unlock(&vs->vqlock);
+ if (ret) {
+ ERR("sensor capability data is null.");
+ cleanup(dev);
+ return ret;
+ }
+
+ INFO("sensor raw capability is %s", cap_data);
+ vs->sensor_capability = sensor_atoi(cap_data);
+ INFO("sensor capability is %02x", vs->sensor_capability);
+
+ if (vs->sensor_capability == 0) {
+ ERR("No sensor devices ");
+ cleanup(dev);
+ return -ENODEV; /* was returning 0 */
+ }
+
+ device_init(vs);
+
+ if (vs->sensor_capability == vs->sensor_fail_init) {
+ ERR("failed initialing all devices");
+ cleanup(dev);
+ return -ENODEV; /* was returning 0 */
+ }
+
+ INFO("Sensor probe completes");
+
+ return ret;
+}
+
+/* Virtio remove: reset the device, tear down sub-sensors, and free state. */
+static void sensor_remove(struct virtio_device* dev)
+{
+ struct virtio_sensor* vs = dev->priv; /* shadows the file-scope vs (same object) */
+ if (!vs)
+ {
+ ERR("vs is NULL");
+ return;
+ }
+
+ dev->config->reset(dev);
+
+ device_exit(vs);
+
+ cleanup(dev);
+
+ INFO("Sensor driver is removed.");
+}
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+static struct virtio_driver virtio_sensor_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE ,
+ },
+ .id_table = id_table,
+ .probe = sensor_probe,
+ .remove = sensor_remove,
+};
+
+
+static int __init sensor_init(void)
+{
+ INFO("Sensor driver initialized.");
+
+ return register_virtio_driver(&virtio_sensor_driver);
+}
+
+static void __exit sensor_exit(void)
+{
+ unregister_virtio_driver(&virtio_sensor_driver);
+
+ INFO("Sensor driver is destroyed.");
+}
+
+module_init(sensor_init);
+module_exit(sensor_exit);
+
+/* "GPL v2" is the license ident recognized by the module loader; the
+ * previous "GPL2" is not in the license list and would taint the kernel
+ * as if the module were proprietary. */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jinhyung Choi <jinhyung2.choi@samsung.com>");
+MODULE_DESCRIPTION("Emulator Virtio Sensor Driver");
+
--- /dev/null
+/*
+ * Maru Virtio Sensor Device Driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Contact:
+ * Jinhyung Choi <jinhyung2.choi@samsung.com>
+ * Sangho Park <sangho1206.park@samsung.com>
+ * YeongKyoon Lee <yeongkyoon.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributors:
+ * - S-Core Co., Ltd
+ *
+ */
+
+#ifndef _MARU_VIRTIO_SENSOR_H
+#define _MARU_VIRTIO_SENSOR_H
+
+#include <linux/kernel.h>
+#include <linux/virtio.h>
+#include <linux/input.h>
+#include <linux/limits.h>
+
+#define SUPPORT_LEGACY_SENSOR 1
+
+enum request_cmd {
+ request_get = 0,
+ request_set,
+ request_answer
+};
+
+enum sensor_types {
+ sensor_type_list = 0,
+ sensor_type_accel,
+ sensor_type_accel_enable,
+ sensor_type_accel_delay,
+ sensor_type_geo,
+ sensor_type_geo_enable, // 5
+ sensor_type_geo_delay,
+ sensor_type_gyro,
+ sensor_type_gyro_enable,
+ sensor_type_gyro_delay,
+ sensor_type_gyro_x, // 10
+ sensor_type_gyro_y,
+ sensor_type_gyro_z,
+ sensor_type_light,
+ sensor_type_light_enable,
+ sensor_type_light_delay, // 15
+ sensor_type_light_adc,
+ sensor_type_light_level,
+ sensor_type_proxi,
+ sensor_type_proxi_enable,
+ sensor_type_proxi_delay, // 20
+ sensor_type_rotation_vector,
+ sensor_type_rotation_vector_enable,
+ sensor_type_rotation_vector_delay,
+ sensor_type_mag,
+ sensor_type_tilt, // 25
+ sensor_type_pressure,
+ sensor_type_pressure_enable,
+ sensor_type_pressure_delay,
+ sensor_type_uv,
+ sensor_type_uv_enable,
+ sensor_type_uv_delay,
+ sensor_type_hrm,
+ sensor_type_hrm_heart,
+ sensor_type_hrm_rri,
+ sensor_type_hrm_enable,
+ sensor_type_hrm_delay,
+ sensor_type_max
+};
+
+enum sensor_capabilities {
+ sensor_cap_accel = 0x0001,
+ sensor_cap_geo = 0x0002,
+ sensor_cap_gyro = 0x0004,
+ sensor_cap_light = 0x0008,
+ sensor_cap_proxi = 0x0010,
+ sensor_cap_rotation_vector = 0x0020,
+ sensor_cap_haptic = 0x0040,
+ sensor_cap_pressure = 0x0080,
+ sensor_cap_uv = 0x0100,
+ sensor_cap_hrm = 0x0200
+};
+
+#define __MAX_BUF_SIZE 1024 /* host message payload size (struct msg_info) */
+#define __MAX_BUF_SENSOR 32 /* guest-side per-value string buffer size */
+
+#define __MIN_DELAY_SENSOR 1000000 /* minimum poll delay accepted by stores */
+#define __MAX_DELAY_SENSOR INT_MAX
+
+/* wire format exchanged with the host over the virtqueue */
+struct msg_info {
+ char buf[__MAX_BUF_SIZE]; /* value payload as a NUL-terminated string */
+
+ uint16_t type; /* enum sensor_types */
+ uint16_t req; /* enum request_cmd */
+};
+
+#ifdef SUPPORT_LEGACY_SENSOR
+# define L_SENSOR_CLASS_NAME "sensor"
+#endif
+
+/* Driver-wide state, allocated once at probe time. */
+struct virtio_sensor {
+ struct virtio_device* vdev;
+ struct virtqueue* vq; /* single request/reply queue */
+
+ struct msg_info msginfo; /* shared in-flight request buffer */
+ struct scatterlist sg_vq[2]; /* out + in descriptors for get requests */
+ struct scatterlist sg_svq[1]; /* out descriptor for set requests */
+
+ int flags; /* completion flag set by the vq callback */
+ struct mutex lock; /* protects msginfo/flags/reply buffer */
+ struct mutex vqlock; /* serializes whole get/set transactions */
+
+ struct class* sensor_class;
+
+#ifdef SUPPORT_LEGACY_SENSOR
+ struct class* l_sensor_class;
+#endif
+
+ int sensor_capability; /* bitmask of enum sensor_capabilities */
+ int sensor_fail_init; /* capabilities whose init failed */
+
+ /* opaque per-sensor state, owned by the respective maru_*_init() */
+ void* accel_handle;
+ void* geo_handle;
+ void* gyro_handle;
+ void* light_handle;
+ void* proxi_handle;
+ void* rotation_vector_handle;
+ void* haptic_handle;
+ void* pressure_handle;
+ void* uv_handle;
+ void* hrm_handle;
+};
+
+#define MARU_DEVICE_ATTR(_name) \
+ struct device_attribute dev_attr_##_name = MARU_ATTR_RONLY(_name)
+
+#define MARU_ATTR_RONLY(_name) { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = maru_##_name##_show, \
+}
+
+#define MARU_ATTR_RW(_name) { \
+ .attr = {.name = __stringify(_name), .mode = 0644 }, \
+ .show = maru_##_name##_show, \
+ .store = maru_##_name##_store, \
+}
+
+int sensor_atoi(const char *value);
+
+int register_sensor_device(struct device *dev, struct virtio_sensor *vs,
+ struct device_attribute *attributes[], const char* name);
+
+#ifdef SUPPORT_LEGACY_SENSOR
+int l_register_sensor_device(struct device *dev, struct virtio_sensor *vs,
+ struct device_attribute *attributes[], const char* name);
+#endif
+
+void set_sensor_data(int type, const char* buf);
+int get_sensor_data(int type, char* data);
+
+#define SENSOR_CLASS_NAME "sensors"
+#define MARU_SENSOR_DEVICE_VENDOR "Tizen_SDK"
+
+#define DRIVER_ACCEL_NAME "accel"
+#define SENSOR_ACCEL_INPUT_NAME "accelerometer_sensor"
+#define MARU_ACCEL_DEVICE_NAME "maru_sensor_accel_1"
+
+#define DRIVER_GEO_NAME "geo"
+#define SENSOR_GEO_INPUT_NAME "geomagnetic_sensor"
+#define MARU_GEO_DEVICE_NAME "maru_sensor_geo_1"
+
+#define DRIVER_GYRO_NAME "gyro"
+#define SENSOR_GYRO_INPUT_NAME "gyro_sensor"
+#define MARU_GYRO_DEVICE_NAME "maru_sensor_gyro_1"
+
+#define DRIVER_LIGHT_NAME "light"
+#define SENSOR_LIGHT_INPUT_NAME "light_sensor"
+#define MARU_LIGHT_DEVICE_NAME "maru_sensor_light_1"
+
+#define DRIVER_PROXI_NAME "proxi"
+#define SENSOR_PROXI_INPUT_NAME "proximity_sensor"
+#define MARU_PROXI_DEVICE_NAME "maru_sensor_proxi_1"
+
+#define DRIVER_ROTATION_NAME "rotation"
+#define SENSOR_ROTATION_INPUT_NAME "rot_sensor"
+#define MARU_ROTATION_DEVICE_NAME "maru_sensor_rotation_vector_1"
+
+#define SENSOR_HAPTIC_INPUT_NAME "haptic_sensor"
+
+#define DRIVER_PRESSURE_NAME "pressure"
+#define SENSOR_PRESSURE_INPUT_NAME "pressure_sensor"
+#define MARU_PRESSURE_DEVICE_NAME "maru_sensor_pressure_1"
+
+#define DRIVER_UV_NAME "ultraviolet"
+#define SENSOR_UV_INPUT_NAME "uv_sensor"
+#define MARU_UV_DEVICE_NAME "maru_sensor_uv_1"
+
+#define DRIVER_HRM_NAME "hrm"
+#define SENSOR_HRM_INPUT_NAME "hrm_lib_sensor"
+#define MARU_HRM_DEVICE_NAME "maru_sensor_hrm_1"
+
+// It locates /sys/module/maru_virtio_sensor/parameters/sensor_driver_debug
+extern int sensor_driver_debug;
+
+/* unconditional error/info logging, prefixed with the class name */
+#define ERR(fmt, ...) \
+ printk(KERN_ERR "%s: " fmt "\n", SENSOR_CLASS_NAME, ##__VA_ARGS__)
+
+#define INFO(fmt, ...) \
+ printk(KERN_INFO "%s: " fmt "\n", SENSOR_CLASS_NAME, ##__VA_ARGS__)
+
+/* debug logging, emitted only when sensor_driver_debug == log_level */
+#define LOG(log_level, fmt, ...) \
+ do { \
+ if (sensor_driver_debug == (log_level)) { \
+ printk(KERN_INFO "%s: " fmt "\n", SENSOR_CLASS_NAME, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+/*
+ * Accelerometer device
+ */
+int maru_accel_init(struct virtio_sensor *vs);
+int maru_accel_exit(struct virtio_sensor *vs);
+
+/*
+ * Geomagnetic device
+ */
+int maru_geo_init(struct virtio_sensor *vs);
+int maru_geo_exit(struct virtio_sensor *vs);
+
+/*
+ * Gyroscope device
+ */
+int maru_gyro_init(struct virtio_sensor *vs);
+int maru_gyro_exit(struct virtio_sensor *vs);
+
+/*
+ * Light device
+ */
+int maru_light_init(struct virtio_sensor *vs);
+int maru_light_exit(struct virtio_sensor *vs);
+
+/*
+ * Proximity device
+ */
+int maru_proxi_init(struct virtio_sensor *vs);
+int maru_proxi_exit(struct virtio_sensor *vs);
+
+/*
+ * Rotation Vector device
+ */
+int maru_rotation_vector_init(struct virtio_sensor *vs);
+int maru_rotation_vector_exit(struct virtio_sensor *vs);
+
+/*
+ * Haptic device
+ */
+int maru_haptic_init(struct virtio_sensor *vs);
+int maru_haptic_exit(struct virtio_sensor *vs);
+
+/*
+ * Pressure device
+ */
+int maru_pressure_init(struct virtio_sensor *vs);
+int maru_pressure_exit(struct virtio_sensor *vs);
+
+/*
+ * UV(UltraViolet) device
+ */
+int maru_uv_init(struct virtio_sensor *vs);
+int maru_uv_exit(struct virtio_sensor *vs);
+
+/*
+ * HRM (Heart Rate Monitor) device
+ */
+int maru_hrm_init(struct virtio_sensor *vs);
+int maru_hrm_exit(struct virtio_sensor *vs);
+
+#endif
source "drivers/gpu/drm/Kconfig"
+# for maru board
+source "drivers/gpu/yagl/Kconfig"
+#
+
menu "Frame buffer Devices"
source "drivers/video/fbdev/Kconfig"
endmenu
--- /dev/null
+/*
+ * vigs_drm.h
+ */
+
+#ifndef _VIGS_DRM_H_
+#define _VIGS_DRM_H_
+
+/*
+ * Bump this whenever driver interface changes.
+ */
+#define DRM_VIGS_DRIVER_VERSION 14
+
+/*
+ * Surface access flags.
+ */
+#define DRM_VIGS_SAF_READ 1
+#define DRM_VIGS_SAF_WRITE 2
+#define DRM_VIGS_SAF_MASK 3
+
+/*
+ * Number of DP framebuffers.
+ */
+#define DRM_VIGS_NUM_DP_FB_BUF 4
+
+/*
+ * DP memory types.
+ */
+#define DRM_VIGS_DP_FB_Y 2
+#define DRM_VIGS_DP_FB_C 3
+
+struct drm_vigs_get_protocol_version
+{
+ uint32_t version;
+};
+
+struct drm_vigs_create_surface
+{
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride;
+ uint32_t format;
+ int scanout;
+ uint32_t handle;
+ uint32_t size;
+ uint32_t id;
+};
+
+struct drm_vigs_create_execbuffer
+{
+ uint32_t size;
+ uint32_t handle;
+};
+
+struct drm_vigs_gem_map
+{
+ uint32_t handle;
+ int track_access;
+ unsigned long address;
+};
+
+struct drm_vigs_gem_wait
+{
+ uint32_t handle;
+};
+
+struct drm_vigs_surface_info
+{
+ uint32_t handle;
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride;
+ uint32_t format;
+ int scanout;
+ uint32_t size;
+ uint32_t id;
+};
+
+struct drm_vigs_exec
+{
+ uint32_t handle;
+};
+
+struct drm_vigs_surface_set_gpu_dirty
+{
+ uint32_t handle;
+};
+
+struct drm_vigs_surface_start_access
+{
+ unsigned long address;
+ uint32_t saf;
+};
+
+struct drm_vigs_surface_end_access
+{
+ unsigned long address;
+ int sync;
+};
+
+struct drm_vigs_create_fence
+{
+ int send;
+ uint32_t handle;
+ uint32_t seq;
+};
+
+struct drm_vigs_fence_wait
+{
+ uint32_t handle;
+};
+
+struct drm_vigs_fence_signaled
+{
+ uint32_t handle;
+ int signaled;
+};
+
+struct drm_vigs_fence_unref
+{
+ uint32_t handle;
+};
+
+struct drm_vigs_plane_set_zpos
+{
+ uint32_t plane_id;
+ int zpos;
+};
+
+struct drm_vigs_plane_set_transform
+{
+ uint32_t plane_id;
+ int hflip;
+ int vflip;
+ int rotation;
+};
+
+struct drm_vigs_dp_create_surface
+{
+ uint32_t dp_plane;
+ uint32_t dp_fb_buf;
+ uint32_t dp_mem_flag;
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride;
+ uint32_t format;
+ uint32_t handle;
+ uint32_t size;
+ uint32_t id;
+};
+
+struct drm_vigs_dp_open_surface
+{
+ uint32_t dp_plane;
+ uint32_t dp_fb_buf;
+ uint32_t dp_mem_flag;
+ uint32_t handle;
+};
+
+#define DRM_VIGS_GET_PROTOCOL_VERSION 0x00
+#define DRM_VIGS_CREATE_SURFACE 0x01
+#define DRM_VIGS_CREATE_EXECBUFFER 0x02
+#define DRM_VIGS_GEM_MAP 0x03
+#define DRM_VIGS_GEM_WAIT 0x04
+#define DRM_VIGS_SURFACE_INFO 0x05
+#define DRM_VIGS_EXEC 0x06
+#define DRM_VIGS_SURFACE_SET_GPU_DIRTY 0x07
+#define DRM_VIGS_SURFACE_START_ACCESS 0x08
+#define DRM_VIGS_SURFACE_END_ACCESS 0x09
+#define DRM_VIGS_CREATE_FENCE 0x0A
+#define DRM_VIGS_FENCE_WAIT 0x0B
+#define DRM_VIGS_FENCE_SIGNALED 0x0C
+#define DRM_VIGS_FENCE_UNREF 0x0D
+#define DRM_VIGS_PLANE_SET_ZPOS 0x0E
+#define DRM_VIGS_PLANE_SET_TRANSFORM 0x0F
+
+#define DRM_VIGS_DP_CREATE_SURFACE 0x20
+#define DRM_VIGS_DP_OPEN_SURFACE 0x21
+
+#define DRM_IOCTL_VIGS_GET_PROTOCOL_VERSION DRM_IOR(DRM_COMMAND_BASE + \
+ DRM_VIGS_GET_PROTOCOL_VERSION, struct drm_vigs_get_protocol_version)
+#define DRM_IOCTL_VIGS_CREATE_SURFACE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_CREATE_SURFACE, struct drm_vigs_create_surface)
+#define DRM_IOCTL_VIGS_CREATE_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_CREATE_EXECBUFFER, struct drm_vigs_create_execbuffer)
+#define DRM_IOCTL_VIGS_GEM_MAP DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_GEM_MAP, struct drm_vigs_gem_map)
+#define DRM_IOCTL_VIGS_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_GEM_WAIT, struct drm_vigs_gem_wait)
+#define DRM_IOCTL_VIGS_SURFACE_INFO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_SURFACE_INFO, struct drm_vigs_surface_info)
+#define DRM_IOCTL_VIGS_EXEC DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_EXEC, struct drm_vigs_exec)
+#define DRM_IOCTL_VIGS_SURFACE_SET_GPU_DIRTY DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_SURFACE_SET_GPU_DIRTY, struct drm_vigs_surface_set_gpu_dirty)
+#define DRM_IOCTL_VIGS_SURFACE_START_ACCESS DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_SURFACE_START_ACCESS, struct drm_vigs_surface_start_access)
+#define DRM_IOCTL_VIGS_SURFACE_END_ACCESS DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_SURFACE_END_ACCESS, struct drm_vigs_surface_end_access)
+#define DRM_IOCTL_VIGS_CREATE_FENCE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_CREATE_FENCE, struct drm_vigs_create_fence)
+#define DRM_IOCTL_VIGS_FENCE_WAIT DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_FENCE_WAIT, struct drm_vigs_fence_wait)
+#define DRM_IOCTL_VIGS_FENCE_SIGNALED DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_FENCE_SIGNALED, struct drm_vigs_fence_signaled)
+#define DRM_IOCTL_VIGS_FENCE_UNREF DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_FENCE_UNREF, struct drm_vigs_fence_unref)
+#define DRM_IOCTL_VIGS_PLANE_SET_ZPOS DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_PLANE_SET_ZPOS, struct drm_vigs_plane_set_zpos)
+#define DRM_IOCTL_VIGS_PLANE_SET_TRANSFORM DRM_IOW(DRM_COMMAND_BASE + \
+ DRM_VIGS_PLANE_SET_TRANSFORM, struct drm_vigs_plane_set_transform)
+
+#define DRM_IOCTL_VIGS_DP_CREATE_SURFACE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_DP_CREATE_SURFACE, struct drm_vigs_dp_create_surface)
+#define DRM_IOCTL_VIGS_DP_OPEN_SURFACE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VIGS_DP_OPEN_SURFACE, struct drm_vigs_dp_open_surface)
+
+#endif
#define PCI_VENDOR_ID_OCZ 0x1b85
+#ifdef CONFIG_MARU
+/* maru devices */
+#define PCI_VENDOR_ID_TIZEN 0xC9B5
+#define PCI_DEVICE_ID_VIRTUAL_BRIGHTNESS 0x1014
+#define PCI_DEVICE_ID_VIRTUAL_CAMERA 0x1018
+#endif
+
#endif /* _LINUX_PCI_IDS_H */
#define VIRTIO_ID_GPU 16 /* virtio GPU */
#define VIRTIO_ID_INPUT 18 /* virtio input */
+#ifdef CONFIG_MARU
+/* maru devices */
+#define VIRTIO_ID_TOUCHSCREEN 31 /* virtio touchscreen */
+#define VIRTIO_ID_KEYBOARD 32 /* virtio keyboard */
+#define VIRTIO_ID_ESM 33 /* virtio Emulator Status Monitor */
+#define VIRTIO_ID_HWKEY 34 /* virtio hwkey */
+#define VIRTIO_ID_EVDI 35 /* virtio emulator virtual device interface */
+#define VIRTIO_ID_GL 36 /* virtio glmem */
+#define VIRTIO_ID_SENSOR 37 /* virtio sensor */
+#define VIRTIO_ID_NFC 38 /* virtio nfc */
+#define VIRTIO_ID_JACK 39 /* virtio jack */
+#define VIRTIO_ID_POWER 40 /* virtio power supply */
+#define VIRTIO_ID_VMODEM 41 /* virtio VMODEM */
+#define VIRTIO_ID_ROTARY 42 /* virtio rotary */
+#define VIRTIO_ID_TABLET 43 /* virtio tablet */
+#endif
+
#endif /* _LINUX_VIRTIO_IDS_H */
--- /dev/null
+#!/bin/sh -xe
+# clean: abort unless building on a supported Ubuntu host, then strip .git
+clean()
+{
+	if [ "${TARGET_OS}" != "ubuntu-32" ] && [ "${TARGET_OS}" != "ubuntu-64" ]
+	then
+		exit 1
+	fi
+
+	rm -rf .git
+}
+
+# build: produce both kernel flavors, leaving bzImage.i386 and bzImage.x86_64 in the tree root
+build()
+{
+	# build the i386 (32-bit) kernel first
+	./build-i386.sh
+	mv arch/x86/boot/bzImage ./bzImage.i386
+
+	make distclean
+
+	# tree was wiped by distclean above; now build the x86-64 (64-bit) kernel
+	./build-x86_64.sh
+	mv arch/x86/boot/bzImage ./bzImage.x86_64
+}
+
+# install: copy the built kernel images into the emulator package data directory
+install()
+{
+	KERNEL_DIR="$SRCDIR/package/3.0-emulator-kernel-x86.package.${TARGET_OS}/data/platforms/tizen-3.0/common/emulator/data/kernel"
+	mkdir -p "$KERNEL_DIR"
+
+	cp ./bzImage.i386 "$KERNEL_DIR/bzImage.i386"
+	cp ./bzImage.x86_64 "$KERNEL_DIR/bzImage.x86_64"
+}
--- /dev/null
+* 3.14.16
+- maru-inputs: fix unexpected termination using maru-input devices
+== Jinhyung Jo <jinhyung.jo@samsung.com> 2016-03-08
+* 3.14.15
+- smack: apply patches for socket fd passing
+== Jinhyung Jo <jinhyung.jo@samsung.com> 2016-03-02
+* 3.14.14
+- PAT: apply PAT features for W/A
+- sensor: modify the proximity value appropriately
+== Sooyoung Ha <yoosah.ha@samsung.com> 2016-01-27
+* 3.14.13
+- VIGS: set correct DPI value
+== Jinhyung Jo <jinhyung.jo@samsung.com> 2015-11-23
+* 3.14.12
+- YaGL: Bump version with the device and platform
+== Jinhyung Jo <jinhyung.jo@samsung.com> 2015-11-06
+* 3.14.11
+- tablet: added input buffer to virtqueue
+- VIGS: Temporary W/A for the extension
+- tuner: separate dvb_frontend build for tv extension
+- tablet: removed unused codes
+- tablet: added maru tablet driver
+== SeokYeon Hwang <syeon.hwang@samsung.com> 2015-09-10
+* 3.14.10
+- Merge branch 'tizen_2.4_develop' into tizen_3.0_develop
+== Sooyoung Ha <yoosah.ha@samsung.com> 2015-08-19
+* 3.14.9
+- not build security partially when extension supported
+== Jinhyung Choi <jinh0.choi@samsung.com> 2015-07-21
+* 3.14.8
+- not build ac97_codec & ac97_pcm when maru_extension config is on
+== Jinhyung Choi <jinh0.choi@samsung.com> 2015-07-21
+* 3.14.7
+- Ready to build for 64bit & extensions
+- Allow smack patches
+- maru-camera: Changed the method for passing the argument to/from device
+== Jinhyung Jo <jinhyung.jo@samsung.com> 2015-07-15
+* 3.14.6
+- Apply MII
+== Munkyu Im <munkyu.im@samsung.com> 2015-05-27
+* 3.14.5
+- x86, mm/ASLR: Fix stack randomization on 64-bit systems
+- net: sctp: fix slab corruption from use after free on INIT collisions
+- vfs: read file_handle only once in handle_to_path
+== Sungmin Ha <sungmin82.ha@samsung.com> 2015-04-15
+* 3.14.4
+- initramfs: mount devtmpfs before switching root
+== Sooyoung Ha <yoosah.ha@samsung.com> 2015-03-25
+* 3.14.3
+- package: version up
+== Sungmin Ha <sungmin82.ha@samsung.com> 2015-03-20
+* 3.14.2
+- sensor: a global sensor mutex for message transfer
+== Jinhyung Choi <jinhyung2.choi@samsung.com> 2015-03-12
+* 3.14.1
+- package: version up
+== Sungmin Ha <sungmin82.ha@samsung.com> 2015-03-11
+* 2.0.8
+- Merge branch 'tizen_next_linux_3.14' into tizen_next
+== SeokYeon Hwang <syeon.hwang@samsung.com> 2014-12-22
+* 2.0.7
+- config: turn CAN on
+== Alice Liu <alice.liu@intel.com> 2014-11-19
+* 2.0.6
+- v4l2-core: Modified error code in v4l_enumstd()
+- config: turn SMP on
+== Alice Liu <alice.liu@intel.com> 2014-11-10
+* 2.0.5
+- packaging: build only for emulator supported target
+- VIGS: Remove rotation definitions
+- packaging: workaround missing v3.12.18 tag from upstream git
+- packaging: makes repo / tarball name matching and other config fixes
+- perf tools: define _DEFAULT_SOURCE for glibc_2.20
+- sensor: added pressure, ultraviolet, and hrm sensor
+- Revert "uname: Add Emulator specific name"
+- remove IVI specific config file
+- config: enable CONFIG_FHANDLE
+== Alice Liu <alice.liu@intel.com> 2014-11-07
+* 2.0.4
+- packaging: Initial packaging on 3.12.18 for Tizen
+- package: Prevent marking "+" at kernel version
+== Alice Liu <alice.liu@intel.com> 2014-11-07
+* 2.0.3
+- VIGS: Implement plane flip/rotate
+- VIGS: Support YUV420 planar format
+- VIGS: fix DPMS deadlock
+- VIGS: Support DP memory and planar pixel formats
+- brillcodec: add new command for reducing I/O
+== Jinhyung Jo <jinhyung.jo@samsung.com> 2014-08-28
+* 2.0.2
+- Add emulator specific uname
+== Munkyu Im <munkyu.im@samsung.com> 2014-07-11
+* 2.0.1
+- sensors: re-structure with input_event
+- maru_haptic: force feedback driver is added
+- jack & power driver: MAX buf size to 512
+- evdi: added guest emuld connection message
+== Jinhyung Choi <jinhyung2.choi@samsung.com> 2014-06-20
+* 2.0.0
+- Major package version up
+== SeokYeon Hwang <syeon.hwang@samsung.com> 2014-06-10
+* 1.4.36
+- Apply Linux Kernel 3.12
+== GiWoong Kim <giwoong.kim@samsung.com> 2014-05-13
+* 1.4.35
+- set touchscreen resolution
+== GiWoong Kim <giwoong.kim@samsung.com> 2014-04-14
+* 1.4.34
+- enabling selective sensors and jacks
+== Jinhyung Choi <jinhyung2.choi@samsung.com> 2014-03-20
+* 1.4.33
+- Implemented the HBM(High Brightness Mode) for the maru brightness.
+== Jinhyung Jo <jinhyung.jo@samsung.com> 2014-03-07
+* 1.4.32
+- Data is moved into qemu for jacks, battery, and sensors
+== Jinhyung Choi <jinhyung2.choi@samsung.com> 2014-03-06
+* 1.4.31
+- Implemented multicore rendering and fences
+== GiWoong Kim <giwoong.kim@samsung.com> 2014-01-17
+* 1.4.30
+- enable configs to support spice
+== Sungmin Ha <sungmin82.ha@samsung.com> 2013-12-18
+* 1.4.29
+- support multi session on nfc
+== Munkyu Im <munkyu.im@samsung.com> 2013-11-12
+* 1.4.28
+- Support emulator suspend
+== SeokYeon Hwang <syeon.hwang@samsung.com> 2013-09-23
+* 1.4.27
+- package version up
+== Munkyu Im <munkyu.im@samsung.com> 2013-09-23
+* 1.4.26
+- package version up
+== Munkyu Im <munkyu.im@samsung.com> 2013-09-02
+* 1.4.25
+- Support virtio evdi, sensor, nfc
+== Jinhyung Choi <jinhyung2.choi@samsung.com> 2013-07-30
+* 1.4.24
+- modified structure of virtio-hwkey
+== Sungmin Ha <sungmin82.ha@samsung.com> 2013-07-25
+* 1.4.23
+- fixed one of Smack bugs which is about IP packet access check.
+== Kitae Kim <kt920.kim@samsung.com> 2013-06-28
+* 1.4.22
+- applied smack patches.
+== Kitae Kim <kt920.kim@samsung.com> 2013-05-29
+* 1.4.21
+- added initializing vqidx when the host keyboard is turned on.
+== Sungmin Ha <sungmin82.ha@samsung.com> 2013-05-21
+* 1.4.19
+- modified process of using virtio keyboard queue
+== Sungmin Ha <sungmin82.ha@samsung.com> 2013-05-14
+* 1.4.18
+- increased virtio keyboard queue
+== Sungmin Ha <sungmin82.ha@samsung.com> 2013-04-06
+* 1.4.17
+- added virtio hwkey driver and improved hwkey mapping
+== Sungmin Ha <sungmin82.ha@samsung.com> 2013-03-20
+* 1.4.15
+- source clean-up for overlay and backlight module.
+== Kitae Kim <kt920.kim@samsung.com> 2013-03-08
+* 1.4.14
+- Enable CONFIG_BLK_DEV_CRYPTOLOOP option.
+== Kitae Kim <kt920.kim@samsung.com> 2013-01-24
+* 1.4.13
+- modified block device name for sdcard
+== Sungmin Ha <sungmin82.ha@samsung.com> 2013-01-21
+* 1.4.12
+- Fixed a bug does not close when the device has been shut down with no streaming data.
+== Jinhyung Jo <jinhyung.jo@samsung.com> 2013-01-15
+* 1.4.11
+- Fixed a bug when audio some codecs are decoded simultaneously.
+- Source cleanup and codec driver can get and set offset of device memory for audio type.
+== Kitae Kim <kt920.kim@samsung.com> 2012-12-20
+* 1.4.5
+- Modified touchscreen range
+== GiWoong Kim <giwoong.kim@samsung.com> 2012-11-27
+* 1.4.3
+- fixed a bug when virtio-keyboard is removed.
+- input_free_device function cannot be used after calling input_unregister_device function.
+== Kita Kim <kt920.kim@samsung.com> 2012-11-13
+* 1.4.2
+- enable virtio-pci, virtio-touchscreen, net-9p, 9pfs drivers for arm kernel.
+- Those drivers are necessary for host fileshare feature and virtio-touchscreen-pci device on arm emulator.
+== Kita Kim <kt920.kim@samsung.com> 2012-11-09
+* 1.4.1
+- Do not ignore a touch event on emulator.
+- The current touch event may be same as the previous coordinate data.
+== GiWoong.kim <giwoong.kim@samsung.com> 2012-11-05
+* 1.3.24
+- The range of brightness level has been changed.(1 ~ 24 => 0 ~ 100)
+== Jinhyung.jo <jinhyung.jo@samsung.com> 2012-10-31
+* 1.3.23
+- Modify dibs build script for arm package.
+- Since arm toolchain has been installed on build environment, kernel source can support arm package.
+== Kitae Kim <kt920.kim@samsung.com> 2012-10-31
+* 1.3.22
+- Merge tizen_arm branch into develop branch.
+- Merge arm kernel source for emulator into x86 kernel source.
+== Kitae Kim <kt920.kim@samsung.com> 2012-10-30
+* 1.3.20
+- Change kernel keycode of KEY_MENU.
+- Since X key mapping has been changed, kernel keycode has to be changed.
+== GiWoong Kim <giwoong.kim@samsung.com> 2012-10-23
+* 1.3.19
+- Enable devtmpfs config.
+- Since the udev module has been updated, the kernel needs to enable the devtmpfs config.
+== Kitae Kim <kt920.kim@samsung.com> 2012-10-23
--- /dev/null
+Source: emulator-kernel
+Version: 4.4.0
+Maintainer: SeokYeon Hwang <syeon.hwang@samsung.com>
+
+Package: 3.0-emulator-kernel-x86
+OS: ubuntu-32, ubuntu-64, windows-32, windows-64, macos-64
+Build-host-os: ubuntu-64
+Description: Tizen x86 Emulator Kernel
+
+#Package: 3.0-emulator-kernel-arm
+#OS: ubuntu-32, ubuntu-64, windows-32, windows-64, macos-64
+#Build-host-os: ubuntu-64
+#Description: Tizen ARM Emulator Kernel
--- /dev/null
+#!/bin/busybox sh
+# SeokYeon Hwang (syeon.hwang@samsung.com)
+
+cmdline() {
+ local value
+ value=" $(/bin/busybox cat /proc/cmdline) "
+ value="${value##* $1=}"
+ value="${value%% *}"
+ [ "$value" != "" ] && echo "$value"
+}
+
+rescue_shell() {
+	echo -e "${COLOR_RED}Error !!! Dropping you to a shell.${NO_COLOR}"
+	/bin/busybox --install -s /bin
+	echo -e "${COLOR_BLUE}Opening a shell on '/dev/hvc0'.${NO_COLOR}"
+	sh </dev/hvc0 >/dev/hvc0 2>&1 &
+
+	if [ -z "$CONSOLE" ]; then
+		CONSOLE="console"
+	fi
+
+	echo -e "${COLOR_BLUE}Opening a shell on '/dev/${CONSOLE}'.${NO_COLOR}"
+	export CONSOLE
+	exec setsid sh -c 'exec sh < /dev/${CONSOLE} >/dev/${CONSOLE} 2>&1'
+}
+
+mount_fs() {
+ /bin/busybox mount -t $FSTYPE -o rw $1 $2 || rescue_shell
+}
+
+/bin/busybox mkdir -p /dev
+/bin/busybox mount -t devtmpfs devtmpfs /dev
+
+exec >/dev/console 2>&1
+
+COLOR_BLUE="\033[1;34m" # light blue
+COLOR_RED="\033[1;31m" # light red
+NO_COLOR="\033[0m"
+
+# for debugging...
+#/bin/busybox ls -la /dev >> /dev/console
+
+echo -e "${COLOR_BLUE}Preparing...${NO_COLOR}"
+/bin/busybox mkdir -p /proc
+/bin/busybox mount -t proc proc /proc
+/bin/busybox mkdir -p /sys
+/bin/busybox mount -t sysfs sys /sys
+
+NEW_ROOT="/new_root"
+INIT=$(cmdline init)
+CONSOLE=$(cmdline console)
+ROOT=$(cmdline root)
+FSTYPE=$(cmdline fstype)
+
+# default to ext4 when no 'fstype=' was given on the kernel command line
+if [ -z "$FSTYPE" ]; then
+	FSTYPE=ext4
+fi
+
+# mount image...
+echo -e "${COLOR_BLUE}Mount image...${NO_COLOR}"
+/bin/busybox mkdir -p $NEW_ROOT
+if [ -z $ROOT ]; then
+ # mount rootfs...
+ # find rootfs...
+ ROOT=$(/bin/busybox findfs LABEL=emulator-rootfs)
+ if [ -z $ROOT ]; then
+ # for legacy image...
+ ROOT=$(/bin/busybox findfs LABEL=platform)
+ fi
+ if [ -z $ROOT ]; then
+ # failsafe...
+ ROOT=/dev/vda
+ fi
+
+ mount_fs $ROOT $NEW_ROOT
+
+ # mount system data area...
+ SYSDATA=$(/bin/busybox findfs LABEL=emulator-sysdata)
+ if [ ! -z $SYSDATA ]; then
+ mount_fs $SYSDATA $NEW_ROOT/opt
+ fi
+ # mount user area...
+ USER=$(/bin/busybox findfs LABEL=emulator-user)
+ if [ ! -z $USER ]; then
+ mount_fs $USER $NEW_ROOT/opt/usr
+ fi
+else
+ echo -e "${COLOR_BLUE}Mount ${ROOT}...${NO_COLOR}"
+ mount_fs $ROOT $NEW_ROOT
+fi
+
+# execute prerun scripts (sourced, so it runs in this shell with $NEW_ROOT as $1)...
+if [ -r $NEW_ROOT/etc/emulator/prerun ]; then
+	echo -e "${COLOR_BLUE}Prerun...${NO_COLOR}"
+	. $NEW_ROOT/etc/emulator/prerun $NEW_ROOT
+fi
+
+# clean up...
+/bin/busybox umount /proc
+/bin/busybox umount /sys
+#/bin/busybox umount /dev
+
+echo -e "${COLOR_BLUE}Switching root...${NO_COLOR}"
+if [ -z $INIT ]; then
+ INIT="/sbin/init"
+fi
+
+/bin/busybox mkdir -p $NEW_ROOT/dev
+/bin/busybox mount -t devtmpfs devtmpfs $NEW_ROOT/dev
+/bin/busybox mkdir -p $NEW_ROOT/sys
+/bin/busybox mount -t sysfs sys $NEW_ROOT/sys
+
+exec /bin/busybox switch_root -c /dev/console $NEW_ROOT $INIT
--- /dev/null
+# SeokYeon Hwang (syeon.hwang@samsung.com)
+
+# init script
+file /init ramfs/init 0755 0 0
+
+# busybox
+dir /bin 0755 0 0
+file /bin/busybox ramfs/busybox.i386 0755 0 0
+
+# vesafb user driver
+# We don't use vesafb anymore, but it is useful for debugging.
+dir /sbin 0755 0 0
+file /sbin/v86d ramfs/v86d 0755 0 0
+
+# for initial console
+dir /dev 0755 0 0
+nod /dev/console 0600 0 0 c 5 1
--- /dev/null
+# SeokYeon Hwang (syeon.hwang@samsung.com)
+
+# init script
+file /init ramfs/init 0755 0 0
+
+# busybox
+dir /bin 0755 0 0
+file /bin/busybox ramfs/busybox.x86_64 0755 0 0
+
+# for initial console
+dir /dev 0755 0 0
+nod /dev/console 0600 0 0 c 5 1