# CONFIG_AUDIT_ARCH is not set
CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
-CONFIG_X86_32_SMP=y
-CONFIG_X86_HT=y
CONFIG_X86_32_LAZY_GS=y
CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx"
-CONFIG_ARCH_CPU_PROBE_RELEASE=y
CONFIG_ARCH_SUPPORTS_UPROBES=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_IRQ_WORK=y
#
# General setup
#
+CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_CROSS_COMPILE=""
# CONFIG_COMPILE_TEST is not set
#
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_IRQ_SHOW=y
-CONFIG_GENERIC_PENDING_IRQ=y
CONFIG_IRQ_FORCED_THREADING=y
CONFIG_SPARSE_IRQ=y
CONFIG_CLOCKSOURCE_WATCHDOG=y
CONFIG_KTIME_SCALAR=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
-CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
CONFIG_GENERIC_CMOS_UPDATE=y
CONFIG_RCU_FANOUT=32
CONFIG_RCU_FANOUT_LEAF=16
# CONFIG_RCU_FANOUT_EXACT is not set
-# CONFIG_RCU_FAST_NO_HZ is not set
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_RCU_BOOST is not set
# CONFIG_RCU_NOCB_CPU is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
-CONFIG_SLUB_CPU_PARTIAL=y
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_DMA_CONTIGUOUS=y
-CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_GENERIC_SMP_IDLE_THREAD=y
CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
CONFIG_HAVE_DMA_API_DEBUG=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
# CONFIG_MODULE_SIG is not set
-CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
CONFIG_LBDAF=y
CONFIG_BLK_DEV_BSG=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
CONFIG_UNINLINE_SPIN_UNLOCK=y
-CONFIG_MUTEX_SPIN_ON_OWNER=y
CONFIG_FREEZER=y
#
# Processor type and features
#
CONFIG_ZONE_DMA=y
-CONFIG_SMP=y
-CONFIG_X86_MPPARSE=y
-# CONFIG_X86_BIGSMP is not set
+# CONFIG_SMP is not set
CONFIG_X86_EXTENDED_PLATFORM=y
# CONFIG_X86_GOLDFISH is not set
# CONFIG_X86_WANT_INTEL_MID is not set
# CONFIG_X86_INTEL_LPSS is not set
# CONFIG_X86_RDC321X is not set
-# CONFIG_X86_32_NON_STANDARD is not set
CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
# CONFIG_X86_32_IRIS is not set
CONFIG_SCHED_OMIT_FRAME_POINTER=y
CONFIG_CPU_SUP_TRANSMETA_32=y
# CONFIG_HPET_TIMER is not set
CONFIG_DMI=y
-CONFIG_NR_CPUS=8
-# CONFIG_SCHED_SMT is not set
-CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=1
# CONFIG_PREEMPT_NONE is not set
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_PREEMPT_COUNT=y
-CONFIG_X86_LOCAL_APIC=y
-CONFIG_X86_IO_APIC=y
-# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
+# CONFIG_X86_UP_APIC is not set
CONFIG_X86_MCE=y
-CONFIG_X86_MCE_INTEL=y
-CONFIG_X86_MCE_AMD=y
# CONFIG_X86_ANCIENT_MCE is not set
-CONFIG_X86_MCE_THRESHOLD=y
# CONFIG_X86_MCE_INJECT is not set
-CONFIG_X86_THERMAL_VECTOR=y
CONFIG_VM86=y
# CONFIG_TOSHIBA is not set
# CONFIG_I8K is not set
# CONFIG_MEMORY_FAILURE is not set
# CONFIG_TRANSPARENT_HUGEPAGE is not set
CONFIG_CROSS_MEMORY_ATTACH=y
+CONFIG_NEED_PER_CPU_KM=y
# CONFIG_CLEANCACHE is not set
# CONFIG_FRONTSWAP is not set
# CONFIG_CMA is not set
CONFIG_RELOCATABLE=y
CONFIG_X86_NEED_RELOCS=y
CONFIG_PHYSICAL_ALIGN=0x1000000
-CONFIG_HOTPLUG_CPU=y
-# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
-# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
# CONFIG_COMPAT_VDSO is not set
# CONFIG_CMDLINE_BOOL is not set
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION=""
CONFIG_PM_SLEEP=y
-CONFIG_PM_SLEEP_SMP=y
# CONFIG_PM_AUTOSLEEP is not set
# CONFIG_PM_WAKELOCKS is not set
# CONFIG_PM_RUNTIME is not set
CONFIG_ACPI_FAN=y
CONFIG_ACPI_DOCK=y
CONFIG_ACPI_PROCESSOR=y
-CONFIG_ACPI_HOTPLUG_CPU=y
# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set
CONFIG_ACPI_THERMAL=y
# CONFIG_ACPI_CUSTOM_DSDT is not set
# CONFIG_PCI_DEBUG is not set
# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
# CONFIG_PCI_STUB is not set
-CONFIG_HT_IRQ=y
# CONFIG_PCI_IOV is not set
# CONFIG_PCI_PRI is not set
# CONFIG_PCI_PASID is not set
CONFIG_PCCARD_NONSTATIC=y
CONFIG_HOTPLUG_PCI=y
# CONFIG_HOTPLUG_PCI_COMPAQ is not set
-# CONFIG_HOTPLUG_PCI_IBM is not set
CONFIG_HOTPLUG_PCI_ACPI=y
# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
# CONFIG_HOTPLUG_PCI_CPCI is not set
# CONFIG_NETLINK_MMAP is not set
# CONFIG_NETLINK_DIAG is not set
# CONFIG_NET_MPLS_GSO is not set
-CONFIG_RPS=y
-CONFIG_RFS_ACCEL=y
-CONFIG_XPS=y
CONFIG_NETPRIO_CGROUP=y
CONFIG_NET_RX_BUSY_POLL=y
CONFIG_BQL=y
-CONFIG_NET_FLOW_LIMIT=y
#
# Network testing
#
# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
CONFIG_THERMAL_GOV_STEP_WISE=y
-CONFIG_THERMAL_GOV_USER_SPACE=y
+# CONFIG_THERMAL_GOV_USER_SPACE is not set
# CONFIG_CPU_THERMAL is not set
# CONFIG_THERMAL_EMULATION is not set
# CONFIG_INTEL_POWERCLAMP is not set
-CONFIG_X86_PKG_TEMP_THERMAL=m
#
# Texas Instruments thermal drivers
#
CONFIG_EDAC=y
CONFIG_EDAC_LEGACY_SYSFS=y
# CONFIG_EDAC_DEBUG is not set
-CONFIG_EDAC_DECODE_MCE=y
-# CONFIG_EDAC_MCE_INJ is not set
# CONFIG_EDAC_MM_EDAC is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_VIRTUAL is not set
CONFIG_DEBUG_MEMORY_INIT=y
-# CONFIG_DEBUG_PER_CPU_MAPS is not set
# CONFIG_DEBUG_HIGHMEM is not set
CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_BOOT_PARAMS=y
# CONFIG_CPA_DEBUG is not set
CONFIG_OPTIMIZE_INLINING=y
-# CONFIG_DEBUG_NMI_SELFTEST is not set
# CONFIG_X86_DEBUG_STATIC_CPU_HAS is not set
#
CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
-# CONFIG_CRYPTO_PCRYPT is not set
CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_AUTHENC=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
CONFIG_CHECK_SIGNATURE=y
-CONFIG_CPU_RMAP=y
CONFIG_DQL=y
CONFIG_NLATTR=y
CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
struct drm_display_mode *preferred_mode =
drm_mode_create_from_cmdline_mode(drm_dev,
&cmdline_mode);
+
+ /* qHD workaround (540x960) */
+ if (cmdline_mode.xres == 540 && cmdline_mode.yres == 960) {
+ preferred_mode->hdisplay = cmdline_mode.xres;
+ preferred_mode->hsync_start = preferred_mode->hsync_start - 1;
+ preferred_mode->hsync_end = preferred_mode->hsync_end - 1;
+ }
+
preferred_mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_crtcinfo(preferred_mode, CRTC_INTERLACE_HALVE_V);
drm_mode_probed_add(connector, preferred_mode);
return 1;
}
return;
}
- if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
- (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
- dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
- __func__, dj_report->device_index);
- return;
- }
-
if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
/* The device is already known. No need to reallocate it. */
dbg_hid("%s: device is already known\n", __func__);
* device (via hid_input_report() ) and return 1 so hid-core does not do
* anything else with it.
*/
+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
+ __func__, dj_report->device_index);
+ return false;
+ }
spin_lock_irqsave(&djrcv_dev->lock, flags);
if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
if (size < 4 || ((size - 4) % 9) != 0)
return 0;
npoints = (size - 4) / 9;
+ if (npoints > 15) {
+ hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
+ size);
+ return 0;
+ }
msc->ntouches = 0;
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
if (size < 6 || ((size - 6) % 8) != 0)
return 0;
npoints = (size - 6) / 8;
+ if (npoints > 15) {
+ hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
+ size);
+ return 0;
+ }
msc->ntouches = 0;
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
if (!data)
return 1;
+ if (size > 64) {
+ hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
+ size);
+ return 0;
+ }
+
if (report->id == REPORT_KEY_STATE) {
if (data->input_keys)
ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
#define CODEC_IRQ_TASK 0x1f
// DEBUG
-#ifdef CODEC_DEBUG
-#define DEBUG(fmt, ...) \
- printk(KERN_DEBUG "[%s][%d]: " fmt, DEVICE_NAME, __LINE__, ##__VA_ARGS__)
+int brillcodec_debug = 0;
+module_param(brillcodec_debug, int, 0644);
+MODULE_PARM_DESC(brillcodec_debug, "Turn on/off brillcodec debugging (default:off).");
-#define INFO(fmt, ...) \
- printk(KERN_INFO "[%s][%d]: " fmt, DEVICE_NAME, __LINE__, ##__VA_ARGS__)
-#else
-#define DEBUG(fmt, ...)
+#define CODEC_DBG(level, fmt, ...) \
+ do { \
+ if (brillcodec_debug > 0) { \
+ printk(level "[%s][%d]: " fmt, DEVICE_NAME, __LINE__, ##__VA_ARGS__); \
+ } \
+ } while (0)
-#define INFO(fmt, ...)
-#endif
-
-#define ERROR(fmt, ...) \
- printk(KERN_ERR "[%s][%d]: " fmt, DEVICE_NAME, __LINE__, ##__VA_ARGS__)
/* Define i/o and api values. */
enum codec_io_cmd {
{
uint32_t value;
- DEBUG("%s\n", __func__);
+ CODEC_DBG(KERN_DEBUG, "%s\n", __func__);
do {
value =
readl(maru_brill_codec->ioaddr + CODEC_CMD_GET_CTX_FROM_QUEUE);
- DEBUG("read a value from device %x.\n", value);
+ CODEC_DBG(KERN_DEBUG, "read a value from device %x.\n", value);
if (value) {
context_flags[value] = 1;
wake_up_interruptible(&wait_queue);
} else {
- DEBUG("there is no available task\n");
+ CODEC_DBG(KERN_DEBUG, "there is no available task\n");
}
} while (value);
}
static void codec_bh(struct maru_brill_codec_device *dev)
{
- DEBUG("add bottom-half function to codec_workqueue\n");
+ CODEC_DBG(KERN_DEBUG, "add bottom-half function to codec_workqueue\n");
queue_work(codec_bh_workqueue, &codec_bh_work);
}
} else if (buf_size < CODEC_L_DEVICE_MEM_SIZE) {
index = LARGE;
} else {
- ERROR("invalid buffer size: %x\n", buf_size);
+ CODEC_DBG(KERN_ERR, "invalid buffer size: %x\n", buf_size);
return -1;
}
block = &maru_brill_codec->memory_blocks[index];
// decrease buffer_semaphore
- DEBUG("before down buffer_sema: %d\n", block->semaphore.count);
+ CODEC_DBG(KERN_DEBUG, "before down buffer_sema: %d\n", block->semaphore.count);
if (non_blocking) {
if (down_trylock(&block->semaphore)) { // if 1
- DEBUG("buffer is not available now\n");
+ CODEC_DBG(KERN_DEBUG, "buffer is not available now\n");
return -1;
}
} else {
if (down_trylock(&block->semaphore)) { // if 1
if (down_interruptible(&block->last_buf_semaphore)) { // if -EINTR
- DEBUG("down_interruptible interrupted\n");
+ CODEC_DBG(KERN_DEBUG, "down_interruptible interrupted\n");
return -1;
}
block->last_buf_secured = 1; // protected under last_buf_semaphore
ret = 1;
- DEBUG("lock last buffer semaphore.\n");
+ CODEC_DBG(KERN_DEBUG, "lock last buffer semaphore.\n");
}
}
- DEBUG("after down buffer_sema: %d\n", block->semaphore.count);
+ CODEC_DBG(KERN_DEBUG, "after down buffer_sema: %d\n", block->semaphore.count);
mutex_lock(&block->access_mutex);
unit = list_first_entry(&block->available, struct device_mem, entry);
} else {
up(&block->semaphore);
}
- ERROR("failed to get memory block.\n");
+ CODEC_DBG(KERN_ERR, "failed to get memory block.\n");
} else {
unit->ctx_id = ctx_id;
list_move_tail(&unit->entry, &block->occupied);
*offset = unit->mem_offset;
- DEBUG("get available memory region: 0x%x\n", ret);
+ CODEC_DBG(KERN_DEBUG, "get available memory region: 0x%x\n", ret);
}
mutex_unlock(&block->access_mutex);
index = LARGE;
} else {
// error
- ERROR("invalid memory offsset. offset = 0x%x.\n", (uint32_t)mem_offset);
+ CODEC_DBG(KERN_ERR, "invalid memory offsset. offset = 0x%x.\n", (uint32_t)mem_offset);
return;
}
if (block->last_buf_secured) {
block->last_buf_secured = 0;
up(&block->last_buf_semaphore);
- DEBUG("unlock last buffer semaphore.\n");
+ CODEC_DBG(KERN_DEBUG, "unlock last buffer semaphore.\n");
} else {
up(&block->semaphore);
- DEBUG("unlock semaphore: %d.\n", block->semaphore.count);
+ CODEC_DBG(KERN_DEBUG, "unlock semaphore: %d.\n", block->semaphore.count);
}
found = true;
}
if (!found) {
// can not enter here...
- ERROR("cannot find this memory block. offset = 0x%x.\n", (uint32_t)mem_offset);
+ CODEC_DBG(KERN_ERR, "cannot find this memory block. offset = 0x%x.\n", (uint32_t)mem_offset);
}
} else {
// can not enter here...
- ERROR("there is not any using memory block.\n");
+ CODEC_DBG(KERN_ERR, "there is not any using memory block.\n");
}
mutex_unlock(&block->access_mutex);
}
if (unit->ctx_id == context_id) {
unit->ctx_id = 0;
list_move_tail(&unit->entry, &block->available);
- INFO("dispose memory block: %x", unit->mem_offset);
+ CODEC_DBG(KERN_INFO, "dispose memory block: %x", unit->mem_offset);
}
}
}
memaddr = ioremap(maru_brill_codec->mem_start,
maru_brill_codec->mem_size);
if (!memaddr) {
- ERROR("ioremap failed\n");
+ CODEC_DBG(KERN_ERR, "ioremap failed\n");
return;
}
codec_info =
kzalloc(codec_info_len, GFP_KERNEL);
if (!codec_info) {
- ERROR("falied to allocate codec_info memory!\n");
+ CODEC_DBG(KERN_ERR, "falied to allocate codec_info memory!\n");
return;
}
uint32_t offset = 0;
unsigned long flags;
- DEBUG("read data into small buffer\n");
+ CODEC_DBG(KERN_DEBUG, "read data into small buffer\n");
value = secure_device_memory(opaque->buffer_index, opaque->buffer_size, 0, &offset);
if (value < 0) {
- DEBUG("failed to get available memory\n");
+ CODEC_DBG(KERN_DEBUG, "failed to get available memory\n");
ret = -EINVAL;
} else {
- DEBUG("send a request to pop data from device. %d\n", opaque->buffer_index);
+ CODEC_DBG(KERN_DEBUG, "send a request to pop data from device. %d\n", opaque->buffer_index);
ENTER_CRITICAL_SECTION(flags);
writel((uint32_t)offset,
switch (cmd) {
case CODEC_CMD_GET_VERSION:
{
- DEBUG("%s version: %d\n", DEVICE_NAME, maru_brill_codec->version);
+ CODEC_DBG(KERN_DEBUG, "%s version: %d\n", DEVICE_NAME, maru_brill_codec->version);
if (copy_to_user((void *)arg, &maru_brill_codec->version, sizeof(int))) {
- ERROR("ioctl: failed to copy data to user\n");
+ CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user\n");
ret = -EIO;
}
break;
uint32_t len = 0;
unsigned long flags;
- DEBUG("request a device to get codec elements\n");
+ CODEC_DBG(KERN_DEBUG, "request a device to get codec elements\n");
ENTER_CRITICAL_SECTION(flags);
if (!maru_brill_codec->codec_elem_cached) {
value = readl(maru_brill_codec->ioaddr + cmd);
if (value < 0) {
- ERROR("ioctl: failed to get elements. %d\n", (int)value);
+ CODEC_DBG(KERN_ERR, "ioctl: failed to get elements. %d\n", (int)value);
ret = -EINVAL;
}
maru_brill_codec_info_cache();
LEAVE_CRITICAL_SECTION(flags);
if (copy_to_user((void *)arg, &len, sizeof(uint32_t))) {
- ERROR("ioctl: failed to copy data to user\n");
+ CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user\n");
ret = -EIO;
}
break;
void *codec_elem = NULL;
uint32_t elem_len = maru_brill_codec->codec_elem.buf_size;
- DEBUG("request codec elements.\n");
+ CODEC_DBG(KERN_DEBUG, "request codec elements.\n");
codec_elem = maru_brill_codec->codec_elem.buf;
if (!codec_elem) {
- ERROR("ioctl: codec elements is empty\n");
+ CODEC_DBG(KERN_ERR, "ioctl: codec elements is empty\n");
ret = -EIO;
} else if (copy_to_user((void *)arg, codec_elem, elem_len)) {
- ERROR("ioctl: failed to copy data to user\n");
+ CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user\n");
ret = -EIO;
}
break;
}
case CODEC_CMD_GET_CONTEXT_INDEX:
{
- DEBUG("request a device to get an index of codec context \n");
+ CODEC_DBG(KERN_DEBUG, "request a device to get an index of codec context \n");
value = readl(maru_brill_codec->ioaddr + cmd);
if (value < 1 || value > (CODEC_CONTEXT_SIZE - 1)) {
- ERROR("ioctl: failed to get proper context. %d\n", (int)value);
+ CODEC_DBG(KERN_ERR, "ioctl: failed to get proper context. %d\n", (int)value);
ret = -EINVAL;
} else {
// task_id & context_id
- DEBUG("add context. ctx_id: %d\n", (int)value);
+ CODEC_DBG(KERN_DEBUG, "add context. ctx_id: %d\n", (int)value);
context_add((uint32_t)file, value);
if (copy_to_user((void *)arg, &value, sizeof(int))) {
- ERROR("ioctl: failed to copy data to user\n");
+ CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user\n");
ret = -EIO;
}
}
struct codec_buffer_id opaque;
if (copy_from_user(&opaque, (void *)arg, sizeof(struct codec_buffer_id))) {
- ERROR("ioctl: failed to copy data from user\n");
+ CODEC_DBG(KERN_ERR, "ioctl: failed to copy data from user\n");
ret = -EIO;
break;
}
}
if (copy_to_user((void *)arg, &opaque, sizeof(struct codec_buffer_id))) {
- ERROR("ioctl: failed to copy data to user.\n");
+ CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user.\n");
ret = -EIO;
}
break;
uint32_t offset = 0;
struct codec_buffer_id opaque;
- DEBUG("read data into small buffer\n");
+ CODEC_DBG(KERN_DEBUG, "read data into small buffer\n");
if (copy_from_user(&opaque, (void *)arg, sizeof(struct codec_buffer_id))) {
- ERROR("ioctl: failed to copy data from user\n");
+ CODEC_DBG(KERN_ERR, "ioctl: failed to copy data from user\n");
ret = -EIO;
break;
}
value = secure_device_memory(opaque.buffer_index, opaque.buffer_size, 0, &offset);
if (value < 0) {
- DEBUG("failed to get available memory\n");
+ CODEC_DBG(KERN_DEBUG, "failed to get available memory\n");
ret = -EINVAL;
} else {
opaque.buffer_size = offset;
if (copy_to_user((void *)arg, &opaque, sizeof(struct codec_buffer_id))) {
- ERROR("ioctl: failed to copy data to user.\n");
+ CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user.\n");
ret = -EIO;
}
}
uint32_t offset = 0;
struct codec_buffer_id opaque;
- DEBUG("read data into small buffer\n");
+ CODEC_DBG(KERN_DEBUG, "read data into small buffer\n");
if (copy_from_user(&opaque, (void *)arg, sizeof(struct codec_buffer_id))) {
- ERROR("ioctl: failed to copy data from user\n");
+ CODEC_DBG(KERN_ERR, "ioctl: failed to copy data from user\n");
ret = -EIO;
break;
}
value = secure_device_memory(opaque.buffer_index, opaque.buffer_size, 1, &offset);
if (value < 0) {
- DEBUG("failed to get available memory\n");
+ CODEC_DBG(KERN_DEBUG, "failed to get available memory\n");
ret = -EINVAL;
} else {
opaque.buffer_size = offset;
if (copy_to_user((void *)arg, &opaque, sizeof(struct codec_buffer_id))) {
- ERROR("ioctl: failed to copy data to user.\n");
+ CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user.\n");
ret = -EIO;
}
}
uint32_t mem_offset;
if (copy_from_user(&mem_offset, (void *)arg, sizeof(uint32_t))) {
- ERROR("ioctl: failed to copy data from user\n");
+ CODEC_DBG(KERN_ERR, "ioctl: failed to copy data from user\n");
ret = -EIO;
break;
}
struct codec_param ioparam = { 0, };
if (copy_from_user(&ioparam, (void *)arg, sizeof(struct codec_param))) {
- ERROR("failed to get codec parameter info from user\n");
+ CODEC_DBG(KERN_ERR, "failed to get codec parameter info from user\n");
ret = -EIO;
break;
}
}
if (copy_to_user((void *)arg, &ioparam, sizeof(struct codec_param))) {
- ERROR("ioctl: failed to copy data to user.\n");
+ CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user.\n");
ret = -EIO;
}
}
}
break;
default:
- DEBUG("no available command.");
+ CODEC_DBG(KERN_DEBUG, "no available command.");
ret = -EINVAL;
break;
}
int api_index, ctx_index;
unsigned long flags;
- DEBUG("enter %s\n", __func__);
+ CODEC_DBG(KERN_DEBUG, "enter %s\n", __func__);
api_index = ioparam->api_index;
ctx_index = ioparam->ctx_index;
break;
}
default:
- DEBUG("invalid API commands: %d", api_index);
+ CODEC_DBG(KERN_DEBUG, "invalid API commands: %d", api_index);
return -1;
}
dispose_device_memory(ioparam->ctx_index);
}
- DEBUG("leave %s\n", __func__);
+ CODEC_DBG(KERN_DEBUG, "leave %s\n", __func__);
return 0;
}
size = vm->vm_end - vm->vm_start;
if (size > maru_brill_codec->mem_size) {
- ERROR("over mapping size\n");
+ CODEC_DBG(KERN_ERR, "over mapping size\n");
return -EINVAL;
}
off = vm->vm_pgoff << PAGE_SHIFT;
ret = remap_pfn_range(vm, vm->vm_start, phys_addr,
size, vm->vm_page_prot);
if (ret < 0) {
- ERROR("failed to remap page range\n");
+ CODEC_DBG(KERN_ERR, "failed to remap page range\n");
return -EAGAIN;
}
spin_lock_irqsave(&dev->lock, flags);
- DEBUG("handle an interrupt from codec device.\n");
+ CODEC_DBG(KERN_DEBUG, "handle an interrupt from codec device.\n");
codec_bh(dev);
spin_unlock_irqrestore(&dev->lock, flags);
struct context_id *cid_elem = NULL;
unsigned long flags;
- DEBUG("enter: %s\n", __func__);
+ CODEC_DBG(KERN_DEBUG, "enter: %s\n", __func__);
- DEBUG("before inserting context. user_pid: %x, ctx_id: %d\n",
+ CODEC_DBG(KERN_DEBUG, "before inserting context. user_pid: %x, ctx_id: %d\n",
user_pid, ctx_id);
ENTER_CRITICAL_SECTION(flags);
list_for_each_safe(pos, temp, &maru_brill_codec->user_pid_mgr) {
pid_elem = list_entry(pos, struct user_process_id, pid_node);
- DEBUG("add context. pid_elem: %p\n", pid_elem);
+ CODEC_DBG(KERN_DEBUG, "add context. pid_elem: %p\n", pid_elem);
if (pid_elem && pid_elem->id == user_pid) {
- DEBUG("add context. user_pid: %x, ctx_id: %d\n",
+ CODEC_DBG(KERN_DEBUG, "add context. user_pid: %x, ctx_id: %d\n",
user_pid, ctx_id);
cid_elem = kzalloc(sizeof(struct context_id), GFP_KERNEL);
if (!cid_elem) {
- ERROR("failed to allocate context_mgr memory\n");
+ CODEC_DBG(KERN_ERR, "failed to allocate context_mgr memory\n");
return;
}
INIT_LIST_HEAD(&cid_elem->node);
- DEBUG("add context. user_pid: %x, pid_elem: %p, cid_elem: %p, node: %p\n",
+ CODEC_DBG(KERN_DEBUG, "add context. user_pid: %x, pid_elem: %p, cid_elem: %p, node: %p\n",
user_pid, pid_elem, cid_elem, &cid_elem->node);
cid_elem->id = ctx_id;
}
}
} else {
- DEBUG("user_pid_mgr is empty\n");
+ CODEC_DBG(KERN_DEBUG, "user_pid_mgr is empty\n");
}
LEAVE_CRITICAL_SECTION(flags);
- DEBUG("leave: %s\n", __func__);
+ CODEC_DBG(KERN_DEBUG, "leave: %s\n", __func__);
}
static void maru_brill_codec_context_remove(struct user_process_id *pid_elem)
struct list_head *pos, *temp;
struct context_id *cid_elem = NULL;
- DEBUG("enter: %s\n", __func__);
+ CODEC_DBG(KERN_DEBUG, "enter: %s\n", __func__);
if (!list_empty(&pid_elem->ctx_id_mgr)) {
list_for_each_safe(pos, temp, &pid_elem->ctx_id_mgr) {
cid_elem = list_entry(pos, struct context_id, node);
if (cid_elem) {
if (cid_elem->id > 0 && cid_elem->id < CODEC_CONTEXT_SIZE) {
- DEBUG("remove context. ctx_id: %d\n", cid_elem->id);
+ CODEC_DBG(KERN_DEBUG, "remove context. ctx_id: %d\n", cid_elem->id);
writel(cid_elem->id,
maru_brill_codec->ioaddr + CODEC_CMD_RELEASE_CONTEXT);
dispose_device_memory(cid_elem->id);
}
- DEBUG("delete node from ctx_id_mgr. %p\n", &cid_elem->node);
+ CODEC_DBG(KERN_DEBUG, "delete node from ctx_id_mgr. %p\n", &cid_elem->node);
__list_del_entry(&cid_elem->node);
- DEBUG("release cid_elem. %p\n", cid_elem);
+ CODEC_DBG(KERN_DEBUG, "release cid_elem. %p\n", cid_elem);
kfree(cid_elem);
} else {
- DEBUG("no context in the pid_elem\n");
+ CODEC_DBG(KERN_DEBUG, "no context in the pid_elem\n");
}
}
} else {
- DEBUG("ctx_id_mgr is empty. user_pid: %x\n", pid_elem->id);
+ CODEC_DBG(KERN_DEBUG, "ctx_id_mgr is empty. user_pid: %x\n", pid_elem->id);
}
- DEBUG("leave: %s\n", __func__);
+ CODEC_DBG(KERN_DEBUG, "leave: %s\n", __func__);
}
static void maru_brill_codec_task_add(uint32_t user_pid)
struct user_process_id *pid_elem = NULL;
unsigned long flags;
- DEBUG("enter: %s\n", __func__);
+ CODEC_DBG(KERN_DEBUG, "enter: %s\n", __func__);
ENTER_CRITICAL_SECTION(flags);
pid_elem = kzalloc(sizeof(struct user_process_id), GFP_KERNEL);
if (!pid_elem) {
- ERROR("failed to allocate user_process memory\n");
+ CODEC_DBG(KERN_ERR, "failed to allocate user_process memory\n");
return;
}
INIT_LIST_HEAD(&pid_elem->pid_node);
INIT_LIST_HEAD(&pid_elem->ctx_id_mgr);
- DEBUG("add task. user_pid: %x, pid_elem: %p, pid_node: %p\n",
+ CODEC_DBG(KERN_DEBUG, "add task. user_pid: %x, pid_elem: %p, pid_node: %p\n",
user_pid, pid_elem, &pid_elem->pid_node);
pid_elem->id = user_pid;
list_add_tail(&pid_elem->pid_node, &maru_brill_codec->user_pid_mgr);
LEAVE_CRITICAL_SECTION(flags);
- DEBUG("leave: %s\n", __func__);
+ CODEC_DBG(KERN_DEBUG, "leave: %s\n", __func__);
}
static void maru_brill_codec_task_remove(uint32_t user_pid)
struct user_process_id *pid_elem = NULL;
unsigned long flags;
- DEBUG("enter: %s\n", __func__);
+ CODEC_DBG(KERN_DEBUG, "enter: %s\n", __func__);
ENTER_CRITICAL_SECTION(flags);
if (!list_empty(&maru_brill_codec->user_pid_mgr)) {
if (pid_elem) {
if (pid_elem->id == user_pid) {
// remove task and codec contexts that is running in the task.
- DEBUG("remove task. user_pid: %x, pid_elem: %p\n",
+ CODEC_DBG(KERN_DEBUG, "remove task. user_pid: %x, pid_elem: %p\n",
user_pid, pid_elem);
maru_brill_codec_context_remove(pid_elem);
}
- DEBUG("move pid_node from user_pid_mgr. %p\n", &pid_elem->pid_node);
+ CODEC_DBG(KERN_DEBUG, "move pid_node from user_pid_mgr. %p\n", &pid_elem->pid_node);
__list_del_entry(&pid_elem->pid_node);
- DEBUG("release pid_elem. %p\n", pid_elem);
+ CODEC_DBG(KERN_DEBUG, "release pid_elem. %p\n", pid_elem);
kfree(pid_elem);
} else {
- DEBUG("no task in the user_pid_mgr\n");
+ CODEC_DBG(KERN_DEBUG, "no task in the user_pid_mgr\n");
}
}
} else {
- DEBUG("user_pid_mgr is empty\n");
+ CODEC_DBG(KERN_DEBUG, "user_pid_mgr is empty\n");
}
LEAVE_CRITICAL_SECTION(flags);
- DEBUG("leave: %s\n", __func__);
+ CODEC_DBG(KERN_DEBUG, "leave: %s\n", __func__);
}
static int maru_brill_codec_open(struct inode *inode, struct file *file)
{
- DEBUG("open! struct file: %p\n", file);
+ CODEC_DBG(KERN_DEBUG, "open! struct file: %p\n", file);
/* register interrupt handler */
if (request_irq(maru_brill_codec->dev->irq, maru_brill_codec_irq_handler,
IRQF_SHARED, DEVICE_NAME, maru_brill_codec)) {
- ERROR("failed to register irq handle\n");
+ CODEC_DBG(KERN_ERR, "failed to register irq handle\n");
return -EBUSY;
}
static int maru_brill_codec_release(struct inode *inode, struct file *file)
{
- DEBUG("close! struct file: %p\n", file);
+ CODEC_DBG(KERN_DEBUG, "close! struct file: %p\n", file);
/* free irq */
if (maru_brill_codec->dev->irq) {
- DEBUG("free registered irq\n");
+ CODEC_DBG(KERN_DEBUG, "free registered irq\n");
free_irq(maru_brill_codec->dev->irq, maru_brill_codec);
}
- DEBUG("before removing task: %x\n", (uint32_t)file);
+ CODEC_DBG(KERN_DEBUG, "before removing task: %x\n", (uint32_t)file);
/* free resource */
maru_brill_codec_task_remove((uint32_t)file);
maru_brill_codec =
kzalloc(sizeof(struct maru_brill_codec_device), GFP_KERNEL);
if (!maru_brill_codec) {
- ERROR("Failed to allocate memory for codec.\n");
+ CODEC_DBG(KERN_ERR, "Failed to allocate memory for codec.\n");
return -ENOMEM;
}
spin_lock_init(&maru_brill_codec->lock);
if ((ret = pci_enable_device(pci_dev))) {
- ERROR("pci_enable_device failed\n");
+ CODEC_DBG(KERN_ERR, "pci_enable_device failed\n");
return ret;
}
pci_set_master(pci_dev);
maru_brill_codec->mem_start = pci_resource_start(pci_dev, 0);
maru_brill_codec->mem_size = pci_resource_len(pci_dev, 0);
if (!maru_brill_codec->mem_start) {
- ERROR("pci_resource_start failed\n");
+ CODEC_DBG(KERN_ERR, "pci_resource_start failed\n");
pci_disable_device(pci_dev);
return -ENODEV;
}
if (!request_mem_region(maru_brill_codec->mem_start,
maru_brill_codec->mem_size,
DEVICE_NAME)) {
- ERROR("request_mem_region failed\n");
+ CODEC_DBG(KERN_ERR, "request_mem_region failed\n");
pci_disable_device(pci_dev);
return -EINVAL;
}
maru_brill_codec->io_start = pci_resource_start(pci_dev, 1);
maru_brill_codec->io_size = pci_resource_len(pci_dev, 1);
if (!maru_brill_codec->io_start) {
- ERROR("pci_resource_start failed\n");
+ CODEC_DBG(KERN_ERR, "pci_resource_start failed\n");
release_mem_region(maru_brill_codec->mem_start, maru_brill_codec->mem_size);
pci_disable_device(pci_dev);
return -ENODEV;
if (!request_mem_region(maru_brill_codec->io_start,
maru_brill_codec->io_size,
DEVICE_NAME)) {
- ERROR("request_io_region failed\n");
+ CODEC_DBG(KERN_ERR, "request_io_region failed\n");
release_mem_region(maru_brill_codec->mem_start, maru_brill_codec->mem_size);
pci_disable_device(pci_dev);
return -EINVAL;
maru_brill_codec->ioaddr =
ioremap_nocache(maru_brill_codec->io_start, maru_brill_codec->io_size);
if (!maru_brill_codec->ioaddr) {
- ERROR("ioremap failed\n");
+ CODEC_DBG(KERN_ERR, "ioremap failed\n");
release_mem_region(maru_brill_codec->io_start, maru_brill_codec->io_size);
release_mem_region(maru_brill_codec->mem_start, maru_brill_codec->mem_size);
pci_disable_device(pci_dev);
maru_brill_codec_get_device_version();
if ((ret = misc_register(&codec_dev))) {
- ERROR("cannot register codec as misc\n");
+ CODEC_DBG(KERN_ERR, "cannot register codec as misc\n");
iounmap(maru_brill_codec->ioaddr);
release_mem_region(maru_brill_codec->io_start, maru_brill_codec->io_size);
release_mem_region(maru_brill_codec->mem_start, maru_brill_codec->mem_size);
codec_bh_workqueue = create_workqueue ("maru_brill_codec");
if (!codec_bh_workqueue) {
- ERROR("failed to allocate workqueue\n");
+ CODEC_DBG(KERN_ERR, "failed to allocate workqueue\n");
return -ENOMEM;
}
0x00, 0x00, 0x00, 0x00,
0x00, 0x00 };
+ if (cmd->msg_len > sizeof(b) - 4)
+ return -EINVAL;
+
memcpy(&b[4], cmd->msg, cmd->msg_len);
state->config->send_command(fe, 0x72,
dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__);
return;
}
+ if (!urb->actual_length) {
+ dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__);
+ return;
+ }
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status);
if (status != -ENOENT)
/* These are unsolicited reports from the firmware, hence no
waiting command to wakeup */
dev_dbg(&urb->dev->dev, "%s - event received\n", __func__);
- } else if (data[0] == WHITEHEAT_GET_DTR_RTS) {
+ } else if ((data[0] == WHITEHEAT_GET_DTR_RTS) &&
+ (urb->actual_length - 1 <= sizeof(command_info->result_buffer))) {
memcpy(command_info->result_buffer, &data[1],
urb->actual_length - 1);
command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
return;
}
-static int isofs_read_inode(struct inode *);
+static int isofs_read_inode(struct inode *, int relocated);
static int isofs_statfs (struct dentry *, struct kstatfs *);
static struct kmem_cache *isofs_inode_cachep;
goto out;
}
-static int isofs_read_inode(struct inode *inode)
+static int isofs_read_inode(struct inode *inode, int relocated)
{
struct super_block *sb = inode->i_sb;
struct isofs_sb_info *sbi = ISOFS_SB(sb);
*/
if (!high_sierra) {
- parse_rock_ridge_inode(de, inode);
+ parse_rock_ridge_inode(de, inode, relocated);
/* if we want uid/gid set, override the rock ridge setting */
if (sbi->s_uid_set)
inode->i_uid = sbi->s_uid;
* offset that point to the underlying meta-data for the inode. The
* code below is otherwise similar to the iget() code in
* include/linux/fs.h */
-struct inode *isofs_iget(struct super_block *sb,
- unsigned long block,
- unsigned long offset)
+struct inode *__isofs_iget(struct super_block *sb,
+ unsigned long block,
+ unsigned long offset,
+ int relocated)
{
unsigned long hashval;
struct inode *inode;
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
- ret = isofs_read_inode(inode);
+ ret = isofs_read_inode(inode, relocated);
if (ret < 0) {
iget_failed(inode);
inode = ERR_PTR(ret);
struct inode; /* To make gcc happy */
-extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *);
+extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *, int relocated);
extern int get_rock_ridge_filename(struct iso_directory_record *, char *, struct inode *);
extern int isofs_name_translate(struct iso_directory_record *, char *, struct inode *);
extern struct buffer_head *isofs_bread(struct inode *, sector_t);
extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
-extern struct inode *isofs_iget(struct super_block *sb,
- unsigned long block,
- unsigned long offset);
+struct inode *__isofs_iget(struct super_block *sb,
+ unsigned long block,
+ unsigned long offset,
+ int relocated);
+
+static inline struct inode *isofs_iget(struct super_block *sb,
+ unsigned long block,
+ unsigned long offset)
+{
+ return __isofs_iget(sb, block, offset, 0);
+}
+
+static inline struct inode *isofs_iget_reloc(struct super_block *sb,
+ unsigned long block,
+ unsigned long offset)
+{
+ return __isofs_iget(sb, block, offset, 1);
+}
/* Because the inode number is no longer relevant to finding the
* underlying meta-data for an inode, we are free to choose a more
int cont_size;
int cont_extent;
int cont_offset;
+ int cont_loops;
struct inode *inode;
};
rs->inode = inode;
}
+/* Maximum number of Rock Ridge continuation entries */
+#define RR_MAX_CE_ENTRIES 32
+
/*
* Returns 0 if the caller should continue scanning, 1 if the scan must end
* and -ve on error.
goto out;
}
ret = -EIO;
+ if (++rs->cont_loops >= RR_MAX_CE_ENTRIES)
+ goto out;
bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
if (bh) {
memcpy(rs->buffer, bh->b_data + rs->cont_offset,
goto out;
}
+#define RR_REGARD_XA 1
+#define RR_RELOC_DE 2
+
static int
parse_rock_ridge_inode_internal(struct iso_directory_record *de,
- struct inode *inode, int regard_xa)
+ struct inode *inode, int flags)
{
int symlink_len = 0;
int cnt, sig;
+ unsigned int reloc_block;
struct inode *reloc;
struct rock_ridge *rr;
int rootflag;
init_rock_state(&rs, inode);
setup_rock_ridge(de, inode, &rs);
- if (regard_xa) {
+ if (flags & RR_REGARD_XA) {
rs.chr += 14;
rs.len -= 14;
if (rs.len < 0)
rs.cont_size = isonum_733(rr->u.CE.size);
break;
case SIG('E', 'R'):
+ /* Invalid length of ER tag id? */
+ if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
+ goto out;
ISOFS_SB(inode->i_sb)->s_rock = 1;
printk(KERN_DEBUG "ISO 9660 Extensions: ");
{
"relocated directory\n");
goto out;
case SIG('C', 'L'):
- ISOFS_I(inode)->i_first_extent =
- isonum_733(rr->u.CL.location);
- reloc =
- isofs_iget(inode->i_sb,
- ISOFS_I(inode)->i_first_extent,
- 0);
+ if (flags & RR_RELOC_DE) {
+ printk(KERN_ERR
+ "ISOFS: Recursive directory relocation "
+ "is not supported\n");
+ goto eio;
+ }
+ reloc_block = isonum_733(rr->u.CL.location);
+ if (reloc_block == ISOFS_I(inode)->i_iget5_block &&
+ ISOFS_I(inode)->i_iget5_offset == 0) {
+ printk(KERN_ERR
+ "ISOFS: Directory relocation points to "
+ "itself\n");
+ goto eio;
+ }
+ ISOFS_I(inode)->i_first_extent = reloc_block;
+ reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0);
if (IS_ERR(reloc)) {
ret = PTR_ERR(reloc);
goto out;
return rpnt;
}
-int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
+int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
+ int relocated)
{
- int result = parse_rock_ridge_inode_internal(de, inode, 0);
+ int flags = relocated ? RR_RELOC_DE : 0;
+ int result = parse_rock_ridge_inode_internal(de, inode, flags);
/*
* if rockridge flag was reset and we didn't look for attributes
*/
if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
&& (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
- result = parse_rock_ridge_inode_internal(de, inode, 14);
+ result = parse_rock_ridge_inode_internal(de, inode,
+ flags | RR_REGARD_XA);
}
return result;
}
mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
/* Don't allow unprivileged users to change mount flags */
- if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
- mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
+ if (flag & CL_UNPRIVILEGED) {
+ mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
+
+ if (mnt->mnt.mnt_flags & MNT_READONLY)
+ mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
+
+ if (mnt->mnt.mnt_flags & MNT_NODEV)
+ mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;
+
+ if (mnt->mnt.mnt_flags & MNT_NOSUID)
+ mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;
+
+ if (mnt->mnt.mnt_flags & MNT_NOEXEC)
+ mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
+ }
/* Don't allow unprivileged users to reveal what is under a mount */
if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
if (readonly_request == __mnt_is_readonly(mnt))
return 0;
- if (mnt->mnt_flags & MNT_LOCK_READONLY)
- return -EPERM;
-
if (readonly_request)
error = mnt_make_readonly(real_mount(mnt));
else
if (path->dentry != path->mnt->mnt_root)
return -EINVAL;
+ /* Don't allow changing of locked mnt flags.
+ *
+ * No locks need to be held here while testing the various
+ * MNT_LOCK flags because those flags can never be cleared
+ * once they are set.
+ */
+ if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
+ !(mnt_flags & MNT_READONLY)) {
+ return -EPERM;
+ }
+ if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
+ !(mnt_flags & MNT_NODEV)) {
+ return -EPERM;
+ }
+ if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
+ !(mnt_flags & MNT_NOSUID)) {
+ return -EPERM;
+ }
+ if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
+ !(mnt_flags & MNT_NOEXEC)) {
+ return -EPERM;
+ }
+ if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
+ ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
+ return -EPERM;
+ }
+
err = security_sb_remount(sb, data);
if (err)
return err;
err = do_remount_sb(sb, flags, data, 0);
if (!err) {
br_write_lock(&vfsmount_lock);
- mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
+ mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
mnt->mnt.mnt_flags = mnt_flags;
br_write_unlock(&vfsmount_lock);
}
*/
if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
flags |= MS_NODEV;
- mnt_flags |= MNT_NODEV;
+ mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
}
}
if (flags & MS_RDONLY)
mnt_flags |= MNT_READONLY;
+ /* The default atime for remount is preservation */
+ if ((flags & MS_REMOUNT) &&
+ ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
+ MS_STRICTATIME)) == 0)) {
+ mnt_flags &= ~MNT_ATIME_MASK;
+ mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
+ }
+
flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
MS_STRICTATIME);
* flag, consider how it interacts with shared mounts.
*/
#define MNT_SHARED_MASK (MNT_UNBINDABLE)
-#define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE)
-
+#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
+ | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
+ | MNT_READONLY)
+#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME)
#define MNT_INTERNAL 0x4000
+#define MNT_LOCK_ATIME 0x040000
+#define MNT_LOCK_NOEXEC 0x080000
+#define MNT_LOCK_NOSUID 0x100000
+#define MNT_LOCK_NODEV 0x200000
#define MNT_LOCK_READONLY 0x400000
#define MNT_LOCKED 0x800000
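To see what the new MNT_LOCK_* bits buy, here is a hedged user-space sketch (hypothetical, error handling mostly elided; /proc is assumed to be mounted nodev, as it usually is). When unshare(2) copies the parent's mounts into an unprivileged user namespace, CL_UNPRIVILEGED stamps the lock bits, and a remount that tries to drop a locked flag is then refused with EPERM by the checks added above:

#define _GNU_SOURCE
#include <sched.h>
#include <sys/mount.h>
#include <stdio.h>

int main(void)
{
	/* Enter a fresh user+mount namespace; the copied mounts carry
	 * MNT_LOCK_* bits for whatever the parent had locked down. */
	if (unshare(CLONE_NEWUSER | CLONE_NEWNS) < 0) {
		perror("unshare");
		return 1;
	}
	/* Per-mountpoint remount of /proc without MS_NODEV: this
	 * attempts to clear a locked flag. */
	if (mount(NULL, "/proc", NULL, MS_REMOUNT | MS_BIND, NULL) < 0)
		perror("remount");	/* expected after this change: EPERM */
	return 0;
}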
asoc->pmtu_pending = 0;
}
+static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
+{
+ return !list_empty(&chunk->list);
+}
+
/* Walk through a list of TLV parameters. Don't trust the
* individual parameter lengths and instead depend on
* the chunk length to indicate when to stop. Make sure
int, __be16);
struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
union sctp_addr *addr);
-int sctp_verify_asconf(const struct sctp_association *asoc,
- struct sctp_paramhdr *param_hdr, void *chunk_end,
- struct sctp_paramhdr **errp);
+bool sctp_verify_asconf(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, bool addr_param_needed,
+ struct sctp_paramhdr **errp);
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
struct sctp_chunk *asconf);
int sctp_process_asconf_ack(struct sctp_association *asoc,
int user_ctl_count; /* count of all user controls */
struct list_head controls; /* all controls for this card */
struct list_head ctl_files; /* active control files */
+ struct mutex user_ctl_lock; /* protects user controls against
+ concurrent access */
struct snd_info_entry *proc_root; /* root for soundcard specific files */
struct snd_info_entry *proc_id; /* the card id */
return -EDEADLK;
/*
- * Surprise - we got the lock. Just return to userspace:
+ * Surprise - we got the lock, but we do not trust user space at all.
*/
- if (unlikely(!curval))
- return 1;
+ if (unlikely(!curval)) {
+ /*
+ * We verify whether there is kernel state for this
+ * futex. If not, we can safely assume, that the 0 ->
+ * TID transition is correct. If state exists, we do
+ * not bother to fixup the user space state as it was
+ * corrupted already.
+ */
+ return futex_top_waiter(hb, key) ? -EINVAL : 1;
+ }
uval = curval;
struct task_struct *new_owner;
struct futex_pi_state *pi_state = this->pi_state;
u32 uninitialized_var(curval), newval;
+ int ret = 0;
if (!pi_state)
return -EINVAL;
new_owner = this->task;
/*
- * We pass it to the next owner. (The WAITERS bit is always
- * kept enabled while there is PI state around. We must also
- * preserve the owner died bit.)
+ * We pass it to the next owner. The WAITERS bit is always
+ * kept enabled while there is PI state around. We cleanup the
+ * owner died bit, because we are the owner.
*/
- if (!(uval & FUTEX_OWNER_DIED)) {
- int ret = 0;
-
- newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+ newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
- ret = -EFAULT;
- else if (curval != uval)
- ret = -EINVAL;
- if (ret) {
- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
- return ret;
- }
+ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+ ret = -EFAULT;
+ else if (curval != uval)
+ ret = -EINVAL;
+ if (ret) {
+ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+ return ret;
}
raw_spin_lock_irq(&pi_state->owner->pi_lock);
u32 curval2;
if (requeue_pi) {
+ /*
+ * Requeue PI only works on two distinct uaddrs. This
+ * check is only valid for private futexes. See below.
+ */
+ if (uaddr1 == uaddr2)
+ return -EINVAL;
+
/*
* requeue_pi requires a pi_state, try to allocate it now
* without any locks in case it fails.
if (unlikely(ret != 0))
goto out_put_key1;
+ /*
+ * The check above which compares uaddrs is not sufficient for
+ * shared futexes. We need to compare the keys:
+ */
+ if (requeue_pi && match_futex(&key1, &key2)) {
+ ret = -EINVAL;
+ goto out_put_keys;
+ }
+
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
/*
* To avoid races, try to do the TID -> 0 atomic transition
* again. If it succeeds then we can return without waking
- * anyone else up:
+ * anyone else up. We only try this if neither the waiters nor
+ * the owner died bit are set.
*/
- if (!(uval & FUTEX_OWNER_DIED) &&
+ if (!(uval & ~FUTEX_TID_MASK) &&
cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
goto pi_faulted;
/*
/*
* No waiters - kernel unlocks the futex:
*/
- if (!(uval & FUTEX_OWNER_DIED)) {
- ret = unlock_futex_pi(uaddr, uval);
- if (ret == -EFAULT)
- goto pi_faulted;
- }
+ ret = unlock_futex_pi(uaddr, uval);
+ if (ret == -EFAULT)
+ goto pi_faulted;
out_unlock:
spin_unlock(&hb->lock);
if (ret)
goto out_key2;
+ /*
+ * The check above which compares uaddrs is not sufficient for
+ * shared futexes. We need to compare the keys:
+ */
+ if (match_futex(&q.key, &key2)) {
+ ret = -EINVAL;
+ goto out_put_keys;
+ }
+
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
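The two rejections added above (the uaddr1 == uaddr2 test for private futexes and the match_futex() key comparison for shared ones) close the well-known requeue-PI-to-self hole (CVE-2014-3153). A hedged reproducer sketch, with nr_requeue passed as 0 for brevity, now fails cleanly instead of corrupting pi_state:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	unsigned int f = 0;

	/* Requeue a PI futex onto itself: rejected up front once the
	 * checks above are in place. */
	if (syscall(SYS_futex, &f, FUTEX_CMP_REQUEUE_PI,
		    1 /* nr_wake */, NULL /* nr_requeue */, &f, 0) < 0)
		perror("futex");	/* expected: EINVAL */
	return 0;
}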
struct user_namespace *ns, int cap_setid,
struct uid_gid_map *new_map)
{
- /* Allow mapping to your own filesystem ids */
+ /* Don't allow mappings that would allow anything that wouldn't
+ * be allowed without the establishment of unprivileged mappings.
+ */
if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) {
u32 id = new_map->extent[0].lower_first;
if (cap_setid == CAP_SETUID) {
len = *ip++;
for (; len == 255; length += 255)
len = *ip++;
+ if (unlikely(length > (size_t)(length + len)))
+ goto _output_error;
length += len;
}
#include <linux/lzo.h>
#include "lzodefs.h"
-#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
-#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
-#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
-#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
-#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
+#define HAVE_IP(t, x) \
+ (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
+ (((t + x) >= t) && ((t + x) >= x)))
+
+#define HAVE_OP(t, x) \
+ (((size_t)(op_end - op) >= (size_t)(t + x)) && \
+ (((t + x) >= t) && ((t + x) >= x)))
+
+#define NEED_IP(t, x) \
+ do { \
+ if (!HAVE_IP(t, x)) \
+ goto input_overrun; \
+ } while (0)
+
+#define NEED_OP(t, x) \
+ do { \
+ if (!HAVE_OP(t, x)) \
+ goto output_overrun; \
+ } while (0)
+
+#define TEST_LB(m_pos) \
+ do { \
+ if ((m_pos) < out) \
+ goto lookbehind_overrun; \
+ } while (0)
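The rewritten macros take the run length t and the constant slack x as separate arguments so the addition itself can be validated: if t + x wraps, the result is smaller than at least one operand, and ((t + x) >= t) && ((t + x) >= x) fails. A stand-alone illustration with hypothetical values:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t t = (size_t)-1;	/* e.g. a corrupted literal-run length */
	size_t x = 15;
	size_t sum = t + x;	/* wraps around to 14 */

	/* The old HAVE_IP(t + 15) compared the remaining input against the
	 * wrapped sum and could pass with almost no bytes left; the new
	 * form additionally requires sum >= t and sum >= x. */
	printf("sum=%zu ok=%d\n", sum, (sum >= t) && (sum >= x));
	return 0;
}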
int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len)
while (unlikely(*ip == 0)) {
t += 255;
ip++;
- NEED_IP(1);
+ NEED_IP(1, 0);
}
t += 15 + *ip++;
}
t += 3;
copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+ if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
const unsigned char *ie = ip + t;
unsigned char *oe = op + t;
do {
} else
#endif
{
- NEED_OP(t);
- NEED_IP(t + 3);
+ NEED_OP(t, 0);
+ NEED_IP(t, 3);
do {
*op++ = *ip++;
} while (--t > 0);
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
TEST_LB(m_pos);
- NEED_OP(2);
+ NEED_OP(2, 0);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
while (unlikely(*ip == 0)) {
t += 255;
ip++;
- NEED_IP(1);
+ NEED_IP(1, 0);
}
t += 31 + *ip++;
- NEED_IP(2);
+ NEED_IP(2, 0);
}
m_pos = op - 1;
next = get_unaligned_le16(ip);
while (unlikely(*ip == 0)) {
t += 255;
ip++;
- NEED_IP(1);
+ NEED_IP(1, 0);
}
t += 7 + *ip++;
- NEED_IP(2);
+ NEED_IP(2, 0);
}
next = get_unaligned_le16(ip);
ip += 2;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
if (op - m_pos >= 8) {
unsigned char *oe = op + t;
- if (likely(HAVE_OP(t + 15))) {
+ if (likely(HAVE_OP(t, 15))) {
do {
COPY8(op, m_pos);
op += 8;
m_pos += 8;
} while (op < oe);
op = oe;
- if (HAVE_IP(6)) {
+ if (HAVE_IP(6, 0)) {
state = next;
COPY4(op, ip);
op += next;
continue;
}
} else {
- NEED_OP(t);
+ NEED_OP(t, 0);
do {
*op++ = *m_pos++;
} while (op < oe);
#endif
{
unsigned char *oe = op + t;
- NEED_OP(t);
+ NEED_OP(t, 0);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
state = next;
t = next;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+ if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
COPY4(op, ip);
op += t;
ip += t;
} else
#endif
{
- NEED_IP(t + 3);
- NEED_OP(t);
+ NEED_IP(t, 3);
+ NEED_OP(t, 0);
while (t > 0) {
*op++ = *ip++;
t--;
#define SHORT_SYMLINK_LEN 128
/*
- * shmem_fallocate and shmem_writepage communicate via inode->i_private
- * (with i_mutex making sure that it has only one user at a time):
- * we would prefer not to enlarge the shmem inode just for that.
+ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+ * inode->i_private (with i_mutex making sure that it has only one user at
+ * a time): we would prefer not to enlarge the shmem inode just for that.
*/
struct shmem_falloc {
+ wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
pgoff_t start; /* start of range currently being fallocated */
pgoff_t next; /* the next page offset to be fallocated */
pgoff_t nr_falloced; /* how many new pages have been fallocated */
spin_lock(&inode->i_lock);
shmem_falloc = inode->i_private;
if (shmem_falloc &&
+ !shmem_falloc->waitq &&
index >= shmem_falloc->start &&
index < shmem_falloc->next)
shmem_falloc->nr_unswapped++;
int error;
int ret = VM_FAULT_LOCKED;
+ /*
+ * Trinity finds that probing a hole which tmpfs is punching can
+ * prevent the hole-punch from ever completing: which in turn
+ * locks writers out with its hold on i_mutex. So refrain from
+ * faulting pages into the hole while it's being punched. Although
+ * shmem_undo_range() does remove the additions, it may be unable to
+ * keep up, as each new page needs its own unmap_mapping_range() call,
+ * and the i_mmap tree grows ever slower to scan if new vmas are added.
+ *
+ * It does not matter if we sometimes reach this check just before the
+ * hole-punch begins, so that one fault then races with the punch:
+ * we just need to make racing faults a rare case.
+ *
+ * The implementation below would be much simpler if we just used a
+ * standard mutex or completion: but we cannot take i_mutex in fault,
+ * and bloating every shmem inode for this unlikely case would be sad.
+ */
+ if (unlikely(inode->i_private)) {
+ struct shmem_falloc *shmem_falloc;
+
+ spin_lock(&inode->i_lock);
+ shmem_falloc = inode->i_private;
+ if (shmem_falloc &&
+ shmem_falloc->waitq &&
+ vmf->pgoff >= shmem_falloc->start &&
+ vmf->pgoff < shmem_falloc->next) {
+ wait_queue_head_t *shmem_falloc_waitq;
+ DEFINE_WAIT(shmem_fault_wait);
+
+ ret = VM_FAULT_NOPAGE;
+ if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ /* It's polite to up mmap_sem if we can */
+ up_read(&vma->vm_mm->mmap_sem);
+ ret = VM_FAULT_RETRY;
+ }
+
+ shmem_falloc_waitq = shmem_falloc->waitq;
+ prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock(&inode->i_lock);
+ schedule();
+
+ /*
+ * shmem_falloc_waitq points into the shmem_fallocate()
+ * stack of the hole-punching task: shmem_falloc_waitq
+ * is usually invalid by the time we reach here, but
+ * finish_wait() does not dereference it in that case;
+ * though i_lock needed lest racing with wake_up_all().
+ */
+ spin_lock(&inode->i_lock);
+ finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+ spin_unlock(&inode->i_lock);
+ return ret;
+ }
+ spin_unlock(&inode->i_lock);
+ }
+
error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
if (error)
return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
struct address_space *mapping = file->f_mapping;
loff_t unmap_start = round_up(offset, PAGE_SIZE);
loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+
+ shmem_falloc.waitq = &shmem_falloc_waitq;
+ shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+ shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+ spin_lock(&inode->i_lock);
+ inode->i_private = &shmem_falloc;
+ spin_unlock(&inode->i_lock);
if ((u64)unmap_end > (u64)unmap_start)
unmap_mapping_range(mapping, unmap_start,
1 + unmap_end - unmap_start, 0);
shmem_truncate_range(inode, offset, offset + len - 1);
/* No need to unmap again: hole-punching leaves COWed pages */
+
+ spin_lock(&inode->i_lock);
+ inode->i_private = NULL;
+ wake_up_all(&shmem_falloc_waitq);
+ spin_unlock(&inode->i_lock);
error = 0;
goto out;
}
goto out;
}
+ shmem_falloc.waitq = NULL;
shmem_falloc.start = start;
shmem_falloc.next = start;
shmem_falloc.nr_falloced = 0;
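A hedged user-space stress sketch of the race the wait queue closes (hypothetical file name, error handling elided; compile with -pthread): one thread repeatedly punches a hole in a tmpfs file while another keeps faulting the pages back in through a shared mapping. Previously the faulting side could keep the punch from ever finishing; with this change, faults into the hole sleep on shmem_falloc_waitq until the punch completes:

#define _GNU_SOURCE
#include <fcntl.h>
#include <pthread.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define SZ (16 << 20)

static int fd;
static char *map;

static void *punch(void *arg)
{
	int i;

	for (i = 0; i < 1000; i++)	/* repeatedly punch the whole file */
		fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			  0, SZ);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i;

	fd = open("/dev/shm/punch-race", O_RDWR | O_CREAT, 0600);
	ftruncate(fd, SZ);
	map = mmap(NULL, SZ, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	pthread_create(&t, NULL, punch, NULL);
	for (i = 0; i < 1000; i++)	/* fault pages back into the hole */
		memset(map, 0, SZ);
	pthread_join(t, NULL);
	return 0;
}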
static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
+static bool nf_generic_should_process(u8 proto)
+{
+ switch (proto) {
+#ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE
+ case IPPROTO_SCTP:
+ return false;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE
+ case IPPROTO_DCCP:
+ return false;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE
+ case IPPROTO_GRE:
+ return false;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE
+ case IPPROTO_UDPLITE:
+ return false;
+#endif
+ default:
+ return true;
+ }
+}
+
static inline struct nf_generic_net *generic_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.generic;
static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, unsigned int *timeouts)
{
- return true;
+ return nf_generic_should_process(nf_ct_protonum(ct));
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
/* Only real associations count against the endpoint, so
* don't bother for if this is a temporary association.
*/
- if (!asoc->temp) {
+ if (!list_empty(&asoc->asocs)) {
list_del(&asoc->asocs);
/* Decrement the backlog value for a TCP-style listening
* ack chunk whose serial number matches that of the request.
*/
list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
+ if (sctp_chunk_pending(ack))
+ continue;
if (ack->subh.addip_hdr->serial == serial) {
sctp_chunk_hold(ack);
return ack;
} else {
/* Nothing to do. Next chunk in the packet, please. */
ch = (sctp_chunkhdr_t *) chunk->chunk_end;
-
/* Force chunk->skb->data to chunk->chunk_end. */
- skb_pull(chunk->skb,
- chunk->chunk_end - chunk->skb->data);
-
- /* Verify that we have at least chunk headers
- * worth of buffer left.
- */
- if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
- sctp_chunk_free(chunk);
- chunk = queue->in_progress = NULL;
- }
+ skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
+ /* We are guaranteed to pull a SCTP header. */
}
}
skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
chunk->subh.v = NULL; /* Subheader is no longer valid. */
- if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
+ if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
+ skb_tail_pointer(chunk->skb)) {
/* This is not a singleton */
chunk->singleton = 0;
} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
- /* RFC 2960, Section 6.10 Bundling
- *
- * Partial chunks MUST NOT be placed in an SCTP packet.
- * If the receiver detects a partial chunk, it MUST drop
- * the chunk.
- *
- * Since the end of the chunk is past the end of our buffer
- * (which contains the whole packet, we can freely discard
- * the whole packet.
- */
- sctp_chunk_free(chunk);
- chunk = queue->in_progress = NULL;
-
- return NULL;
+ /* Discard inside state machine. */
+ chunk->pdiscard = 1;
+ chunk->chunk_end = skb_tail_pointer(chunk->skb);
} else {
/* We are at the end of the packet, so mark the chunk
* in case we need to send a SACK.
return SCTP_ERROR_NO_ERROR;
}
-/* Verify the ASCONF packet before we process it. */
-int sctp_verify_asconf(const struct sctp_association *asoc,
- struct sctp_paramhdr *param_hdr, void *chunk_end,
- struct sctp_paramhdr **errp) {
- sctp_addip_param_t *asconf_param;
+/* Verify the ASCONF packet before we process it. */
+bool sctp_verify_asconf(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, bool addr_param_needed,
+ struct sctp_paramhdr **errp)
+{
+ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr;
union sctp_params param;
- int length, plen;
-
- param.v = (sctp_paramhdr_t *) param_hdr;
- while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
- length = ntohs(param.p->length);
- *errp = param.p;
+ bool addr_param_seen = false;
- if (param.v > chunk_end - length ||
- length < sizeof(sctp_paramhdr_t))
- return 0;
+ sctp_walk_params(param, addip, addip_hdr.params) {
+ size_t length = ntohs(param.p->length);
+ *errp = param.p;
switch (param.p->type) {
+ case SCTP_PARAM_ERR_CAUSE:
+ break;
+ case SCTP_PARAM_IPV4_ADDRESS:
+ if (length != sizeof(sctp_ipv4addr_param_t))
+ return false;
+ addr_param_seen = true;
+ break;
+ case SCTP_PARAM_IPV6_ADDRESS:
+ if (length != sizeof(sctp_ipv6addr_param_t))
+ return false;
+ addr_param_seen = true;
+ break;
case SCTP_PARAM_ADD_IP:
case SCTP_PARAM_DEL_IP:
case SCTP_PARAM_SET_PRIMARY:
- asconf_param = (sctp_addip_param_t *)param.v;
- plen = ntohs(asconf_param->param_hdr.length);
- if (plen < sizeof(sctp_addip_param_t) +
- sizeof(sctp_paramhdr_t))
- return 0;
+ /* In ASCONF chunks, these need to be first. */
+ if (addr_param_needed && !addr_param_seen)
+ return false;
+ length = ntohs(param.addip->param_hdr.length);
+ if (length < sizeof(sctp_addip_param_t) +
+ sizeof(sctp_paramhdr_t))
+ return false;
break;
case SCTP_PARAM_SUCCESS_REPORT:
case SCTP_PARAM_ADAPTATION_LAYER_IND:
if (length != sizeof(sctp_addip_param_t))
- return 0;
-
+ return false;
break;
default:
- break;
+ /* This is unknown to us, reject! */
+ return false;
}
-
- param.v += WORD_ROUND(length);
}
- if (param.v != chunk_end)
- return 0;
+ /* Remaining sanity checks. */
+ if (addr_param_needed && !addr_param_seen)
+ return false;
+ if (!addr_param_needed && addr_param_seen)
+ return false;
+ if (param.v != chunk->chunk_end)
+ return false;
- return 1;
+ return true;
}
/* Process an incoming ASCONF chunk with the next expected serial no. and
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
struct sctp_chunk *asconf)
{
+ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;
+ bool all_param_pass = true;
+ union sctp_params param;
sctp_addiphdr_t *hdr;
union sctp_addr_param *addr_param;
sctp_addip_param_t *asconf_param;
struct sctp_chunk *asconf_ack;
-
__be16 err_code;
int length = 0;
int chunk_len;
__u32 serial;
- int all_param_pass = 1;
chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
hdr = (sctp_addiphdr_t *)asconf->skb->data;
goto done;
/* Process the TLVs contained within the ASCONF chunk. */
- while (chunk_len > 0) {
+ sctp_walk_params(param, addip, addip_hdr.params) {
+ /* Skip preceding address parameters. */
+ if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
+ param.p->type == SCTP_PARAM_IPV6_ADDRESS)
+ continue;
+
err_code = sctp_process_asconf_param(asoc, asconf,
- asconf_param);
+ param.addip);
/* ADDIP 4.1 A7)
* If an error response is received for a TLV parameter,
* all TLVs with no response before the failed TLV are
* the failed response are considered unsuccessful unless
* a specific success indication is present for the parameter.
*/
- if (SCTP_ERROR_NO_ERROR != err_code)
- all_param_pass = 0;
-
+ if (err_code != SCTP_ERROR_NO_ERROR)
+ all_param_pass = false;
if (!all_param_pass)
- sctp_add_asconf_response(asconf_ack,
- asconf_param->crr_id, err_code,
- asconf_param);
+ sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
+ err_code, param.addip);
/* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
* an IP address sends an 'Out of Resource' in its response, it
* MUST also fail any subsequent add or delete requests bundled
* in the ASCONF.
*/
- if (SCTP_ERROR_RSRC_LOW == err_code)
+ if (err_code == SCTP_ERROR_RSRC_LOW)
goto done;
-
- /* Move to the next ASCONF param. */
- length = ntohs(asconf_param->param_hdr.length);
- asconf_param = (void *)asconf_param + length;
- chunk_len -= length;
}
-
done:
asoc->peer.addip_serial++;
{
__u16 chunk_length = ntohs(chunk->chunk_hdr->length);
+ /* Previously already marked? */
+ if (unlikely(chunk->pdiscard))
+ return 0;
if (unlikely(chunk_length < required_length))
return 0;
struct sctp_chunk *asconf_ack = NULL;
struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *hdr;
- union sctp_addr_param *addr_param;
__u32 serial;
- int length;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
hdr = (sctp_addiphdr_t *)chunk->skb->data;
serial = ntohl(hdr->serial);
- addr_param = (union sctp_addr_param *)hdr->params;
- length = ntohs(addr_param->p.length);
- if (length < sizeof(sctp_paramhdr_t))
- return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
- (void *)addr_param, commands);
-
/* Verify the ASCONF chunk before processing it. */
- if (!sctp_verify_asconf(asoc,
- (sctp_paramhdr_t *)((void *)addr_param + length),
- (void *)chunk->chunk_end,
- &err_param))
+ if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
(void *)err_param, commands);
rcvd_serial = ntohl(addip_hdr->serial);
/* Verify the ASCONF-ACK chunk before processing it. */
- if (!sctp_verify_asconf(asoc,
- (sctp_paramhdr_t *)addip_hdr->params,
- (void *)asconf_ack->chunk_end,
- &err_param))
+ if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
(void *)err_param, commands);
+* 3.12.1
+- workaround for qHD (540x960) video mode
+== GiWoong Kim <giwoong.kim@samsung.com> 2015-03-12
+* 2.0.21
+- brillcodec: modify debugging method.
+== Kitae Kim <kt920.kim@samsung.com> 2015-01-20
* 2.0.20
- smack: [PATCH] Fix a bidirectional UDS connect check
- enable smp feature
-Version: 2.0.20
+Version: 3.12.1
Maintainer: Yeong-Kyoon, Lee <yeongkyoon.lee@samsung.com>
Source: emulator-kernel
Name: emulator-kernel
Summary: The Linux Emulator Kernel
Version: 3.12.18
-Release: 4
+Release: 5
License: GPL-2.0
Group: System Environment/Kernel
Vendor: The Linux Community
#BuildRequires: linux-glibc-devel
#BuildRequires: bc
+BuildRequires: emulator-kernel-user-headers
Provides: kernel = %{version}-%{release}
Provides: kernel-uname-r = %{fullVersion}
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
atomic_dec(&key->user->nikeys);
- key_user_put(key->user);
-
/* now throw away the key memory */
if (key->type->destroy)
key->type->destroy(key);
+ key_user_put(key->user);
+
kfree(key->description);
#ifdef KEY_DEBUGGING
{
struct snd_kcontrol *kctl;
+ /* Make sure that the ids assigned to the control do not wrap around */
+ if (card->last_numid >= UINT_MAX - count)
+ card->last_numid = 0;
+
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.numid < card->last_numid + 1 + count &&
kctl->id.numid + kctl->count > card->last_numid + 1) {
{
struct snd_ctl_elem_id id;
unsigned int idx;
+ unsigned int count;
int err = -EINVAL;
if (! kcontrol)
if (snd_BUG_ON(!card || !kcontrol->info))
goto error;
id = kcontrol->id;
+ if (id.index > UINT_MAX - kcontrol->count)
+ goto error;
+
down_write(&card->controls_rwsem);
if (snd_ctl_find_id(card, &id)) {
up_write(&card->controls_rwsem);
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
+ count = kcontrol->count;
up_write(&card->controls_rwsem);
- for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
+ for (idx = 0; idx < count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
return 0;
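Two things happen in the hunks above: the numid counter is reset before it can wrap (the UINT_MAX guard), and kcontrol->count is snapshotted into a local before controls_rwsem is dropped, so the notification loop no longer reads a field a concurrent writer could change. The wrap guard is written on the subtraction side deliberately: an unsigned last_numid + count can never compare greater than UINT_MAX because the addition wraps first, so the overflow has to be detected before it happens. A minimal sketch of the same check, with illustrative names:

#include <limits.h>
#include <stdbool.h>

/* True when assigning 'count' more numids after 'last_numid' would
 * wrap the unsigned counter; computed without performing the
 * overflowing addition itself. */
static bool numid_would_wrap(unsigned int last_numid, unsigned int count)
{
	return last_numid >= UINT_MAX - count;
}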
bool add_on_replace)
{
struct snd_ctl_elem_id id;
+ unsigned int count;
unsigned int idx;
struct snd_kcontrol *old;
int ret;
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
+ count = kcontrol->count;
up_write(&card->controls_rwsem);
- for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
+ for (idx = 0; idx < count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
return 0;
result = kctl->put(kctl, control);
}
if (result > 0) {
+ struct snd_ctl_elem_id id = control->id;
up_read(&card->controls_rwsem);
- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
- &control->id);
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
return 0;
}
}
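The pattern in this hunk recurs in the TLV handler further down: the element id is copied to the stack while controls_rwsem still pins the kcontrol, so the notification issued after up_read() cannot dereference memory that a concurrent element removal may have freed. A rough userspace analogue of the idiom, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

struct ctl {
	pthread_rwlock_t *lock;	/* held for reading on entry */
	unsigned int numid;
};

/* Snapshot the id under the lock, drop the lock, then notify using
 * only the stack copy; 'c' must not be touched after the unlock. */
static void notify_value_change(struct ctl *c)
{
	unsigned int numid = c->numid;

	pthread_rwlock_unlock(c->lock);
	printf("value changed: numid %u\n", numid);
}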
struct user_element {
struct snd_ctl_elem_info info;
+ struct snd_card *card;
void *elem_data; /* element data */
unsigned long elem_data_size; /* size of element data in bytes */
void *tlv_data; /* TLV data */
{
struct user_element *ue = kcontrol->private_data;
+ mutex_lock(&ue->card->user_ctl_lock);
memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
+ mutex_unlock(&ue->card->user_ctl_lock);
return 0;
}
{
int change;
struct user_element *ue = kcontrol->private_data;
-
+
+ mutex_lock(&ue->card->user_ctl_lock);
change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
if (change)
memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
+ mutex_unlock(&ue->card->user_ctl_lock);
return change;
}
new_data = memdup_user(tlv, size);
if (IS_ERR(new_data))
return PTR_ERR(new_data);
+ mutex_lock(&ue->card->user_ctl_lock);
change = ue->tlv_data_size != size;
if (!change)
change = memcmp(ue->tlv_data, new_data, size);
kfree(ue->tlv_data);
ue->tlv_data = new_data;
ue->tlv_data_size = size;
+ mutex_unlock(&ue->card->user_ctl_lock);
} else {
- if (! ue->tlv_data_size || ! ue->tlv_data)
- return -ENXIO;
- if (size < ue->tlv_data_size)
- return -ENOSPC;
+ int ret = 0;
+
+ mutex_lock(&ue->card->user_ctl_lock);
+ if (!ue->tlv_data_size || !ue->tlv_data) {
+ ret = -ENXIO;
+ goto err_unlock;
+ }
+ if (size < ue->tlv_data_size) {
+ ret = -ENOSPC;
+ goto err_unlock;
+ }
if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
- return -EFAULT;
+ ret = -EFAULT;
+err_unlock:
+ mutex_unlock(&ue->card->user_ctl_lock);
+ if (ret)
+ return ret;
}
return change;
}
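The read side above switches from early returns to a single err_unlock exit so that user_ctl_lock is released on every path. A self-contained sketch of that error-path shape, using a pthread mutex and made-up names rather than the in-tree ones:

#include <errno.h>
#include <pthread.h>
#include <string.h>

/* Copy 'src' into 'dst' under 'lock'; all failures after the lock is
 * taken funnel through one label so it is released exactly once. */
static int read_blob_locked(pthread_mutex_t *lock,
			    const char *src, size_t src_len,
			    char *dst, size_t dst_len)
{
	int ret = 0;

	pthread_mutex_lock(lock);
	if (!src || !src_len) {
		ret = -ENXIO;
		goto err_unlock;
	}
	if (dst_len < src_len) {
		ret = -ENOSPC;
		goto err_unlock;
	}
	memcpy(dst, src, src_len);
err_unlock:
	pthread_mutex_unlock(lock);
	return ret;
}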
struct user_element *ue;
int idx, err;
- if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)
- return -ENOMEM;
if (info->count < 1)
return -EINVAL;
access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
info->id.numid = 0;
memset(&kctl, 0, sizeof(kctl));
- down_write(&card->controls_rwsem);
- _kctl = snd_ctl_find_id(card, &info->id);
- err = 0;
- if (_kctl) {
- if (replace)
- err = snd_ctl_remove(card, _kctl);
- else
- err = -EBUSY;
- } else {
- if (replace)
- err = -ENOENT;
+
+ if (replace) {
+ err = snd_ctl_remove_user_ctl(file, &info->id);
+ if (err)
+ return err;
}
- up_write(&card->controls_rwsem);
- if (err < 0)
- return err;
+
+ if (card->user_ctl_count >= MAX_USER_CONTROLS)
+ return -ENOMEM;
+
memcpy(&kctl.id, &info->id, sizeof(info->id));
kctl.count = info->owner ? info->owner : 1;
access |= SNDRV_CTL_ELEM_ACCESS_USER;
ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
if (ue == NULL)
return -ENOMEM;
+ ue->card = card;
ue->info = *info;
ue->info.access = 0;
ue->elem_data = (char *)ue + sizeof(*ue);
}
err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
if (err > 0) {
+ struct snd_ctl_elem_id id = kctl->id;
up_read(&card->controls_rwsem);
- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id);
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
return 0;
}
} else {
INIT_LIST_HEAD(&card->devices);
init_rwsem(&card->controls_rwsem);
rwlock_init(&card->ctl_files_rwlock);
+ mutex_init(&card->user_ctl_lock);
INIT_LIST_HEAD(&card->controls);
INIT_LIST_HEAD(&card->ctl_files);
spin_lock_init(&card->files_lock);
TARGETS += kcmp
TARGETS += memory-hotplug
TARGETS += mqueue
+TARGETS += mount
TARGETS += net
TARGETS += ptrace
TARGETS += timers
--- /dev/null
+# Makefile for mount selftests.
+
+all: unprivileged-remount-test
+
+unprivileged-remount-test: unprivileged-remount-test.c
+ gcc -Wall -O2 unprivileged-remount-test.c -o unprivileged-remount-test
+
+# Allow specific tests to be selected.
+test_unprivileged_remount: unprivileged-remount-test
+ @if [ -f /proc/self/uid_map ] ; then ./unprivileged-remount-test ; fi
+
+run_tests: all test_unprivileged_remount
+
+clean:
+ rm -f unprivileged-remount-test
+
+.PHONY: all test_unprivileged_remount run_tests clean
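Note that run_tests only exercises the binary when /proc/self/uid_map exists, i.e. on kernels built with user namespace support; elsewhere it still builds the test but skips running it.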
--- /dev/null
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <grp.h>
+#include <stdbool.h>
+#include <stdarg.h>
+
+#ifndef CLONE_NEWNS
+# define CLONE_NEWNS 0x00020000
+#endif
+#ifndef CLONE_NEWUTS
+# define CLONE_NEWUTS 0x04000000
+#endif
+#ifndef CLONE_NEWIPC
+# define CLONE_NEWIPC 0x08000000
+#endif
+#ifndef CLONE_NEWNET
+# define CLONE_NEWNET 0x40000000
+#endif
+#ifndef CLONE_NEWUSER
+# define CLONE_NEWUSER 0x10000000
+#endif
+#ifndef CLONE_NEWPID
+# define CLONE_NEWPID 0x20000000
+#endif
+
+#ifndef MS_RELATIME
+#define MS_RELATIME (1 << 21)
+#endif
+#ifndef MS_STRICTATIME
+#define MS_STRICTATIME (1 << 24)
+#endif
+
+static void die(char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+ exit(EXIT_FAILURE);
+}
+
+static void write_file(char *filename, char *fmt, ...)
+{
+ char buf[4096];
+ int fd;
+ ssize_t written;
+ int buf_len;
+ va_list ap;
+
+ va_start(ap, fmt);
+ buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+ if (buf_len < 0) {
+ die("vsnprintf failed: %s\n",
+ strerror(errno));
+ }
+ if (buf_len >= sizeof(buf)) {
+ die("vsnprintf output truncated\n");
+ }
+
+ fd = open(filename, O_WRONLY);
+ if (fd < 0) {
+ die("open of %s failed: %s\n",
+ filename, strerror(errno));
+ }
+ written = write(fd, buf, buf_len);
+ if (written != buf_len) {
+ if (written >= 0) {
+ die("short write to %s\n", filename);
+ } else {
+ die("write to %s failed: %s\n",
+ filename, strerror(errno));
+ }
+ }
+ if (close(fd) != 0) {
+ die("close of %s failed: %s\n",
+ filename, strerror(errno));
+ }
+}
+
+static void create_and_enter_userns(void)
+{
+ uid_t uid;
+ gid_t gid;
+
+ uid = getuid();
+ gid = getgid();
+
+ if (unshare(CLONE_NEWUSER) != 0) {
+ die("unshare(CLONE_NEWUSER) failed: %s\n",
+ strerror(errno));
+ }
+
+ write_file("/proc/self/uid_map", "0 %d 1", uid);
+ write_file("/proc/self/gid_map", "0 %d 1", gid);
+
+ if (setgroups(0, NULL) != 0) {
+ die("setgroups failed: %s\n",
+ strerror(errno));
+ }
+ if (setgid(0) != 0) {
+ die("setgid(0) failed: %s\n",
+ strerror(errno));
+ }
+ if (setuid(0) != 0) {
+ die("setuid(0) failed: %s\n",
+ strerror(errno));
+ }
+}
+
+static
+bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
+{
+ pid_t child;
+
+ child = fork();
+ if (child == -1) {
+ die("fork failed: %s\n",
+ strerror(errno));
+ }
+ if (child != 0) { /* parent */
+ pid_t pid;
+ int status;
+ pid = waitpid(child, &status, 0);
+ if (pid == -1) {
+ die("waitpid failed: %s\n",
+ strerror(errno));
+ }
+ if (pid != child) {
+ die("waited for %d got %d\n",
+ child, pid);
+ }
+ if (!WIFEXITED(status)) {
+ die("child did not terminate cleanly\n");
+ }
+ return WEXITSTATUS(status) == EXIT_SUCCESS;
+ }
+
+ create_and_enter_userns();
+ if (unshare(CLONE_NEWNS) != 0) {
+ die("unshare(CLONE_NEWNS) failed: %s\n",
+ strerror(errno));
+ }
+
+ if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
+ die("mount of /tmp failed: %s\n",
+ strerror(errno));
+ }
+
+ create_and_enter_userns();
+
+ if (unshare(CLONE_NEWNS) != 0) {
+ die("unshare(CLONE_NEWNS) failed: %s\n",
+ strerror(errno));
+ }
+
+ if (mount("/tmp", "/tmp", "none",
+ MS_REMOUNT | MS_BIND | remount_flags, NULL) != 0) {
+ /* system("cat /proc/self/mounts"); */
+ die("remount of /tmp failed: %s\n",
+ strerror(errno));
+ }
+
+ if (mount("/tmp", "/tmp", "none",
+ MS_REMOUNT | MS_BIND | invalid_flags, NULL) == 0) {
+ /* system("cat /proc/self/mounts"); */
+ die("remount of /tmp with invalid flags "
+ "succeeded unexpectedly\n");
+ }
+ exit(EXIT_SUCCESS);
+}
+
+static bool test_unpriv_remount_simple(int mount_flags)
+{
+ return test_unpriv_remount(mount_flags, mount_flags, 0);
+}
+
+static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
+{
+ return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
+}
+
+int main(int argc, char **argv)
+{
+ if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
+ die("MS_RDONLY malfunctions\n");
+ }
+ if (!test_unpriv_remount_simple(MS_NODEV)) {
+ die("MS_NODEV malfunctions\n");
+ }
+ if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
+ die("MS_NOSUID malfunctions\n");
+ }
+ if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
+ die("MS_NOEXEC malfunctions\n");
+ }
+ if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
+ MS_NOATIME|MS_NODEV))
+ {
+ die("MS_RELATIME malfunctions\n");
+ }
+ if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
+ MS_NOATIME|MS_NODEV))
+ {
+ die("MS_STRICTATIME malfunctions\n");
+ }
+ if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
+ MS_STRICTATIME|MS_NODEV))
+ {
+ die("MS_RELATIME malfunctions\n");
+ }
+ if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
+ MS_NOATIME|MS_NODEV))
+ {
+ die("MS_RELATIME malfunctions\n");
+ }
+ if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
+ MS_NOATIME|MS_NODEV))
+ {
+ die("MS_RELATIME malfunctions\n");
+ }
+ if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
+ MS_STRICTATIME|MS_NODEV))
+ {
+ die("MS_RELATIME malfunctions\n");
+ }
+ if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
+ MS_NOATIME|MS_NODEV))
+ {
+ die("Default atime malfunctions\n");
+ }
+ return EXIT_SUCCESS;
+}
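Each test case forks a child that nests two user-plus-mount namespaces around a fresh ramfs mount, then checks both directions: the remount that merely restates the original lock-down flags must succeed, while the remount carrying invalid_flags, which tries to clear or relax them, must fail. A clean exit from main() therefore means an unprivileged remount can keep, but not loosen, the flags the test covers.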