tizen 2.3.1 release tizen_2.3.1 submit/tizen_2.3.1/20150918.050150 tizen_2.3.1_release
authorjk7744.park <jk7744.park@samsung.com>
Fri, 18 Sep 2015 02:47:26 +0000 (11:47 +0900)
committerjk7744.park <jk7744.park@samsung.com>
Fri, 18 Sep 2015 02:47:26 +0000 (11:47 +0900)
35 files changed:
arch/x86/configs/i386_tizen_emul_defconfig
drivers/gpu/drm/vigs/vigs_output.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-picolcd_core.c
drivers/maru/maru_brillcodec.c
drivers/media/usb/ttusb-dec/ttusbdecfe.c
drivers/usb/serial/whiteheat.c
fs/isofs/inode.c
fs/isofs/isofs.h
fs/isofs/rock.c
fs/namespace.c
include/linux/mount.h
include/net/sctp/sctp.h
include/net/sctp/sm.h
include/sound/core.h
kernel/futex.c
kernel/user_namespace.c
lib/lz4/lz4_decompress.c
lib/lzo/lzo1x_decompress_safe.c
mm/shmem.c
net/netfilter/nf_conntrack_proto_generic.c
net/sctp/associola.c
net/sctp/inqueue.c
net/sctp/sm_make_chunk.c
net/sctp/sm_statefuns.c
package/changelog
package/pkginfo.manifest
packaging/emulator-kernel.spec
security/keys/gc.c
sound/core/control.c
sound/core/init.c
tools/testing/selftests/Makefile
tools/testing/selftests/mount/Makefile [new file with mode: 0644]
tools/testing/selftests/mount/unprivileged-remount-test.c [new file with mode: 0644]

index 95410dd3d933d29ca37ad191067723c2c82aa5d0..75f8c6ec32a25f88b5bc0c644713e999ceec6a03 100644 (file)
@@ -33,11 +33,8 @@ CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
 # CONFIG_AUDIT_ARCH is not set
 CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
 CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
-CONFIG_X86_32_SMP=y
-CONFIG_X86_HT=y
 CONFIG_X86_32_LAZY_GS=y
 CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx"
-CONFIG_ARCH_CPU_PROBE_RELEASE=y
 CONFIG_ARCH_SUPPORTS_UPROBES=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 CONFIG_IRQ_WORK=y
@@ -46,6 +43,7 @@ CONFIG_BUILDTIME_EXTABLE_SORT=y
 #
 # General setup
 #
+CONFIG_BROKEN_ON_SMP=y
 CONFIG_INIT_ENV_ARG_LIMIT=32
 CONFIG_CROSS_COMPILE=""
 # CONFIG_COMPILE_TEST is not set
@@ -81,14 +79,12 @@ CONFIG_AUDIT_TREE=y
 #
 CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_GENERIC_IRQ_SHOW=y
-CONFIG_GENERIC_PENDING_IRQ=y
 CONFIG_IRQ_FORCED_THREADING=y
 CONFIG_SPARSE_IRQ=y
 CONFIG_CLOCKSOURCE_WATCHDOG=y
 CONFIG_KTIME_SCALAR=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
-CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
 CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
 CONFIG_GENERIC_CMOS_UPDATE=y
 
@@ -123,7 +119,6 @@ CONFIG_RCU_STALL_COMMON=y
 CONFIG_RCU_FANOUT=32
 CONFIG_RCU_FANOUT_LEAF=16
 # CONFIG_RCU_FANOUT_EXACT is not set
-# CONFIG_RCU_FAST_NO_HZ is not set
 # CONFIG_TREE_RCU_TRACE is not set
 # CONFIG_RCU_BOOST is not set
 # CONFIG_RCU_NOCB_CPU is not set
@@ -217,7 +212,6 @@ CONFIG_SLUB_DEBUG=y
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_SLAB is not set
 CONFIG_SLUB=y
-CONFIG_SLUB_CPU_PARTIAL=y
 CONFIG_PROFILING=y
 CONFIG_TRACEPOINTS=y
 CONFIG_OPROFILE=y
@@ -240,7 +234,6 @@ CONFIG_HAVE_KPROBES_ON_FTRACE=y
 CONFIG_HAVE_ARCH_TRACEHOOK=y
 CONFIG_HAVE_DMA_ATTRS=y
 CONFIG_HAVE_DMA_CONTIGUOUS=y
-CONFIG_USE_GENERIC_SMP_HELPERS=y
 CONFIG_GENERIC_SMP_IDLE_THREAD=y
 CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
 CONFIG_HAVE_DMA_API_DEBUG=y
@@ -280,7 +273,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_MODVERSIONS is not set
 # CONFIG_MODULE_SRCVERSION_ALL is not set
 # CONFIG_MODULE_SIG is not set
-CONFIG_STOP_MACHINE=y
 CONFIG_BLOCK=y
 CONFIG_LBDAF=y
 CONFIG_BLK_DEV_BSG=y
@@ -325,22 +317,18 @@ CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
 CONFIG_UNINLINE_SPIN_UNLOCK=y
-CONFIG_MUTEX_SPIN_ON_OWNER=y
 CONFIG_FREEZER=y
 
 #
 # Processor type and features
 #
 CONFIG_ZONE_DMA=y
-CONFIG_SMP=y
-CONFIG_X86_MPPARSE=y
-# CONFIG_X86_BIGSMP is not set
+# CONFIG_SMP is not set
 CONFIG_X86_EXTENDED_PLATFORM=y
 # CONFIG_X86_GOLDFISH is not set
 # CONFIG_X86_WANT_INTEL_MID is not set
 # CONFIG_X86_INTEL_LPSS is not set
 # CONFIG_X86_RDC321X is not set
-# CONFIG_X86_32_NON_STANDARD is not set
 CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
 # CONFIG_X86_32_IRIS is not set
 CONFIG_SCHED_OMIT_FRAME_POINTER=y
@@ -388,23 +376,15 @@ CONFIG_CPU_SUP_CENTAUR=y
 CONFIG_CPU_SUP_TRANSMETA_32=y
 # CONFIG_HPET_TIMER is not set
 CONFIG_DMI=y
-CONFIG_NR_CPUS=8
-# CONFIG_SCHED_SMT is not set
-CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=1
 # CONFIG_PREEMPT_NONE is not set
 # CONFIG_PREEMPT_VOLUNTARY is not set
 CONFIG_PREEMPT=y
 CONFIG_PREEMPT_COUNT=y
-CONFIG_X86_LOCAL_APIC=y
-CONFIG_X86_IO_APIC=y
-# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
+# CONFIG_X86_UP_APIC is not set
 CONFIG_X86_MCE=y
-CONFIG_X86_MCE_INTEL=y
-CONFIG_X86_MCE_AMD=y
 # CONFIG_X86_ANCIENT_MCE is not set
-CONFIG_X86_MCE_THRESHOLD=y
 # CONFIG_X86_MCE_INJECT is not set
-CONFIG_X86_THERMAL_VECTOR=y
 CONFIG_VM86=y
 # CONFIG_TOSHIBA is not set
 # CONFIG_I8K is not set
@@ -451,6 +431,7 @@ CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
 # CONFIG_MEMORY_FAILURE is not set
 # CONFIG_TRANSPARENT_HUGEPAGE is not set
 CONFIG_CROSS_MEMORY_ATTACH=y
+CONFIG_NEED_PER_CPU_KM=y
 # CONFIG_CLEANCACHE is not set
 # CONFIG_FRONTSWAP is not set
 # CONFIG_CMA is not set
@@ -483,9 +464,6 @@ CONFIG_PHYSICAL_START=0x1000000
 CONFIG_RELOCATABLE=y
 CONFIG_X86_NEED_RELOCS=y
 CONFIG_PHYSICAL_ALIGN=0x1000000
-CONFIG_HOTPLUG_CPU=y
-# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
-# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
 # CONFIG_COMPAT_VDSO is not set
 # CONFIG_CMDLINE_BOOL is not set
 CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
@@ -499,7 +477,6 @@ CONFIG_HIBERNATE_CALLBACKS=y
 CONFIG_HIBERNATION=y
 CONFIG_PM_STD_PARTITION=""
 CONFIG_PM_SLEEP=y
-CONFIG_PM_SLEEP_SMP=y
 # CONFIG_PM_AUTOSLEEP is not set
 # CONFIG_PM_WAKELOCKS is not set
 # CONFIG_PM_RUNTIME is not set
@@ -523,7 +500,6 @@ CONFIG_ACPI_VIDEO=y
 CONFIG_ACPI_FAN=y
 CONFIG_ACPI_DOCK=y
 CONFIG_ACPI_PROCESSOR=y
-CONFIG_ACPI_HOTPLUG_CPU=y
 # CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set
 CONFIG_ACPI_THERMAL=y
 # CONFIG_ACPI_CUSTOM_DSDT is not set
@@ -620,7 +596,6 @@ CONFIG_PCIEASPM_DEFAULT=y
 # CONFIG_PCI_DEBUG is not set
 # CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
 # CONFIG_PCI_STUB is not set
-CONFIG_HT_IRQ=y
 # CONFIG_PCI_IOV is not set
 # CONFIG_PCI_PRI is not set
 # CONFIG_PCI_PASID is not set
@@ -657,7 +632,6 @@ CONFIG_YENTA_TOSHIBA=y
 CONFIG_PCCARD_NONSTATIC=y
 CONFIG_HOTPLUG_PCI=y
 # CONFIG_HOTPLUG_PCI_COMPAQ is not set
-# CONFIG_HOTPLUG_PCI_IBM is not set
 CONFIG_HOTPLUG_PCI_ACPI=y
 # CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
 # CONFIG_HOTPLUG_PCI_CPCI is not set
@@ -1008,13 +982,9 @@ CONFIG_DNS_RESOLVER=y
 # CONFIG_NETLINK_MMAP is not set
 # CONFIG_NETLINK_DIAG is not set
 # CONFIG_NET_MPLS_GSO is not set
-CONFIG_RPS=y
-CONFIG_RFS_ACCEL=y
-CONFIG_XPS=y
 CONFIG_NETPRIO_CGROUP=y
 CONFIG_NET_RX_BUSY_POLL=y
 CONFIG_BQL=y
-CONFIG_NET_FLOW_LIMIT=y
 
 #
 # Network testing
@@ -2092,11 +2062,10 @@ CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
 # CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
 # CONFIG_THERMAL_GOV_FAIR_SHARE is not set
 CONFIG_THERMAL_GOV_STEP_WISE=y
-CONFIG_THERMAL_GOV_USER_SPACE=y
+# CONFIG_THERMAL_GOV_USER_SPACE is not set
 # CONFIG_CPU_THERMAL is not set
 # CONFIG_THERMAL_EMULATION is not set
 # CONFIG_INTEL_POWERCLAMP is not set
-CONFIG_X86_PKG_TEMP_THERMAL=m
 
 #
 # Texas Instruments thermal drivers
@@ -2869,8 +2838,6 @@ CONFIG_LEDS_TRIGGERS=y
 CONFIG_EDAC=y
 CONFIG_EDAC_LEGACY_SYSFS=y
 # CONFIG_EDAC_DEBUG is not set
-CONFIG_EDAC_DECODE_MCE=y
-# CONFIG_EDAC_MCE_INJ is not set
 # CONFIG_EDAC_MM_EDAC is not set
 CONFIG_RTC_LIB=y
 CONFIG_RTC_CLASS=y
@@ -3370,7 +3337,6 @@ CONFIG_DEBUG_STACK_USAGE=y
 # CONFIG_DEBUG_VM is not set
 # CONFIG_DEBUG_VIRTUAL is not set
 CONFIG_DEBUG_MEMORY_INIT=y
-# CONFIG_DEBUG_PER_CPU_MAPS is not set
 # CONFIG_DEBUG_HIGHMEM is not set
 CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
 CONFIG_DEBUG_STACKOVERFLOW=y
@@ -3517,7 +3483,6 @@ CONFIG_DEFAULT_IO_DELAY_TYPE=0
 CONFIG_DEBUG_BOOT_PARAMS=y
 # CONFIG_CPA_DEBUG is not set
 CONFIG_OPTIMIZE_INLINING=y
-# CONFIG_DEBUG_NMI_SELFTEST is not set
 # CONFIG_X86_DEBUG_STATIC_CPU_HAS is not set
 
 #
@@ -3563,7 +3528,6 @@ CONFIG_CRYPTO_MANAGER2=y
 CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
 # CONFIG_CRYPTO_GF128MUL is not set
 # CONFIG_CRYPTO_NULL is not set
-# CONFIG_CRYPTO_PCRYPT is not set
 CONFIG_CRYPTO_WORKQUEUE=y
 # CONFIG_CRYPTO_CRYPTD is not set
 CONFIG_CRYPTO_AUTHENC=y
@@ -3716,7 +3680,6 @@ CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
 CONFIG_HAS_DMA=y
 CONFIG_CHECK_SIGNATURE=y
-CONFIG_CPU_RMAP=y
 CONFIG_DQL=y
 CONFIG_NLATTR=y
 CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
index fffcb709da8289c8fa692e296e72a0d5b66a5051..77b83b14166a97c5ac943f305874e97034d6495d 100644 (file)
@@ -101,7 +101,16 @@ static int vigs_connector_get_modes(struct drm_connector *connector)
             struct drm_display_mode *preferred_mode =
                 drm_mode_create_from_cmdline_mode(drm_dev,
                                                   &cmdline_mode);
+
+            /* qHD workaround (540x960) */
+            if (cmdline_mode.xres == 540 && cmdline_mode.yres == 960) {
+                preferred_mode->hdisplay = cmdline_mode.xres;
+                preferred_mode->hsync_start = preferred_mode->hsync_start - 1;
+                preferred_mode->hsync_end = preferred_mode->hsync_end - 1;
+            }
+
             preferred_mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+            drm_mode_set_crtcinfo(preferred_mode, CRTC_INTERLACE_HALVE_V);
             drm_mode_probed_add(connector, preferred_mode);
             return 1;
         }
index 2e5302462efb088b3f3019b26fedfc816ca493f5..5d08c831881b2736642d92cbc21aaa43e1fb9b66 100644 (file)
@@ -237,13 +237,6 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
                return;
        }
 
-       if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
-           (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
-               dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
-                       __func__, dj_report->device_index);
-               return;
-       }
-
        if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
                /* The device is already known. No need to reallocate it. */
                dbg_hid("%s: device is already known\n", __func__);
@@ -713,6 +706,12 @@ static int logi_dj_raw_event(struct hid_device *hdev,
         * device (via hid_input_report() ) and return 1 so hid-core does not do
         * anything else with it.
         */
+       if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+           (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+               dev_err(&hdev->dev, "%s: invalid device index:%d\n",
+                               __func__, dj_report->device_index);
+               return false;
+       }
 
        spin_lock_irqsave(&djrcv_dev->lock, flags);
        if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
index 3b43d1cfa9368609302de46a73658779199d446f..991ba79cfc727ac2d675b43d1c779caa0b3bf740 100644 (file)
@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
                if (size < 4 || ((size - 4) % 9) != 0)
                        return 0;
                npoints = (size - 4) / 9;
+               if (npoints > 15) {
+                       hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
+                                       size);
+                       return 0;
+               }
                msc->ntouches = 0;
                for (ii = 0; ii < npoints; ii++)
                        magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
                if (size < 6 || ((size - 6) % 8) != 0)
                        return 0;
                npoints = (size - 6) / 8;
+               if (npoints > 15) {
+                       hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
+                                       size);
+                       return 0;
+               }
                msc->ntouches = 0;
                for (ii = 0; ii < npoints; ii++)
                        magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
index acbb021065ece8287c9d3ea433c860afc0711855..020df3c2e8b42717c62bbe0470aa47845535e4a5 100644 (file)
@@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev,
        if (!data)
                return 1;
 
+       if (size > 64) {
+               hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
+                               size);
+               return 0;
+       }
+
        if (report->id == REPORT_KEY_STATE) {
                if (data->input_keys)
                        ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
index 2d3dc37d9dd38a6eb5bd437111a3b0cd0536bc72..f93fa8014f202815c01991695c3d82074d63b578 100644 (file)
@@ -64,20 +64,17 @@ MODULE_LICENSE("GPL2");
 #define CODEC_IRQ_TASK 0x1f
 
 // DEBUG
-#ifdef CODEC_DEBUG
-#define DEBUG(fmt, ...) \
-       printk(KERN_DEBUG "[%s][%d]: " fmt, DEVICE_NAME, __LINE__, ##__VA_ARGS__)
+int brillcodec_debug = 0;
+module_param(brillcodec_debug, int, 0644);
+MODULE_PARM_DESC(brillcodec_debug, "Turn on/off brillcodec debugging (default:off).");
 
-#define INFO(fmt, ...) \
-       printk(KERN_INFO "[%s][%d]: " fmt, DEVICE_NAME, __LINE__, ##__VA_ARGS__)
-#else
-#define DEBUG(fmt, ...)
+#define CODEC_DBG(level, fmt, ...) \
+       do { \
+               if (brillcodec_debug > 0) { \
+                       printk(level "[%s][%d]: " fmt, DEVICE_NAME, __LINE__, ##__VA_ARGS__); \
+               } \
+       } while (0)
 
-#define INFO(fmt, ...)
-#endif
-
-#define ERROR(fmt, ...) \
-       printk(KERN_ERR "[%s][%d]: " fmt, DEVICE_NAME, __LINE__, ##__VA_ARGS__)
 
 /* Define i/o and api values.  */
 enum codec_io_cmd {
@@ -247,23 +244,23 @@ static void codec_bh_func(struct work_struct *work)
 {
        uint32_t value;
 
-       DEBUG("%s\n", __func__);
+       CODEC_DBG(KERN_DEBUG, "%s\n", __func__);
        do {
                value =
                        readl(maru_brill_codec->ioaddr + CODEC_CMD_GET_CTX_FROM_QUEUE);
-               DEBUG("read a value from device %x.\n", value);
+               CODEC_DBG(KERN_DEBUG, "read a value from device %x.\n", value);
                if (value) {
                        context_flags[value] = 1;
                        wake_up_interruptible(&wait_queue);
                } else {
-                       DEBUG("there is no available task\n");
+                       CODEC_DBG(KERN_DEBUG, "there is no available task\n");
                }
        } while (value);
 }
 
 static void codec_bh(struct maru_brill_codec_device *dev)
 {
-       DEBUG("add bottom-half function to codec_workqueue\n");
+       CODEC_DBG(KERN_DEBUG, "add bottom-half function to codec_workqueue\n");
        queue_work(codec_bh_workqueue, &codec_bh_work);
 }
 
@@ -283,33 +280,33 @@ static int secure_device_memory(uint32_t ctx_id, uint32_t buf_size,
        } else if (buf_size < CODEC_L_DEVICE_MEM_SIZE) {
                index = LARGE;
        } else {
-               ERROR("invalid buffer size: %x\n", buf_size);
+               CODEC_DBG(KERN_ERR, "invalid buffer size: %x\n", buf_size);
                return -1;
        }
 
        block = &maru_brill_codec->memory_blocks[index];
 
        // decrease buffer_semaphore
-       DEBUG("before down buffer_sema: %d\n", block->semaphore.count);
+       CODEC_DBG(KERN_DEBUG, "before down buffer_sema: %d\n", block->semaphore.count);
 
        if (non_blocking) {
                if (down_trylock(&block->semaphore)) { // if 1
-                       DEBUG("buffer is not available now\n");
+                       CODEC_DBG(KERN_DEBUG, "buffer is not available now\n");
                        return -1;
                }
        } else {
                if (down_trylock(&block->semaphore)) { // if 1
                        if (down_interruptible(&block->last_buf_semaphore)) { // if -EINTR
-                               DEBUG("down_interruptible interrupted\n");
+                               CODEC_DBG(KERN_DEBUG, "down_interruptible interrupted\n");
                                return -1;
                        }
                        block->last_buf_secured = 1; // protected under last_buf_semaphore
                        ret = 1;
-                       DEBUG("lock last buffer semaphore.\n");
+                       CODEC_DBG(KERN_DEBUG, "lock last buffer semaphore.\n");
                }
        }
 
-       DEBUG("after down buffer_sema: %d\n", block->semaphore.count);
+       CODEC_DBG(KERN_DEBUG, "after down buffer_sema: %d\n", block->semaphore.count);
 
        mutex_lock(&block->access_mutex);
        unit = list_first_entry(&block->available, struct device_mem, entry);
@@ -322,12 +319,12 @@ static int secure_device_memory(uint32_t ctx_id, uint32_t buf_size,
                } else {
                        up(&block->semaphore);
                }
-               ERROR("failed to get memory block.\n");
+               CODEC_DBG(KERN_ERR, "failed to get memory block.\n");
        } else {
                unit->ctx_id = ctx_id;
                list_move_tail(&unit->entry, &block->occupied);
                *offset = unit->mem_offset;
-               DEBUG("get available memory region: 0x%x\n", ret);
+               CODEC_DBG(KERN_DEBUG, "get available memory region: 0x%x\n", ret);
        }
        mutex_unlock(&block->access_mutex);
 
@@ -351,7 +348,7 @@ static void release_device_memory(uint32_t mem_offset)
                index = LARGE;
        } else {
                // error
-               ERROR("invalid memory offsset. offset = 0x%x.\n", (uint32_t)mem_offset);
+               CODEC_DBG(KERN_ERR, "invalid memory offsset. offset = 0x%x.\n", (uint32_t)mem_offset);
                return;
        }
 
@@ -368,10 +365,10 @@ static void release_device_memory(uint32_t mem_offset)
                                if (block->last_buf_secured) {
                                        block->last_buf_secured = 0;
                                        up(&block->last_buf_semaphore);
-                                       DEBUG("unlock last buffer semaphore.\n");
+                                       CODEC_DBG(KERN_DEBUG, "unlock last buffer semaphore.\n");
                                } else {
                                        up(&block->semaphore);
-                                       DEBUG("unlock semaphore: %d.\n", block->semaphore.count);
+                                       CODEC_DBG(KERN_DEBUG, "unlock semaphore: %d.\n", block->semaphore.count);
                                }
 
                                found = true;
@@ -380,11 +377,11 @@ static void release_device_memory(uint32_t mem_offset)
                }
                if (!found) {
                        // can not enter here...
-                       ERROR("cannot find this memory block. offset = 0x%x.\n", (uint32_t)mem_offset);
+                       CODEC_DBG(KERN_ERR, "cannot find this memory block. offset = 0x%x.\n", (uint32_t)mem_offset);
                }
        } else {
                // can not enter here...
-               ERROR("there is not any using memory block.\n");
+               CODEC_DBG(KERN_ERR, "there is not any using memory block.\n");
        }
        mutex_unlock(&block->access_mutex);
 }
@@ -406,7 +403,7 @@ static void dispose_device_memory(uint32_t context_id)
                                if (unit->ctx_id == context_id) {
                                        unit->ctx_id = 0;
                                        list_move_tail(&unit->entry, &block->available);
-                                       INFO("dispose memory block: %x", unit->mem_offset);
+                                       CODEC_DBG(KERN_INFO, "dispose memory block: %x", unit->mem_offset);
                                }
                        }
                }
@@ -423,7 +420,7 @@ static void maru_brill_codec_info_cache(void)
        memaddr = ioremap(maru_brill_codec->mem_start,
                                                maru_brill_codec->mem_size);
        if (!memaddr) {
-               ERROR("ioremap failed\n");
+               CODEC_DBG(KERN_ERR, "ioremap failed\n");
                return;
        }
 
@@ -432,7 +429,7 @@ static void maru_brill_codec_info_cache(void)
        codec_info =
                kzalloc(codec_info_len, GFP_KERNEL);
        if (!codec_info) {
-               ERROR("falied to allocate codec_info memory!\n");
+               CODEC_DBG(KERN_ERR, "falied to allocate codec_info memory!\n");
                return;
        }
 
@@ -449,15 +446,15 @@ static long put_data_into_buffer(struct codec_buffer_id *opaque) {
        uint32_t offset = 0;
        unsigned long flags;
 
-       DEBUG("read data into small buffer\n");
+       CODEC_DBG(KERN_DEBUG, "read data into small buffer\n");
 
     value = secure_device_memory(opaque->buffer_index, opaque->buffer_size, 0, &offset);
 
        if (value < 0) {
-               DEBUG("failed to get available memory\n");
+               CODEC_DBG(KERN_DEBUG, "failed to get available memory\n");
                ret = -EINVAL;
        } else {
-               DEBUG("send a request to pop data from device. %d\n", opaque->buffer_index);
+               CODEC_DBG(KERN_DEBUG, "send a request to pop data from device. %d\n", opaque->buffer_index);
 
                ENTER_CRITICAL_SECTION(flags);
                writel((uint32_t)offset,
@@ -489,10 +486,10 @@ static long maru_brill_codec_ioctl(struct file *file,
        switch (cmd) {
        case CODEC_CMD_GET_VERSION:
        {
-               DEBUG("%s version: %d\n", DEVICE_NAME, maru_brill_codec->version);
+               CODEC_DBG(KERN_DEBUG, "%s version: %d\n", DEVICE_NAME, maru_brill_codec->version);
 
                if (copy_to_user((void *)arg, &maru_brill_codec->version, sizeof(int))) {
-                       ERROR("ioctl: failed to copy data to user\n");
+                       CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user\n");
                        ret = -EIO;
                }
                break;
@@ -502,13 +499,13 @@ static long maru_brill_codec_ioctl(struct file *file,
                uint32_t len = 0;
                unsigned long flags;
 
-               DEBUG("request a device to get codec elements\n");
+               CODEC_DBG(KERN_DEBUG, "request a device to get codec elements\n");
 
                ENTER_CRITICAL_SECTION(flags);
                if (!maru_brill_codec->codec_elem_cached) {
                        value = readl(maru_brill_codec->ioaddr + cmd);
                        if (value < 0) {
-                               ERROR("ioctl: failed to get elements. %d\n", (int)value);
+                               CODEC_DBG(KERN_ERR, "ioctl: failed to get elements. %d\n", (int)value);
                                ret = -EINVAL;
                        }
                        maru_brill_codec_info_cache();
@@ -517,7 +514,7 @@ static long maru_brill_codec_ioctl(struct file *file,
                LEAVE_CRITICAL_SECTION(flags);
 
                if (copy_to_user((void *)arg, &len, sizeof(uint32_t))) {
-                       ERROR("ioctl: failed to copy data to user\n");
+                       CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user\n");
                        ret = -EIO;
                }
                break;
@@ -527,33 +524,33 @@ static long maru_brill_codec_ioctl(struct file *file,
                void *codec_elem = NULL;
                uint32_t elem_len = maru_brill_codec->codec_elem.buf_size;
 
-               DEBUG("request codec elements.\n");
+               CODEC_DBG(KERN_DEBUG, "request codec elements.\n");
 
                codec_elem = maru_brill_codec->codec_elem.buf;
                if (!codec_elem) {
-                       ERROR("ioctl: codec elements is empty\n");
+                       CODEC_DBG(KERN_ERR, "ioctl: codec elements is empty\n");
                        ret = -EIO;
                } else if (copy_to_user((void *)arg, codec_elem, elem_len)) {
-                       ERROR("ioctl: failed to copy data to user\n");
+                       CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user\n");
                        ret = -EIO;
                }
                break;
        }
        case CODEC_CMD_GET_CONTEXT_INDEX:
        {
-               DEBUG("request a device to get an index of codec context \n");
+               CODEC_DBG(KERN_DEBUG, "request a device to get an index of codec context \n");
 
                value = readl(maru_brill_codec->ioaddr + cmd);
                if (value < 1 || value > (CODEC_CONTEXT_SIZE - 1)) {
-                       ERROR("ioctl: failed to get proper context. %d\n", (int)value);
+                       CODEC_DBG(KERN_ERR, "ioctl: failed to get proper context. %d\n", (int)value);
                        ret = -EINVAL;
                } else {
                        // task_id & context_id
-                       DEBUG("add context. ctx_id: %d\n", (int)value);
+                       CODEC_DBG(KERN_DEBUG, "add context. ctx_id: %d\n", (int)value);
                        context_add((uint32_t)file, value);
 
                        if (copy_to_user((void *)arg, &value, sizeof(int))) {
-                               ERROR("ioctl: failed to copy data to user\n");
+                               CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user\n");
                                ret = -EIO;
                        }
                }
@@ -564,7 +561,7 @@ static long maru_brill_codec_ioctl(struct file *file,
                struct codec_buffer_id opaque;
 
                if (copy_from_user(&opaque, (void *)arg, sizeof(struct codec_buffer_id))) {
-                       ERROR("ioctl: failed to copy data from user\n");
+                       CODEC_DBG(KERN_ERR, "ioctl: failed to copy data from user\n");
                        ret = -EIO;
                        break;
                }
@@ -575,7 +572,7 @@ static long maru_brill_codec_ioctl(struct file *file,
                }
 
                if (copy_to_user((void *)arg, &opaque, sizeof(struct codec_buffer_id))) {
-                       ERROR("ioctl: failed to copy data to user.\n");
+                       CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user.\n");
                        ret = -EIO;
                }
                break;
@@ -585,21 +582,21 @@ static long maru_brill_codec_ioctl(struct file *file,
                uint32_t offset = 0;
                struct codec_buffer_id opaque;
 
-               DEBUG("read data into small buffer\n");
+               CODEC_DBG(KERN_DEBUG, "read data into small buffer\n");
                if (copy_from_user(&opaque, (void *)arg, sizeof(struct codec_buffer_id))) {
-                       ERROR("ioctl: failed to copy data from user\n");
+                       CODEC_DBG(KERN_ERR, "ioctl: failed to copy data from user\n");
                        ret = -EIO;
                        break;
                }
 
                value = secure_device_memory(opaque.buffer_index, opaque.buffer_size, 0, &offset);
                if (value < 0) {
-                       DEBUG("failed to get available memory\n");
+                       CODEC_DBG(KERN_DEBUG, "failed to get available memory\n");
                        ret = -EINVAL;
                } else {
                        opaque.buffer_size = offset;
                        if (copy_to_user((void *)arg, &opaque, sizeof(struct codec_buffer_id))) {
-                               ERROR("ioctl: failed to copy data to user.\n");
+                               CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user.\n");
                                ret = -EIO;
                        }
                }
@@ -610,21 +607,21 @@ static long maru_brill_codec_ioctl(struct file *file,
                uint32_t offset = 0;
                struct codec_buffer_id opaque;
 
-               DEBUG("read data into small buffer\n");
+               CODEC_DBG(KERN_DEBUG, "read data into small buffer\n");
                if (copy_from_user(&opaque, (void *)arg, sizeof(struct codec_buffer_id))) {
-                       ERROR("ioctl: failed to copy data from user\n");
+                       CODEC_DBG(KERN_ERR, "ioctl: failed to copy data from user\n");
                        ret = -EIO;
                        break;
                }
 
                value = secure_device_memory(opaque.buffer_index, opaque.buffer_size, 1, &offset);
                if (value < 0) {
-                       DEBUG("failed to get available memory\n");
+                       CODEC_DBG(KERN_DEBUG, "failed to get available memory\n");
                        ret = -EINVAL;
                } else {
                        opaque.buffer_size = offset;
                        if (copy_to_user((void *)arg, &opaque, sizeof(struct codec_buffer_id))) {
-                               ERROR("ioctl: failed to copy data to user.\n");
+                               CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user.\n");
                                ret = -EIO;
                        }
                }
@@ -635,7 +632,7 @@ static long maru_brill_codec_ioctl(struct file *file,
                uint32_t mem_offset;
 
                if (copy_from_user(&mem_offset, (void *)arg, sizeof(uint32_t))) {
-                       ERROR("ioctl: failed to copy data from user\n");
+                       CODEC_DBG(KERN_ERR, "ioctl: failed to copy data from user\n");
                        ret = -EIO;
                        break;
                }
@@ -647,7 +644,7 @@ static long maru_brill_codec_ioctl(struct file *file,
                struct codec_param ioparam = { 0, };
 
                if (copy_from_user(&ioparam, (void *)arg, sizeof(struct codec_param))) {
-                       ERROR("failed to get codec parameter info from user\n");
+                       CODEC_DBG(KERN_ERR, "failed to get codec parameter info from user\n");
                        ret = -EIO;
                        break;
                }
@@ -662,14 +659,14 @@ static long maru_brill_codec_ioctl(struct file *file,
                        }
 
                        if (copy_to_user((void *)arg, &ioparam, sizeof(struct codec_param))) {
-                               ERROR("ioctl: failed to copy data to user.\n");
+                               CODEC_DBG(KERN_ERR, "ioctl: failed to copy data to user.\n");
                                ret = -EIO;
                        }
                }
        }
                break;
        default:
-               DEBUG("no available command.");
+               CODEC_DBG(KERN_DEBUG, "no available command.");
                ret = -EINVAL;
                break;
        }
@@ -683,7 +680,7 @@ static int invoke_api_and_release_buffer(void *opaque)
        int api_index, ctx_index;
        unsigned long flags;
 
-       DEBUG("enter %s\n", __func__);
+       CODEC_DBG(KERN_DEBUG, "enter %s\n", __func__);
 
        api_index = ioparam->api_index;
        ctx_index = ioparam->ctx_index;
@@ -720,7 +717,7 @@ static int invoke_api_and_release_buffer(void *opaque)
                break;
        }
        default:
-               DEBUG("invalid API commands: %d", api_index);
+               CODEC_DBG(KERN_DEBUG, "invalid API commands: %d", api_index);
                return -1;
        }
 
@@ -731,7 +728,7 @@ static int invoke_api_and_release_buffer(void *opaque)
                dispose_device_memory(ioparam->ctx_index);
        }
 
-       DEBUG("leave %s\n", __func__);
+       CODEC_DBG(KERN_DEBUG, "leave %s\n", __func__);
 
        return 0;
 }
@@ -745,7 +742,7 @@ static int maru_brill_codec_mmap(struct file *file, struct vm_area_struct *vm)
 
        size = vm->vm_end - vm->vm_start;
        if (size > maru_brill_codec->mem_size) {
-               ERROR("over mapping size\n");
+               CODEC_DBG(KERN_ERR, "over mapping size\n");
                return -EINVAL;
        }
        off = vm->vm_pgoff << PAGE_SHIFT;
@@ -755,7 +752,7 @@ static int maru_brill_codec_mmap(struct file *file, struct vm_area_struct *vm)
        ret = remap_pfn_range(vm, vm->vm_start, phys_addr,
                        size, vm->vm_page_prot);
        if (ret < 0) {
-               ERROR("failed to remap page range\n");
+               CODEC_DBG(KERN_ERR, "failed to remap page range\n");
                return -EAGAIN;
        }
 
@@ -775,7 +772,7 @@ static irqreturn_t maru_brill_codec_irq_handler(int irq, void *dev_id)
 
        spin_lock_irqsave(&dev->lock, flags);
 
-       DEBUG("handle an interrupt from codec device.\n");
+       CODEC_DBG(KERN_DEBUG, "handle an interrupt from codec device.\n");
        codec_bh(dev);
 
        spin_unlock_irqrestore(&dev->lock, flags);
@@ -790,9 +787,9 @@ static void context_add(uint32_t user_pid, uint32_t ctx_id)
        struct context_id *cid_elem = NULL;
        unsigned long flags;
 
-       DEBUG("enter: %s\n", __func__);
+       CODEC_DBG(KERN_DEBUG, "enter: %s\n", __func__);
 
-       DEBUG("before inserting context. user_pid: %x, ctx_id: %d\n",
+       CODEC_DBG(KERN_DEBUG, "before inserting context. user_pid: %x, ctx_id: %d\n",
                        user_pid, ctx_id);
 
        ENTER_CRITICAL_SECTION(flags);
@@ -800,21 +797,21 @@ static void context_add(uint32_t user_pid, uint32_t ctx_id)
                list_for_each_safe(pos, temp, &maru_brill_codec->user_pid_mgr) {
                        pid_elem = list_entry(pos, struct user_process_id, pid_node);
 
-                       DEBUG("add context. pid_elem: %p\n", pid_elem);
+                       CODEC_DBG(KERN_DEBUG, "add context. pid_elem: %p\n", pid_elem);
                        if (pid_elem && pid_elem->id == user_pid) {
 
-                               DEBUG("add context. user_pid: %x, ctx_id: %d\n",
+                               CODEC_DBG(KERN_DEBUG, "add context. user_pid: %x, ctx_id: %d\n",
                                                user_pid, ctx_id);
 
                                cid_elem = kzalloc(sizeof(struct context_id), GFP_KERNEL);
                                if (!cid_elem) {
-                                       ERROR("failed to allocate context_mgr memory\n");
+                                       CODEC_DBG(KERN_ERR, "failed to allocate context_mgr memory\n");
                                        return;
                                }
 
                                INIT_LIST_HEAD(&cid_elem->node);
 
-                               DEBUG("add context. user_pid: %x, pid_elem: %p, cid_elem: %p, node: %p\n",
+                               CODEC_DBG(KERN_DEBUG, "add context. user_pid: %x, pid_elem: %p, cid_elem: %p, node: %p\n",
                                                user_pid, pid_elem, cid_elem, &cid_elem->node);
 
                                cid_elem->id = ctx_id;
@@ -822,11 +819,11 @@ static void context_add(uint32_t user_pid, uint32_t ctx_id)
                        }
                }
        } else {
-               DEBUG("user_pid_mgr is empty\n");
+               CODEC_DBG(KERN_DEBUG, "user_pid_mgr is empty\n");
        }
        LEAVE_CRITICAL_SECTION(flags);
 
-       DEBUG("leave: %s\n", __func__);
+       CODEC_DBG(KERN_DEBUG, "leave: %s\n", __func__);
 }
 
 static void maru_brill_codec_context_remove(struct user_process_id *pid_elem)
@@ -834,31 +831,31 @@ static void maru_brill_codec_context_remove(struct user_process_id *pid_elem)
        struct list_head *pos, *temp;
        struct context_id *cid_elem = NULL;
 
-       DEBUG("enter: %s\n", __func__);
+       CODEC_DBG(KERN_DEBUG, "enter: %s\n", __func__);
 
        if (!list_empty(&pid_elem->ctx_id_mgr)) {
                list_for_each_safe(pos, temp, &pid_elem->ctx_id_mgr) {
                        cid_elem = list_entry(pos, struct context_id, node);
                        if (cid_elem) {
                                if (cid_elem->id > 0 && cid_elem->id < CODEC_CONTEXT_SIZE) {
-                                       DEBUG("remove context. ctx_id: %d\n", cid_elem->id);
+                                       CODEC_DBG(KERN_DEBUG, "remove context. ctx_id: %d\n", cid_elem->id);
                                        writel(cid_elem->id,
                                                        maru_brill_codec->ioaddr + CODEC_CMD_RELEASE_CONTEXT);
                                        dispose_device_memory(cid_elem->id);
                                }
 
-                               DEBUG("delete node from ctx_id_mgr. %p\n", &cid_elem->node);
+                               CODEC_DBG(KERN_DEBUG, "delete node from ctx_id_mgr. %p\n", &cid_elem->node);
                                __list_del_entry(&cid_elem->node);
-                               DEBUG("release cid_elem. %p\n", cid_elem);
+                               CODEC_DBG(KERN_DEBUG, "release cid_elem. %p\n", cid_elem);
                                kfree(cid_elem);
                        } else {
-                               DEBUG("no context in the pid_elem\n");
+                               CODEC_DBG(KERN_DEBUG, "no context in the pid_elem\n");
                        }
                }
        } else {
-               DEBUG("ctx_id_mgr is empty. user_pid: %x\n", pid_elem->id);
+               CODEC_DBG(KERN_DEBUG, "ctx_id_mgr is empty. user_pid: %x\n", pid_elem->id);
        }
-       DEBUG("leave: %s\n", __func__);
+       CODEC_DBG(KERN_DEBUG, "leave: %s\n", __func__);
 }
 
 static void maru_brill_codec_task_add(uint32_t user_pid)
@@ -866,25 +863,25 @@ static void maru_brill_codec_task_add(uint32_t user_pid)
        struct user_process_id *pid_elem = NULL;
        unsigned long flags;
 
-       DEBUG("enter: %s\n", __func__);
+       CODEC_DBG(KERN_DEBUG, "enter: %s\n", __func__);
 
        ENTER_CRITICAL_SECTION(flags);
        pid_elem = kzalloc(sizeof(struct user_process_id), GFP_KERNEL);
        if (!pid_elem) {
-               ERROR("failed to allocate user_process memory\n");
+               CODEC_DBG(KERN_ERR, "failed to allocate user_process memory\n");
                return;
        }
 
        INIT_LIST_HEAD(&pid_elem->pid_node);
        INIT_LIST_HEAD(&pid_elem->ctx_id_mgr);
 
-       DEBUG("add task. user_pid: %x, pid_elem: %p, pid_node: %p\n",
+       CODEC_DBG(KERN_DEBUG, "add task. user_pid: %x, pid_elem: %p, pid_node: %p\n",
                user_pid, pid_elem, &pid_elem->pid_node);
        pid_elem->id = user_pid;
        list_add_tail(&pid_elem->pid_node, &maru_brill_codec->user_pid_mgr);
        LEAVE_CRITICAL_SECTION(flags);
 
-       DEBUG("leave: %s\n", __func__);
+       CODEC_DBG(KERN_DEBUG, "leave: %s\n", __func__);
 }
 
 static void maru_brill_codec_task_remove(uint32_t user_pid)
@@ -893,7 +890,7 @@ static void maru_brill_codec_task_remove(uint32_t user_pid)
        struct user_process_id *pid_elem = NULL;
        unsigned long flags;
 
-       DEBUG("enter: %s\n", __func__);
+       CODEC_DBG(KERN_DEBUG, "enter: %s\n", __func__);
 
        ENTER_CRITICAL_SECTION(flags);
        if (!list_empty(&maru_brill_codec->user_pid_mgr)) {
@@ -902,36 +899,36 @@ static void maru_brill_codec_task_remove(uint32_t user_pid)
                        if (pid_elem) {
                                if (pid_elem->id == user_pid) {
                                        // remove task and codec contexts that is running in the task.
-                                       DEBUG("remove task. user_pid: %x, pid_elem: %p\n",
+                                       CODEC_DBG(KERN_DEBUG, "remove task. user_pid: %x, pid_elem: %p\n",
                                                        user_pid, pid_elem);
                                        maru_brill_codec_context_remove(pid_elem);
                                }
 
-                               DEBUG("move pid_node from user_pid_mgr. %p\n", &pid_elem->pid_node);
+                               CODEC_DBG(KERN_DEBUG, "move pid_node from user_pid_mgr. %p\n", &pid_elem->pid_node);
                                __list_del_entry(&pid_elem->pid_node);
-                               DEBUG("release pid_elem. %p\n", pid_elem);
+                               CODEC_DBG(KERN_DEBUG, "release pid_elem. %p\n", pid_elem);
                                kfree(pid_elem);
                        } else {
-                               DEBUG("no task in the user_pid_mgr\n");
+                               CODEC_DBG(KERN_DEBUG, "no task in the user_pid_mgr\n");
                        }
                }
        } else {
-               DEBUG("user_pid_mgr is empty\n");
+               CODEC_DBG(KERN_DEBUG, "user_pid_mgr is empty\n");
        }
        LEAVE_CRITICAL_SECTION(flags);
 
-       DEBUG("leave: %s\n", __func__);
+       CODEC_DBG(KERN_DEBUG, "leave: %s\n", __func__);
 }
 
 
 static int maru_brill_codec_open(struct inode *inode, struct file *file)
 {
-       DEBUG("open! struct file: %p\n", file);
+       CODEC_DBG(KERN_DEBUG, "open! struct file: %p\n", file);
 
        /* register interrupt handler */
        if (request_irq(maru_brill_codec->dev->irq, maru_brill_codec_irq_handler,
                IRQF_SHARED, DEVICE_NAME, maru_brill_codec)) {
-               ERROR("failed to register irq handle\n");
+               CODEC_DBG(KERN_ERR, "failed to register irq handle\n");
                return -EBUSY;
        }
 
@@ -944,15 +941,15 @@ static int maru_brill_codec_open(struct inode *inode, struct file *file)
 
 static int maru_brill_codec_release(struct inode *inode, struct file *file)
 {
-       DEBUG("close! struct file: %p\n", file);
+       CODEC_DBG(KERN_DEBUG, "close! struct file: %p\n", file);
 
        /* free irq */
        if (maru_brill_codec->dev->irq) {
-               DEBUG("free registered irq\n");
+               CODEC_DBG(KERN_DEBUG, "free registered irq\n");
                free_irq(maru_brill_codec->dev->irq, maru_brill_codec);
        }
 
-       DEBUG("before removing task: %x\n", (uint32_t)file);
+       CODEC_DBG(KERN_DEBUG, "before removing task: %x\n", (uint32_t)file);
        /* free resource */
        maru_brill_codec_task_remove((uint32_t)file);
 
@@ -997,7 +994,7 @@ static int maru_brill_codec_probe(struct pci_dev *pci_dev,
        maru_brill_codec =
                kzalloc(sizeof(struct maru_brill_codec_device), GFP_KERNEL);
        if (!maru_brill_codec) {
-               ERROR("Failed to allocate memory for codec.\n");
+               CODEC_DBG(KERN_ERR, "Failed to allocate memory for codec.\n");
                return -ENOMEM;
        }
 
@@ -1032,7 +1029,7 @@ static int maru_brill_codec_probe(struct pci_dev *pci_dev,
        spin_lock_init(&maru_brill_codec->lock);
 
        if ((ret = pci_enable_device(pci_dev))) {
-               ERROR("pci_enable_device failed\n");
+               CODEC_DBG(KERN_ERR, "pci_enable_device failed\n");
                return ret;
        }
        pci_set_master(pci_dev);
@@ -1040,7 +1037,7 @@ static int maru_brill_codec_probe(struct pci_dev *pci_dev,
        maru_brill_codec->mem_start = pci_resource_start(pci_dev, 0);
        maru_brill_codec->mem_size = pci_resource_len(pci_dev, 0);
        if (!maru_brill_codec->mem_start) {
-               ERROR("pci_resource_start failed\n");
+               CODEC_DBG(KERN_ERR, "pci_resource_start failed\n");
                pci_disable_device(pci_dev);
                return -ENODEV;
        }
@@ -1048,7 +1045,7 @@ static int maru_brill_codec_probe(struct pci_dev *pci_dev,
        if (!request_mem_region(maru_brill_codec->mem_start,
                                maru_brill_codec->mem_size,
                                DEVICE_NAME)) {
-               ERROR("request_mem_region failed\n");
+               CODEC_DBG(KERN_ERR, "request_mem_region failed\n");
                pci_disable_device(pci_dev);
                return -EINVAL;
        }
@@ -1056,7 +1053,7 @@ static int maru_brill_codec_probe(struct pci_dev *pci_dev,
        maru_brill_codec->io_start = pci_resource_start(pci_dev, 1);
        maru_brill_codec->io_size = pci_resource_len(pci_dev, 1);
        if (!maru_brill_codec->io_start) {
-               ERROR("pci_resource_start failed\n");
+               CODEC_DBG(KERN_ERR, "pci_resource_start failed\n");
                release_mem_region(maru_brill_codec->mem_start, maru_brill_codec->mem_size);
                pci_disable_device(pci_dev);
                return -ENODEV;
@@ -1065,7 +1062,7 @@ static int maru_brill_codec_probe(struct pci_dev *pci_dev,
        if (!request_mem_region(maru_brill_codec->io_start,
                                maru_brill_codec->io_size,
                                DEVICE_NAME)) {
-               ERROR("request_io_region failed\n");
+               CODEC_DBG(KERN_ERR, "request_io_region failed\n");
                release_mem_region(maru_brill_codec->mem_start, maru_brill_codec->mem_size);
                pci_disable_device(pci_dev);
                return -EINVAL;
@@ -1074,7 +1071,7 @@ static int maru_brill_codec_probe(struct pci_dev *pci_dev,
        maru_brill_codec->ioaddr =
                ioremap_nocache(maru_brill_codec->io_start, maru_brill_codec->io_size);
        if (!maru_brill_codec->ioaddr) {
-               ERROR("ioremap failed\n");
+               CODEC_DBG(KERN_ERR, "ioremap failed\n");
                release_mem_region(maru_brill_codec->io_start, maru_brill_codec->io_size);
                release_mem_region(maru_brill_codec->mem_start, maru_brill_codec->mem_size);
                pci_disable_device(pci_dev);
@@ -1084,7 +1081,7 @@ static int maru_brill_codec_probe(struct pci_dev *pci_dev,
        maru_brill_codec_get_device_version();
 
        if ((ret = misc_register(&codec_dev))) {
-               ERROR("cannot register codec as misc\n");
+               CODEC_DBG(KERN_ERR, "cannot register codec as misc\n");
                iounmap(maru_brill_codec->ioaddr);
                release_mem_region(maru_brill_codec->io_start, maru_brill_codec->io_size);
                release_mem_region(maru_brill_codec->mem_start, maru_brill_codec->mem_size);
@@ -1159,7 +1156,7 @@ static int __init maru_brill_codec_init(void)
 
        codec_bh_workqueue = create_workqueue ("maru_brill_codec");
        if (!codec_bh_workqueue) {
-               ERROR("failed to allocate workqueue\n");
+               CODEC_DBG(KERN_ERR, "failed to allocate workqueue\n");
                return -ENOMEM;
        }
 
index 5c45c9d0712ddf949f0ac571b5649111585046ae..9c29552aedec2e7b1a08447619671cd65c170b77 100644 (file)
@@ -156,6 +156,9 @@ static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struc
                   0x00, 0x00, 0x00, 0x00,
                   0x00, 0x00 };
 
+       if (cmd->msg_len > sizeof(b) - 4)
+               return -EINVAL;
+
        memcpy(&b[4], cmd->msg, cmd->msg_len);
 
        state->config->send_command(fe, 0x72,
index 36a7740e827c997ebbc156bfcb6e908cb02d1804..cc5a430dc357cb276cd46dff9c7977b0d02f3a4b 100644 (file)
@@ -521,6 +521,10 @@ static void command_port_read_callback(struct urb *urb)
                dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__);
                return;
        }
+       if (!urb->actual_length) {
+               dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__);
+               return;
+       }
        if (status) {
                dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status);
                if (status != -ENOENT)
@@ -541,7 +545,8 @@ static void command_port_read_callback(struct urb *urb)
                /* These are unsolicited reports from the firmware, hence no
                   waiting command to wakeup */
                dev_dbg(&urb->dev->dev, "%s - event received\n", __func__);
-       } else if (data[0] == WHITEHEAT_GET_DTR_RTS) {
+       } else if ((data[0] == WHITEHEAT_GET_DTR_RTS) &&
+               (urb->actual_length - 1 <= sizeof(command_info->result_buffer))) {
                memcpy(command_info->result_buffer, &data[1],
                                                urb->actual_length - 1);
                command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
index e5d408a7ea4a27c88af2a1dcae32259eab1f7083..2e2af97df0752c7cccd26352ad9acf6070b06e61 100644 (file)
@@ -61,7 +61,7 @@ static void isofs_put_super(struct super_block *sb)
        return;
 }
 
-static int isofs_read_inode(struct inode *);
+static int isofs_read_inode(struct inode *, int relocated);
 static int isofs_statfs (struct dentry *, struct kstatfs *);
 
 static struct kmem_cache *isofs_inode_cachep;
@@ -1258,7 +1258,7 @@ out_toomany:
        goto out;
 }
 
-static int isofs_read_inode(struct inode *inode)
+static int isofs_read_inode(struct inode *inode, int relocated)
 {
        struct super_block *sb = inode->i_sb;
        struct isofs_sb_info *sbi = ISOFS_SB(sb);
@@ -1403,7 +1403,7 @@ static int isofs_read_inode(struct inode *inode)
         */
 
        if (!high_sierra) {
-               parse_rock_ridge_inode(de, inode);
+               parse_rock_ridge_inode(de, inode, relocated);
                /* if we want uid/gid set, override the rock ridge setting */
                if (sbi->s_uid_set)
                        inode->i_uid = sbi->s_uid;
@@ -1482,9 +1482,10 @@ static int isofs_iget5_set(struct inode *ino, void *data)
  * offset that point to the underlying meta-data for the inode.  The
  * code below is otherwise similar to the iget() code in
  * include/linux/fs.h */
-struct inode *isofs_iget(struct super_block *sb,
-                        unsigned long block,
-                        unsigned long offset)
+struct inode *__isofs_iget(struct super_block *sb,
+                          unsigned long block,
+                          unsigned long offset,
+                          int relocated)
 {
        unsigned long hashval;
        struct inode *inode;
@@ -1506,7 +1507,7 @@ struct inode *isofs_iget(struct super_block *sb,
                return ERR_PTR(-ENOMEM);
 
        if (inode->i_state & I_NEW) {
-               ret = isofs_read_inode(inode);
+               ret = isofs_read_inode(inode, relocated);
                if (ret < 0) {
                        iget_failed(inode);
                        inode = ERR_PTR(ret);
index 99167238518d61a30c4a5e6bfc838291d6206a5d..0ac4c1f73fbd6c2616e04ad6310995426c03da68 100644 (file)
@@ -107,7 +107,7 @@ extern int iso_date(char *, int);
 
 struct inode;          /* To make gcc happy */
 
-extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *);
+extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *, int relocated);
 extern int get_rock_ridge_filename(struct iso_directory_record *, char *, struct inode *);
 extern int isofs_name_translate(struct iso_directory_record *, char *, struct inode *);
 
@@ -118,9 +118,24 @@ extern struct dentry *isofs_lookup(struct inode *, struct dentry *, unsigned int
 extern struct buffer_head *isofs_bread(struct inode *, sector_t);
 extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
 
-extern struct inode *isofs_iget(struct super_block *sb,
-                                unsigned long block,
-                                unsigned long offset);
+struct inode *__isofs_iget(struct super_block *sb,
+                          unsigned long block,
+                          unsigned long offset,
+                          int relocated);
+
+static inline struct inode *isofs_iget(struct super_block *sb,
+                                      unsigned long block,
+                                      unsigned long offset)
+{
+       return __isofs_iget(sb, block, offset, 0);
+}
+
+static inline struct inode *isofs_iget_reloc(struct super_block *sb,
+                                            unsigned long block,
+                                            unsigned long offset)
+{
+       return __isofs_iget(sb, block, offset, 1);
+}
 
 /* Because the inode number is no longer relevant to finding the
  * underlying meta-data for an inode, we are free to choose a more
index c0bf42472e408fd16911cee33f3d9079943aa46a..735d7522a3a911f19af593d6b5f7d366d6cf448d 100644 (file)
@@ -30,6 +30,7 @@ struct rock_state {
        int cont_size;
        int cont_extent;
        int cont_offset;
+       int cont_loops;
        struct inode *inode;
 };
 
@@ -73,6 +74,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode)
        rs->inode = inode;
 }
 
+/* Maximum number of Rock Ridge continuation entries */
+#define RR_MAX_CE_ENTRIES 32
+
 /*
  * Returns 0 if the caller should continue scanning, 1 if the scan must end
  * and -ve on error.
@@ -105,6 +109,8 @@ static int rock_continue(struct rock_state *rs)
                        goto out;
                }
                ret = -EIO;
+               if (++rs->cont_loops >= RR_MAX_CE_ENTRIES)
+                       goto out;
                bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
                if (bh) {
                        memcpy(rs->buffer, bh->b_data + rs->cont_offset,
@@ -288,12 +294,16 @@ eio:
        goto out;
 }
 
+#define RR_REGARD_XA 1
+#define RR_RELOC_DE 2
+
 static int
 parse_rock_ridge_inode_internal(struct iso_directory_record *de,
-                               struct inode *inode, int regard_xa)
+                               struct inode *inode, int flags)
 {
        int symlink_len = 0;
        int cnt, sig;
+       unsigned int reloc_block;
        struct inode *reloc;
        struct rock_ridge *rr;
        int rootflag;
@@ -305,7 +315,7 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de,
 
        init_rock_state(&rs, inode);
        setup_rock_ridge(de, inode, &rs);
-       if (regard_xa) {
+       if (flags & RR_REGARD_XA) {
                rs.chr += 14;
                rs.len -= 14;
                if (rs.len < 0)
@@ -352,6 +362,9 @@ repeat:
                        rs.cont_size = isonum_733(rr->u.CE.size);
                        break;
                case SIG('E', 'R'):
+                       /* Invalid length of ER tag id? */
+                       if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
+                               goto out;
                        ISOFS_SB(inode->i_sb)->s_rock = 1;
                        printk(KERN_DEBUG "ISO 9660 Extensions: ");
                        {
@@ -485,12 +498,22 @@ repeat:
                                        "relocated directory\n");
                        goto out;
                case SIG('C', 'L'):
-                       ISOFS_I(inode)->i_first_extent =
-                           isonum_733(rr->u.CL.location);
-                       reloc =
-                           isofs_iget(inode->i_sb,
-                                      ISOFS_I(inode)->i_first_extent,
-                                      0);
+                       if (flags & RR_RELOC_DE) {
+                               printk(KERN_ERR
+                                      "ISOFS: Recursive directory relocation "
+                                      "is not supported\n");
+                               goto eio;
+                       }
+                       reloc_block = isonum_733(rr->u.CL.location);
+                       if (reloc_block == ISOFS_I(inode)->i_iget5_block &&
+                           ISOFS_I(inode)->i_iget5_offset == 0) {
+                               printk(KERN_ERR
+                                      "ISOFS: Directory relocation points to "
+                                      "itself\n");
+                               goto eio;
+                       }
+                       ISOFS_I(inode)->i_first_extent = reloc_block;
+                       reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0);
                        if (IS_ERR(reloc)) {
                                ret = PTR_ERR(reloc);
                                goto out;
@@ -637,9 +660,11 @@ static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
        return rpnt;
 }
 
-int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
+int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
+                          int relocated)
 {
-       int result = parse_rock_ridge_inode_internal(de, inode, 0);
+       int flags = relocated ? RR_RELOC_DE : 0;
+       int result = parse_rock_ridge_inode_internal(de, inode, flags);
 
        /*
         * if rockridge flag was reset and we didn't look for attributes
@@ -647,7 +672,8 @@ int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
         */
        if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
            && (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
-               result = parse_rock_ridge_inode_internal(de, inode, 14);
+               result = parse_rock_ridge_inode_internal(de, inode,
+                                                        flags | RR_REGARD_XA);
        }
        return result;
 }
index 84447dbcb650eecba1821d9ef58d1f638f743c9d..4ea2b7378d8ca0c608cf9ad5013f8088ecd6a656 100644 (file)
@@ -827,8 +827,21 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
 
        mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
        /* Don't allow unprivileged users to change mount flags */
-       if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
-               mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
+       if (flag & CL_UNPRIVILEGED) {
+               mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
+
+               if (mnt->mnt.mnt_flags & MNT_READONLY)
+                       mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
+
+               if (mnt->mnt.mnt_flags & MNT_NODEV)
+                       mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;
+
+               if (mnt->mnt.mnt_flags & MNT_NOSUID)
+                       mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;
+
+               if (mnt->mnt.mnt_flags & MNT_NOEXEC)
+                       mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
+       }
 
        /* Don't allow unprivileged users to reveal what is under a mount */
        if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
@@ -1806,9 +1819,6 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
        if (readonly_request == __mnt_is_readonly(mnt))
                return 0;
 
-       if (mnt->mnt_flags & MNT_LOCK_READONLY)
-               return -EPERM;
-
        if (readonly_request)
                error = mnt_make_readonly(real_mount(mnt));
        else
@@ -1834,6 +1844,33 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
        if (path->dentry != path->mnt->mnt_root)
                return -EINVAL;
 
+       /* Don't allow changing of locked mnt flags.
+        *
+        * No locks need to be held here while testing the various
+        * MNT_LOCK flags because those flags can never be cleared
+        * once they are set.
+        */
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
+           !(mnt_flags & MNT_READONLY)) {
+               return -EPERM;
+       }
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
+           !(mnt_flags & MNT_NODEV)) {
+               return -EPERM;
+       }
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
+           !(mnt_flags & MNT_NOSUID)) {
+               return -EPERM;
+       }
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
+           !(mnt_flags & MNT_NOEXEC)) {
+               return -EPERM;
+       }
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
+           ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
+               return -EPERM;
+       }
+
        err = security_sb_remount(sb, data);
        if (err)
                return err;
@@ -1847,7 +1884,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
                err = do_remount_sb(sb, flags, data, 0);
        if (!err) {
                br_write_lock(&vfsmount_lock);
-               mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
+               mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
                mnt->mnt.mnt_flags = mnt_flags;
                br_write_unlock(&vfsmount_lock);
        }
@@ -2036,7 +2073,7 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
                 */
                if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
                        flags |= MS_NODEV;
-                       mnt_flags |= MNT_NODEV;
+                       mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
                }
        }
 
@@ -2354,6 +2391,14 @@ long do_mount(const char *dev_name, const char *dir_name,
        if (flags & MS_RDONLY)
                mnt_flags |= MNT_READONLY;
 
+       /* The default atime for remount is preservation */
+       if ((flags & MS_REMOUNT) &&
+           ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
+                      MS_STRICTATIME)) == 0)) {
+               mnt_flags &= ~MNT_ATIME_MASK;
+               mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
+       }
+
        flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
                   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
                   MS_STRICTATIME);
index 38cd98f112a0e4ccb62e32d94a2d1065ce76b2dd..af99cff671df926979cf326e8735b18e90eea749 100644 (file)
@@ -42,11 +42,17 @@ struct mnt_namespace;
  * flag, consider how it interacts with shared mounts.
  */
 #define MNT_SHARED_MASK        (MNT_UNBINDABLE)
-#define MNT_PROPAGATION_MASK   (MNT_SHARED | MNT_UNBINDABLE)
-
+#define MNT_USER_SETTABLE_MASK  (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
+                                | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
+                                | MNT_READONLY)
+#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
 
 #define MNT_INTERNAL   0x4000
 
+#define MNT_LOCK_ATIME         0x040000
+#define MNT_LOCK_NOEXEC                0x080000
+#define MNT_LOCK_NOSUID                0x100000
+#define MNT_LOCK_NODEV         0x200000
 #define MNT_LOCK_READONLY      0x400000
 #define MNT_LOCKED             0x800000
 
index 3794c5ad20fef72a960c34323dab275d6f26bac7..3848934ab1627423b514001bbdbd3c8c6a9209a2 100644 (file)
@@ -454,6 +454,11 @@ static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_associat
        asoc->pmtu_pending = 0;
 }
 
+static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
+{
+       return !list_empty(&chunk->list);
+}
+
 /* Walk through a list of TLV parameters.  Don't trust the
  * individual parameter lengths and instead depend on
  * the chunk length to indicate when to stop.  Make sure
index 4ef75af340b633c7e8a5fabeb34dd88cdbd73ef2..c91b6f5c07a555baa3a3f194ff00c6328ea66e7d 100644 (file)
@@ -249,9 +249,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
                                              int, __be16);
 struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
                                             union sctp_addr *addr);
-int sctp_verify_asconf(const struct sctp_association *asoc,
-                      struct sctp_paramhdr *param_hdr, void *chunk_end,
-                      struct sctp_paramhdr **errp);
+bool sctp_verify_asconf(const struct sctp_association *asoc,
+                       struct sctp_chunk *chunk, bool addr_param_needed,
+                       struct sctp_paramhdr **errp);
 struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
                                       struct sctp_chunk *asconf);
 int sctp_process_asconf_ack(struct sctp_association *asoc,
index 2a14f1f02d4f6766e236b288f6173ee7391e78a2..d6bc9616058b0c245c965241efb882391bcd60ed 100644 (file)
@@ -121,6 +121,8 @@ struct snd_card {
        int user_ctl_count;             /* count of all user controls */
        struct list_head controls;      /* all controls for this card */
        struct list_head ctl_files;     /* active control files */
+       struct mutex user_ctl_lock;     /* protects user controls against
+                                          concurrent access */
 
        struct snd_info_entry *proc_root;       /* root for soundcard specific files */
        struct snd_info_entry *proc_id; /* the card id */
index d8347b7a064f8b1c0e7e99bcd29e927637880762..4c725cf8510c37b591e464d8fce8491cf8fe57f0 100644 (file)
@@ -748,10 +748,18 @@ retry:
                return -EDEADLK;
 
        /*
-        * Surprise - we got the lock. Just return to userspace:
+        * Surprise - we got the lock, but we do not trust user space at all.
         */
-       if (unlikely(!curval))
-               return 1;
+       if (unlikely(!curval)) {
+               /*
+                * We verify whether there is kernel state for this
+                * futex. If not, we can safely assume, that the 0 ->
+                * TID transition is correct. If state exists, we do
+                * not bother to fixup the user space state as it was
+                * corrupted already.
+                */
+               return futex_top_waiter(hb, key) ? -EINVAL : 1;
+       }
 
        uval = curval;
 
@@ -881,6 +889,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
        struct task_struct *new_owner;
        struct futex_pi_state *pi_state = this->pi_state;
        u32 uninitialized_var(curval), newval;
+       int ret = 0;
 
        if (!pi_state)
                return -EINVAL;
@@ -904,23 +913,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
                new_owner = this->task;
 
        /*
-        * We pass it to the next owner. (The WAITERS bit is always
-        * kept enabled while there is PI state around. We must also
-        * preserve the owner died bit.)
+        * We pass it to the next owner. The WAITERS bit is always
+        * kept enabled while there is PI state around. We cleanup the
+        * owner died bit, because we are the owner.
         */
-       if (!(uval & FUTEX_OWNER_DIED)) {
-               int ret = 0;
-
-               newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+       newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
 
-               if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
-                       ret = -EFAULT;
-               else if (curval != uval)
-                       ret = -EINVAL;
-               if (ret) {
-                       raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
-                       return ret;
-               }
+       if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+               ret = -EFAULT;
+       else if (curval != uval)
+               ret = -EINVAL;
+       if (ret) {
+               raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+               return ret;
        }
 
        raw_spin_lock_irq(&pi_state->owner->pi_lock);
@@ -1277,6 +1282,13 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
        u32 curval2;
 
        if (requeue_pi) {
+               /*
+                * Requeue PI only works on two distinct uaddrs. This
+                * check is only valid for private futexes. See below.
+                */
+               if (uaddr1 == uaddr2)
+                       return -EINVAL;
+
                /*
                 * requeue_pi requires a pi_state, try to allocate it now
                 * without any locks in case it fails.
@@ -1315,6 +1327,15 @@ retry:
        if (unlikely(ret != 0))
                goto out_put_key1;
 
+       /*
+        * The check above which compares uaddrs is not sufficient for
+        * shared futexes. We need to compare the keys:
+        */
+       if (requeue_pi && match_futex(&key1, &key2)) {
+               ret = -EINVAL;
+               goto out_put_keys;
+       }
+
        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);
 
@@ -2139,9 +2160,10 @@ retry:
        /*
         * To avoid races, try to do the TID -> 0 atomic transition
         * again. If it succeeds then we can return without waking
-        * anyone else up:
+        * anyone else up. We only try this if neither the waiters nor
+        * the owner died bit are set.
         */
-       if (!(uval & FUTEX_OWNER_DIED) &&
+       if (!(uval & ~FUTEX_TID_MASK) &&
            cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
                goto pi_faulted;
        /*
@@ -2173,11 +2195,9 @@ retry:
        /*
         * No waiters - kernel unlocks the futex:
         */
-       if (!(uval & FUTEX_OWNER_DIED)) {
-               ret = unlock_futex_pi(uaddr, uval);
-               if (ret == -EFAULT)
-                       goto pi_faulted;
-       }
+       ret = unlock_futex_pi(uaddr, uval);
+       if (ret == -EFAULT)
+               goto pi_faulted;
 
 out_unlock:
        spin_unlock(&hb->lock);
@@ -2336,6 +2356,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
        if (ret)
                goto out_key2;
 
+       /*
+        * The check above which compares uaddrs is not sufficient for
+        * shared futexes. We need to compare the keys:
+        */
+       if (match_futex(&q.key, &key2)) {
+               ret = -EINVAL;
+               goto out_put_keys;
+       }
+
        /* Queue the futex_q, drop the hb lock, wait for wakeup. */
        futex_wait_queue_me(hb, &q, to);
 
index 13fb1134ba582e49c8aa3643feada72a2b0dae8b..eef9c004abc18bf6d16bbd431b669d90b5d64e5d 100644 (file)
@@ -799,7 +799,9 @@ static bool new_idmap_permitted(const struct file *file,
                                struct user_namespace *ns, int cap_setid,
                                struct uid_gid_map *new_map)
 {
-       /* Allow mapping to your own filesystem ids */
+       /* Don't allow mappings that would allow anything that wouldn't
+        * be allowed without the establishment of unprivileged mappings.
+        */
        if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) {
                u32 id = new_map->extent[0].lower_first;
                if (cap_setid == CAP_SETUID) {
index df6839e3ce0886a481e8565f8b19d5c71c9b299a..99a03acb7d470570b816ada76f8045d9930c6a27 100644 (file)
@@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
                        len = *ip++;
                        for (; len == 255; length += 255)
                                len = *ip++;
+                       if (unlikely(length > (size_t)(length + len)))
+                               goto _output_error;
                        length += len;
                }
 
index 569985d522d518a8992929d5924b6a5062ff9e93..8563081e8da38fb81e0335d2589c9fcebcd81266 100644 (file)
 #include <linux/lzo.h>
 #include "lzodefs.h"
 
-#define HAVE_IP(x)      ((size_t)(ip_end - ip) >= (size_t)(x))
-#define HAVE_OP(x)      ((size_t)(op_end - op) >= (size_t)(x))
-#define NEED_IP(x)      if (!HAVE_IP(x)) goto input_overrun
-#define NEED_OP(x)      if (!HAVE_OP(x)) goto output_overrun
-#define TEST_LB(m_pos)  if ((m_pos) < out) goto lookbehind_overrun
+#define HAVE_IP(t, x)                                  \
+       (((size_t)(ip_end - ip) >= (size_t)(t + x)) &&  \
+        (((t + x) >= t) && ((t + x) >= x)))
+
+#define HAVE_OP(t, x)                                  \
+       (((size_t)(op_end - op) >= (size_t)(t + x)) &&  \
+        (((t + x) >= t) && ((t + x) >= x)))
+
+#define NEED_IP(t, x)                                  \
+       do {                                            \
+               if (!HAVE_IP(t, x))                     \
+                       goto input_overrun;             \
+       } while (0)
+
+#define NEED_OP(t, x)                                  \
+       do {                                            \
+               if (!HAVE_OP(t, x))                     \
+                       goto output_overrun;            \
+       } while (0)
+
+#define TEST_LB(m_pos)                                 \
+       do {                                            \
+               if ((m_pos) < out)                      \
+                       goto lookbehind_overrun;        \
+       } while (0)
 
 int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
                          unsigned char *out, size_t *out_len)
@@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
                                        while (unlikely(*ip == 0)) {
                                                t += 255;
                                                ip++;
-                                               NEED_IP(1);
+                                               NEED_IP(1, 0);
                                        }
                                        t += 15 + *ip++;
                                }
                                t += 3;
 copy_literal_run:
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-                               if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+                               if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
                                        const unsigned char *ie = ip + t;
                                        unsigned char *oe = op + t;
                                        do {
@@ -81,8 +101,8 @@ copy_literal_run:
                                } else
 #endif
                                {
-                                       NEED_OP(t);
-                                       NEED_IP(t + 3);
+                                       NEED_OP(t, 0);
+                                       NEED_IP(t, 3);
                                        do {
                                                *op++ = *ip++;
                                        } while (--t > 0);
@@ -95,7 +115,7 @@ copy_literal_run:
                                m_pos -= t >> 2;
                                m_pos -= *ip++ << 2;
                                TEST_LB(m_pos);
-                               NEED_OP(2);
+                               NEED_OP(2, 0);
                                op[0] = m_pos[0];
                                op[1] = m_pos[1];
                                op += 2;
@@ -119,10 +139,10 @@ copy_literal_run:
                                while (unlikely(*ip == 0)) {
                                        t += 255;
                                        ip++;
-                                       NEED_IP(1);
+                                       NEED_IP(1, 0);
                                }
                                t += 31 + *ip++;
-                               NEED_IP(2);
+                               NEED_IP(2, 0);
                        }
                        m_pos = op - 1;
                        next = get_unaligned_le16(ip);
@@ -137,10 +157,10 @@ copy_literal_run:
                                while (unlikely(*ip == 0)) {
                                        t += 255;
                                        ip++;
-                                       NEED_IP(1);
+                                       NEED_IP(1, 0);
                                }
                                t += 7 + *ip++;
-                               NEED_IP(2);
+                               NEED_IP(2, 0);
                        }
                        next = get_unaligned_le16(ip);
                        ip += 2;
@@ -154,7 +174,7 @@ copy_literal_run:
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
                if (op - m_pos >= 8) {
                        unsigned char *oe = op + t;
-                       if (likely(HAVE_OP(t + 15))) {
+                       if (likely(HAVE_OP(t, 15))) {
                                do {
                                        COPY8(op, m_pos);
                                        op += 8;
@@ -164,7 +184,7 @@ copy_literal_run:
                                        m_pos += 8;
                                } while (op < oe);
                                op = oe;
-                               if (HAVE_IP(6)) {
+                               if (HAVE_IP(6, 0)) {
                                        state = next;
                                        COPY4(op, ip);
                                        op += next;
@@ -172,7 +192,7 @@ copy_literal_run:
                                        continue;
                                }
                        } else {
-                               NEED_OP(t);
+                               NEED_OP(t, 0);
                                do {
                                        *op++ = *m_pos++;
                                } while (op < oe);
@@ -181,7 +201,7 @@ copy_literal_run:
 #endif
                {
                        unsigned char *oe = op + t;
-                       NEED_OP(t);
+                       NEED_OP(t, 0);
                        op[0] = m_pos[0];
                        op[1] = m_pos[1];
                        op += 2;
@@ -194,15 +214,15 @@ match_next:
                state = next;
                t = next;
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-               if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+               if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
                        COPY4(op, ip);
                        op += t;
                        ip += t;
                } else
 #endif
                {
-                       NEED_IP(t + 3);
-                       NEED_OP(t);
+                       NEED_IP(t, 3);
+                       NEED_OP(t, 0);
                        while (t > 0) {
                                *op++ = *ip++;
                                t--;
index 8297623fcaedec21b37b5080d376967e673afe92..6f5626fca3cccea80078d4ebab1fb7bb981e036c 100644 (file)
@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
 #define SHORT_SYMLINK_LEN 128
 
 /*
- * shmem_fallocate and shmem_writepage communicate via inode->i_private
- * (with i_mutex making sure that it has only one user at a time):
- * we would prefer not to enlarge the shmem inode just for that.
+ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+ * inode->i_private (with i_mutex making sure that it has only one user at
+ * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
+       wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
@@ -826,6 +827,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                        spin_lock(&inode->i_lock);
                        shmem_falloc = inode->i_private;
                        if (shmem_falloc &&
+                           !shmem_falloc->waitq &&
                            index >= shmem_falloc->start &&
                            index < shmem_falloc->next)
                                shmem_falloc->nr_unswapped++;
@@ -1300,6 +1302,64 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        int error;
        int ret = VM_FAULT_LOCKED;
 
+       /*
+        * Trinity finds that probing a hole which tmpfs is punching can
+        * prevent the hole-punch from ever completing: which in turn
+        * locks writers out with its hold on i_mutex.  So refrain from
+        * faulting pages into the hole while it's being punched.  Although
+        * shmem_undo_range() does remove the additions, it may be unable to
+        * keep up, as each new page needs its own unmap_mapping_range() call,
+        * and the i_mmap tree grows ever slower to scan if new vmas are added.
+        *
+        * It does not matter if we sometimes reach this check just before the
+        * hole-punch begins, so that one fault then races with the punch:
+        * we just need to make racing faults a rare case.
+        *
+        * The implementation below would be much simpler if we just used a
+        * standard mutex or completion: but we cannot take i_mutex in fault,
+        * and bloating every shmem inode for this unlikely case would be sad.
+        */
+       if (unlikely(inode->i_private)) {
+               struct shmem_falloc *shmem_falloc;
+
+               spin_lock(&inode->i_lock);
+               shmem_falloc = inode->i_private;
+               if (shmem_falloc &&
+                   shmem_falloc->waitq &&
+                   vmf->pgoff >= shmem_falloc->start &&
+                   vmf->pgoff < shmem_falloc->next) {
+                       wait_queue_head_t *shmem_falloc_waitq;
+                       DEFINE_WAIT(shmem_fault_wait);
+
+                       ret = VM_FAULT_NOPAGE;
+                       if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+                          !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+                               /* It's polite to up mmap_sem if we can */
+                               up_read(&vma->vm_mm->mmap_sem);
+                               ret = VM_FAULT_RETRY;
+                       }
+
+                       shmem_falloc_waitq = shmem_falloc->waitq;
+                       prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+                                       TASK_UNINTERRUPTIBLE);
+                       spin_unlock(&inode->i_lock);
+                       schedule();
+
+                       /*
+                        * shmem_falloc_waitq points into the shmem_fallocate()
+                        * stack of the hole-punching task: shmem_falloc_waitq
+                        * is usually invalid by the time we reach here, but
+                        * finish_wait() does not dereference it in that case;
+                        * though i_lock needed lest racing with wake_up_all().
+                        */
+                       spin_lock(&inode->i_lock);
+                       finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+                       spin_unlock(&inode->i_lock);
+                       return ret;
+               }
+               spin_unlock(&inode->i_lock);
+       }
+
        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
        if (error)
                return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
@@ -1819,12 +1879,25 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                struct address_space *mapping = file->f_mapping;
                loff_t unmap_start = round_up(offset, PAGE_SIZE);
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+               DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+
+               shmem_falloc.waitq = &shmem_falloc_waitq;
+               shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+               shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+               spin_lock(&inode->i_lock);
+               inode->i_private = &shmem_falloc;
+               spin_unlock(&inode->i_lock);
 
                if ((u64)unmap_end > (u64)unmap_start)
                        unmap_mapping_range(mapping, unmap_start,
                                            1 + unmap_end - unmap_start, 0);
                shmem_truncate_range(inode, offset, offset + len - 1);
                /* No need to unmap again: hole-punching leaves COWed pages */
+
+               spin_lock(&inode->i_lock);
+               inode->i_private = NULL;
+               wake_up_all(&shmem_falloc_waitq);
+               spin_unlock(&inode->i_lock);
                error = 0;
                goto out;
        }
@@ -1842,6 +1915,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                goto out;
        }
 
+       shmem_falloc.waitq = NULL;
        shmem_falloc.start = start;
        shmem_falloc.next  = start;
        shmem_falloc.nr_falloced = 0;
index d25f293776482f5e88c831c290058f2ca2e6af2b..957c1db6665254645f43d0c04456015a1d4df65c 100644 (file)
 
 static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
 
+static bool nf_generic_should_process(u8 proto)
+{
+       switch (proto) {
+#ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE
+       case IPPROTO_SCTP:
+               return false;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE
+       case IPPROTO_DCCP:
+               return false;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE
+       case IPPROTO_GRE:
+               return false;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE
+       case IPPROTO_UDPLITE:
+               return false;
+#endif
+       default:
+               return true;
+       }
+}
+
 static inline struct nf_generic_net *generic_pernet(struct net *net)
 {
        return &net->ct.nf_ct_proto.generic;
@@ -67,7 +91,7 @@ static int generic_packet(struct nf_conn *ct,
 static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
                        unsigned int dataoff, unsigned int *timeouts)
 {
-       return true;
+       return nf_generic_should_process(nf_ct_protonum(ct));
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
index cef509985192a041f8d437adc35cd540e32f58bd..a05ca5673457fe8fdbad1293d3551958b70e9f4f 100644 (file)
@@ -375,7 +375,7 @@ void sctp_association_free(struct sctp_association *asoc)
        /* Only real associations count against the endpoint, so
         * don't bother for if this is a temporary association.
         */
-       if (!asoc->temp) {
+       if (!list_empty(&asoc->asocs)) {
                list_del(&asoc->asocs);
 
                /* Decrement the backlog value for a TCP-style listening
@@ -1644,6 +1644,8 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
         * ack chunk whose serial number matches that of the request.
         */
        list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
+               if (sctp_chunk_pending(ack))
+                       continue;
                if (ack->subh.addip_hdr->serial == serial) {
                        sctp_chunk_hold(ack);
                        return ack;
index 5856932fdc38906ae78b3da2e170f0699634b24e..560cd418a1813a26b4fa2c0ae104c5ebf7e5df4d 100644 (file)
@@ -141,18 +141,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
                } else {
                        /* Nothing to do. Next chunk in the packet, please. */
                        ch = (sctp_chunkhdr_t *) chunk->chunk_end;
-
                        /* Force chunk->skb->data to chunk->chunk_end.  */
-                       skb_pull(chunk->skb,
-                                chunk->chunk_end - chunk->skb->data);
-
-                       /* Verify that we have at least chunk headers
-                        * worth of buffer left.
-                        */
-                       if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
-                               sctp_chunk_free(chunk);
-                               chunk = queue->in_progress = NULL;
-                       }
+                       skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
+                       /* We are guaranteed to pull a SCTP header. */
                }
        }
 
@@ -188,24 +179,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
        skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
        chunk->subh.v = NULL; /* Subheader is no longer valid.  */
 
-       if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
+       if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
+           skb_tail_pointer(chunk->skb)) {
                /* This is not a singleton */
                chunk->singleton = 0;
        } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
-               /* RFC 2960, Section 6.10  Bundling
-                *
-                * Partial chunks MUST NOT be placed in an SCTP packet.
-                * If the receiver detects a partial chunk, it MUST drop
-                * the chunk.
-                *
-                * Since the end of the chunk is past the end of our buffer
-                * (which contains the whole packet, we can freely discard
-                * the whole packet.
-                */
-               sctp_chunk_free(chunk);
-               chunk = queue->in_progress = NULL;
-
-               return NULL;
+               /* Discard inside state machine. */
+               chunk->pdiscard = 1;
+               chunk->chunk_end = skb_tail_pointer(chunk->skb);
        } else {
                /* We are at the end of the packet, so mark the chunk
                 * in case we need to send a SACK.
index 26be077b826775ef5d107a29405801bab2efb1e8..1b4d035ed5154625799fee5655f7b87c1aeb0cca 100644 (file)
@@ -3121,50 +3121,63 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
        return SCTP_ERROR_NO_ERROR;
 }
 
-/* Verify the ASCONF packet before we process it.  */
-int sctp_verify_asconf(const struct sctp_association *asoc,
-                      struct sctp_paramhdr *param_hdr, void *chunk_end,
-                      struct sctp_paramhdr **errp) {
-       sctp_addip_param_t *asconf_param;
+/* Verify the ASCONF packet before we process it. */
+bool sctp_verify_asconf(const struct sctp_association *asoc,
+                       struct sctp_chunk *chunk, bool addr_param_needed,
+                       struct sctp_paramhdr **errp)
+{
+       sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr;
        union sctp_params param;
-       int length, plen;
-
-       param.v = (sctp_paramhdr_t *) param_hdr;
-       while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
-               length = ntohs(param.p->length);
-               *errp = param.p;
+       bool addr_param_seen = false;
 
-               if (param.v > chunk_end - length ||
-                   length < sizeof(sctp_paramhdr_t))
-                       return 0;
+       sctp_walk_params(param, addip, addip_hdr.params) {
+               size_t length = ntohs(param.p->length);
 
+               *errp = param.p;
                switch (param.p->type) {
+               case SCTP_PARAM_ERR_CAUSE:
+                       break;
+               case SCTP_PARAM_IPV4_ADDRESS:
+                       if (length != sizeof(sctp_ipv4addr_param_t))
+                               return false;
+                       addr_param_seen = true;
+                       break;
+               case SCTP_PARAM_IPV6_ADDRESS:
+                       if (length != sizeof(sctp_ipv6addr_param_t))
+                               return false;
+                       addr_param_seen = true;
+                       break;
                case SCTP_PARAM_ADD_IP:
                case SCTP_PARAM_DEL_IP:
                case SCTP_PARAM_SET_PRIMARY:
-                       asconf_param = (sctp_addip_param_t *)param.v;
-                       plen = ntohs(asconf_param->param_hdr.length);
-                       if (plen < sizeof(sctp_addip_param_t) +
-                           sizeof(sctp_paramhdr_t))
-                               return 0;
+                       /* In ASCONF chunks, these need to be first. */
+                       if (addr_param_needed && !addr_param_seen)
+                               return false;
+                       length = ntohs(param.addip->param_hdr.length);
+                       if (length < sizeof(sctp_addip_param_t) +
+                                    sizeof(sctp_paramhdr_t))
+                               return false;
                        break;
                case SCTP_PARAM_SUCCESS_REPORT:
                case SCTP_PARAM_ADAPTATION_LAYER_IND:
                        if (length != sizeof(sctp_addip_param_t))
-                               return 0;
-
+                               return false;
                        break;
                default:
-                       break;
+                       /* This is unknown to us, reject! */
+                       return false;
                }
-
-               param.v += WORD_ROUND(length);
        }
 
-       if (param.v != chunk_end)
-               return 0;
+       /* Remaining sanity checks. */
+       if (addr_param_needed && !addr_param_seen)
+               return false;
+       if (!addr_param_needed && addr_param_seen)
+               return false;
+       if (param.v != chunk->chunk_end)
+               return false;
 
-       return 1;
+       return true;
 }
 
 /* Process an incoming ASCONF chunk with the next expected serial no. and
@@ -3173,16 +3186,17 @@ int sctp_verify_asconf(const struct sctp_association *asoc,
 struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
                                       struct sctp_chunk *asconf)
 {
+       sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;
+       bool all_param_pass = true;
+       union sctp_params param;
        sctp_addiphdr_t         *hdr;
        union sctp_addr_param   *addr_param;
        sctp_addip_param_t      *asconf_param;
        struct sctp_chunk       *asconf_ack;
-
        __be16  err_code;
        int     length = 0;
        int     chunk_len;
        __u32   serial;
-       int     all_param_pass = 1;
 
        chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
        hdr = (sctp_addiphdr_t *)asconf->skb->data;
@@ -3210,9 +3224,14 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
                goto done;
 
        /* Process the TLVs contained within the ASCONF chunk. */
-       while (chunk_len > 0) {
+       sctp_walk_params(param, addip, addip_hdr.params) {
+               /* Skip preceding address parameters. */
+               if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
+                   param.p->type == SCTP_PARAM_IPV6_ADDRESS)
+                       continue;
+
                err_code = sctp_process_asconf_param(asoc, asconf,
-                                                    asconf_param);
+                                                    param.addip);
                /* ADDIP 4.1 A7)
                 * If an error response is received for a TLV parameter,
                 * all TLVs with no response before the failed TLV are
@@ -3220,28 +3239,20 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
                 * the failed response are considered unsuccessful unless
                 * a specific success indication is present for the parameter.
                 */
-               if (SCTP_ERROR_NO_ERROR != err_code)
-                       all_param_pass = 0;
-
+               if (err_code != SCTP_ERROR_NO_ERROR)
+                       all_param_pass = false;
                if (!all_param_pass)
-                       sctp_add_asconf_response(asconf_ack,
-                                                asconf_param->crr_id, err_code,
-                                                asconf_param);
+                       sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
+                                                err_code, param.addip);
 
                /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
                 * an IP address sends an 'Out of Resource' in its response, it
                 * MUST also fail any subsequent add or delete requests bundled
                 * in the ASCONF.
                 */
-               if (SCTP_ERROR_RSRC_LOW == err_code)
+               if (err_code == SCTP_ERROR_RSRC_LOW)
                        goto done;
-
-               /* Move to the next ASCONF param. */
-               length = ntohs(asconf_param->param_hdr.length);
-               asconf_param = (void *)asconf_param + length;
-               chunk_len -= length;
        }
-
 done:
        asoc->peer.addip_serial++;
 
index 0a5f0508c43a3adf6aca31daa9066a3733f385c2..6c352b39d16302c77bc366d05c48c3ce2811a88e 100644 (file)
@@ -171,6 +171,9 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
 {
        __u16 chunk_length = ntohs(chunk->chunk_hdr->length);
 
+       /* Was this chunk already flagged for discard (pdiscard) by an earlier check? */
+       if (unlikely(chunk->pdiscard))
+               return 0;
        if (unlikely(chunk_length < required_length))
                return 0;
 
@@ -3579,9 +3582,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
        struct sctp_chunk       *asconf_ack = NULL;
        struct sctp_paramhdr    *err_param = NULL;
        sctp_addiphdr_t         *hdr;
-       union sctp_addr_param   *addr_param;
        __u32                   serial;
-       int                     length;
 
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
@@ -3606,17 +3607,8 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
        hdr = (sctp_addiphdr_t *)chunk->skb->data;
        serial = ntohl(hdr->serial);
 
-       addr_param = (union sctp_addr_param *)hdr->params;
-       length = ntohs(addr_param->p.length);
-       if (length < sizeof(sctp_paramhdr_t))
-               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
-                          (void *)addr_param, commands);
-
        /* Verify the ASCONF chunk before processing it. */
-       if (!sctp_verify_asconf(asoc,
-                           (sctp_paramhdr_t *)((void *)addr_param + length),
-                           (void *)chunk->chunk_end,
-                           &err_param))
+       if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
                return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                                                  (void *)err_param, commands);
 
@@ -3734,10 +3726,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
        rcvd_serial = ntohl(addip_hdr->serial);
 
        /* Verify the ASCONF-ACK chunk before processing it. */
-       if (!sctp_verify_asconf(asoc,
-           (sctp_paramhdr_t *)addip_hdr->params,
-           (void *)asconf_ack->chunk_end,
-           &err_param))
+       if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
                return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                           (void *)err_param, commands);
 
index ac75e7c87931ff2c46eb1aac62e1d7b88df52c4e..10685b6d8cf1527a5221a9ff5772d4c73b766780 100644 (file)
@@ -1,3 +1,9 @@
+* 3.12.1
+- workaround for qHD (540x960) video mode
+== GiWoong Kim <giwoong.kim@samsung.com> 2015-03-12
+* 2.0.21
+- brillcodec: modify debugging method.
+== Kitae Kim <kt920.kim@samsung.com> 2015-01-20
 * 2.0.20
 - smack: [PATCH] Fix a bidirectional UDS connect check
 - enable smp feature
index cce09510d20525153ac5e125ad04e26fa0c5fdb1..9bffe3e9ec85c68aab0e29e7d2dd04ac8362f904 100644 (file)
@@ -1,4 +1,4 @@
-Version: 2.0.20
+Version: 3.12.1
 Maintainer: Yeong-Kyoon, Lee <yeongkyoon.lee@samsung.com>
 Source: emulator-kernel
 
index 5514cbc2f691667444ebf72d9c38310dc2be3408..bbe6bdb3c96808f3c2a7aa1790286c0efe386eca 100644 (file)
@@ -15,7 +15,7 @@
 Name: emulator-kernel
 Summary: The Linux Emulator Kernel
 Version: 3.12.18
-Release: 4
+Release: 5
 License: GPL-2.0
 Group: System Environment/Kernel
 Vendor: The Linux Community
@@ -28,6 +28,7 @@ ExclusiveArch: %{ix86}
 
 #BuildRequires: linux-glibc-devel
 #BuildRequires: bc
+BuildRequires: emulator-kernel-user-headers
 
 Provides: kernel = %{version}-%{release}
 Provides: kernel-uname-r = %{fullVersion}
index d67c97bb10256d5dc5a9b74b3b8aaa37022f96b1..797818695c87a93855ad54a0b86dcbb194ab5a31 100644 (file)
@@ -201,12 +201,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
                if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
                        atomic_dec(&key->user->nikeys);
 
-               key_user_put(key->user);
-
                /* now throw away the key memory */
                if (key->type->destroy)
                        key->type->destroy(key);
 
+               key_user_put(key->user);
+
                kfree(key->description);
 
 #ifdef KEY_DEBUGGING
index d8aa206e8bdece19a337175ee1e4a9f83b8e8569..98a29b26c5f41d0448177aaab2c0bd561cc70e2b 100644 (file)
@@ -289,6 +289,10 @@ static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
 {
        struct snd_kcontrol *kctl;
 
+       /* Make sure that the ids assigned to the control do not wrap around */
+       if (card->last_numid >= UINT_MAX - count)
+               card->last_numid = 0;
+
        list_for_each_entry(kctl, &card->controls, list) {
                if (kctl->id.numid < card->last_numid + 1 + count &&
                    kctl->id.numid + kctl->count > card->last_numid + 1) {
@@ -331,6 +335,7 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
 {
        struct snd_ctl_elem_id id;
        unsigned int idx;
+       unsigned int count;
        int err = -EINVAL;
 
        if (! kcontrol)
@@ -338,6 +343,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
        if (snd_BUG_ON(!card || !kcontrol->info))
                goto error;
        id = kcontrol->id;
+       if (id.index > UINT_MAX - kcontrol->count)
+               goto error;
+
        down_write(&card->controls_rwsem);
        if (snd_ctl_find_id(card, &id)) {
                up_write(&card->controls_rwsem);
@@ -359,8 +367,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
        card->controls_count += kcontrol->count;
        kcontrol->id.numid = card->last_numid + 1;
        card->last_numid += kcontrol->count;
+       count = kcontrol->count;
        up_write(&card->controls_rwsem);
-       for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
+       for (idx = 0; idx < count; idx++, id.index++, id.numid++)
                snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
        return 0;
 
@@ -389,6 +398,7 @@ int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
                    bool add_on_replace)
 {
        struct snd_ctl_elem_id id;
+       unsigned int count;
        unsigned int idx;
        struct snd_kcontrol *old;
        int ret;
@@ -424,8 +434,9 @@ add:
        card->controls_count += kcontrol->count;
        kcontrol->id.numid = card->last_numid + 1;
        card->last_numid += kcontrol->count;
+       count = kcontrol->count;
        up_write(&card->controls_rwsem);
-       for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
+       for (idx = 0; idx < count; idx++, id.index++, id.numid++)
                snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
        return 0;
 
@@ -898,9 +909,9 @@ static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
                        result = kctl->put(kctl, control);
                }
                if (result > 0) {
+                       struct snd_ctl_elem_id id = control->id;
                        up_read(&card->controls_rwsem);
-                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
-                                      &control->id);
+                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
                        return 0;
                }
        }
@@ -992,6 +1003,7 @@ static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
 
 struct user_element {
        struct snd_ctl_elem_info info;
+       struct snd_card *card;
        void *elem_data;                /* element data */
        unsigned long elem_data_size;   /* size of element data in bytes */
        void *tlv_data;                 /* TLV data */
@@ -1035,7 +1047,9 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
 {
        struct user_element *ue = kcontrol->private_data;
 
+       mutex_lock(&ue->card->user_ctl_lock);
        memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
+       mutex_unlock(&ue->card->user_ctl_lock);
        return 0;
 }
 
@@ -1044,10 +1058,12 @@ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
 {
        int change;
        struct user_element *ue = kcontrol->private_data;
-       
+
+       mutex_lock(&ue->card->user_ctl_lock);
        change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
        if (change)
                memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
+       mutex_unlock(&ue->card->user_ctl_lock);
        return change;
 }
 
@@ -1067,19 +1083,32 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
                new_data = memdup_user(tlv, size);
                if (IS_ERR(new_data))
                        return PTR_ERR(new_data);
+               mutex_lock(&ue->card->user_ctl_lock);
                change = ue->tlv_data_size != size;
                if (!change)
                        change = memcmp(ue->tlv_data, new_data, size);
                kfree(ue->tlv_data);
                ue->tlv_data = new_data;
                ue->tlv_data_size = size;
+               mutex_unlock(&ue->card->user_ctl_lock);
        } else {
-               if (! ue->tlv_data_size || ! ue->tlv_data)
-                       return -ENXIO;
-               if (size < ue->tlv_data_size)
-                       return -ENOSPC;
+               int ret = 0;
+
+               mutex_lock(&ue->card->user_ctl_lock);
+               if (!ue->tlv_data_size || !ue->tlv_data) {
+                       ret = -ENXIO;
+                       goto err_unlock;
+               }
+               if (size < ue->tlv_data_size) {
+                       ret = -ENOSPC;
+                       goto err_unlock;
+               }
                if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
-                       return -EFAULT;
+                       ret = -EFAULT;
+err_unlock:
+               mutex_unlock(&ue->card->user_ctl_lock);
+               if (ret)
+                       return ret;
        }
        return change;
 }
@@ -1137,8 +1166,6 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
        struct user_element *ue;
        int idx, err;
 
-       if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)
-               return -ENOMEM;
        if (info->count < 1)
                return -EINVAL;
        access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
@@ -1147,21 +1174,16 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
                                 SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
        info->id.numid = 0;
        memset(&kctl, 0, sizeof(kctl));
-       down_write(&card->controls_rwsem);
-       _kctl = snd_ctl_find_id(card, &info->id);
-       err = 0;
-       if (_kctl) {
-               if (replace)
-                       err = snd_ctl_remove(card, _kctl);
-               else
-                       err = -EBUSY;
-       } else {
-               if (replace)
-                       err = -ENOENT;
+
+       if (replace) {
+               err = snd_ctl_remove_user_ctl(file, &info->id);
+               if (err)
+                       return err;
        }
-       up_write(&card->controls_rwsem);
-       if (err < 0)
-               return err;
+
+       if (card->user_ctl_count >= MAX_USER_CONTROLS)
+               return -ENOMEM;
+
        memcpy(&kctl.id, &info->id, sizeof(info->id));
        kctl.count = info->owner ? info->owner : 1;
        access |= SNDRV_CTL_ELEM_ACCESS_USER;
@@ -1211,6 +1233,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
        ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
        if (ue == NULL)
                return -ENOMEM;
+       ue->card = card;
        ue->info = *info;
        ue->info.access = 0;
        ue->elem_data = (char *)ue + sizeof(*ue);
@@ -1322,8 +1345,9 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
                }
                err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
                if (err > 0) {
+                       struct snd_ctl_elem_id id = kctl->id;
                        up_read(&card->controls_rwsem);
-                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id);
+                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
                        return 0;
                }
        } else {
index d04785144601dd460d3ad9bf86fbe9869e9c6827..b9268a55126bc59648d65dcb6f7e5c0274b829c4 100644 (file)
@@ -215,6 +215,7 @@ int snd_card_create(int idx, const char *xid,
        INIT_LIST_HEAD(&card->devices);
        init_rwsem(&card->controls_rwsem);
        rwlock_init(&card->ctl_files_rwlock);
+       mutex_init(&card->user_ctl_lock);
        INIT_LIST_HEAD(&card->controls);
        INIT_LIST_HEAD(&card->ctl_files);
        spin_lock_init(&card->files_lock);
index 9f3eae2909009517cb96de2f882121883f43444e..2d9ab9417289d1ea10f4ecfa78b57fd6ce530d80 100644 (file)
@@ -4,6 +4,7 @@ TARGETS += efivarfs
 TARGETS += kcmp
 TARGETS += memory-hotplug
 TARGETS += mqueue
+TARGETS += mount
 TARGETS += net
 TARGETS += ptrace
 TARGETS += timers
diff --git a/tools/testing/selftests/mount/Makefile b/tools/testing/selftests/mount/Makefile
new file mode 100644 (file)
index 0000000..337d853
--- /dev/null
@@ -0,0 +1,17 @@
+# Makefile for mount selftests.
+
+all: unprivileged-remount-test
+
+unprivileged-remount-test: unprivileged-remount-test.c
+       gcc -Wall -O2 unprivileged-remount-test.c -o unprivileged-remount-test
+
+# Allow specific tests to be selected.
+test_unprivileged_remount: unprivileged-remount-test
+       @if [ -f /proc/self/uid_map ] ; then ./unprivileged-remount-test ; fi
+
+run_tests: all test_unprivileged_remount
+
+clean:
+       rm -f unprivileged-remount-test
+
+.PHONY: all test_unprivileged_remount
diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c
new file mode 100644 (file)
index 0000000..1b3ff2f
--- /dev/null
@@ -0,0 +1,242 @@
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <grp.h>
+#include <stdbool.h>
+#include <stdarg.h>
+
+#ifndef CLONE_NEWNS
+# define CLONE_NEWNS 0x00020000
+#endif
+#ifndef CLONE_NEWUTS
+# define CLONE_NEWUTS 0x04000000
+#endif
+#ifndef CLONE_NEWIPC
+# define CLONE_NEWIPC 0x08000000
+#endif
+#ifndef CLONE_NEWNET
+# define CLONE_NEWNET 0x40000000
+#endif
+#ifndef CLONE_NEWUSER
+# define CLONE_NEWUSER 0x10000000
+#endif
+#ifndef CLONE_NEWPID
+# define CLONE_NEWPID 0x20000000
+#endif
+
+#ifndef MS_RELATIME
+#define MS_RELATIME (1 << 21)
+#endif
+#ifndef MS_STRICTATIME
+#define MS_STRICTATIME (1 << 24)
+#endif
+
+static void die(char *fmt, ...)
+{
+       va_list ap;
+       va_start(ap, fmt);
+       vfprintf(stderr, fmt, ap);
+       va_end(ap);
+       exit(EXIT_FAILURE);
+}
+
+static void write_file(char *filename, char *fmt, ...)
+{
+       char buf[4096];
+       int fd;
+       ssize_t written;
+       int buf_len;
+       va_list ap;
+
+       va_start(ap, fmt);
+       buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
+       va_end(ap);
+       if (buf_len < 0) {
+               die("vsnprintf failed: %s\n",
+                   strerror(errno));
+       }
+       if (buf_len >= sizeof(buf)) {
+               die("vsnprintf output truncated\n");
+       }
+
+       fd = open(filename, O_WRONLY);
+       if (fd < 0) {
+               die("open of %s failed: %s\n",
+                   filename, strerror(errno));
+       }
+       written = write(fd, buf, buf_len);
+       if (written != buf_len) {
+               if (written >= 0) {
+                       die("short write to %s\n", filename);
+               } else {
+                       die("write to %s failed: %s\n",
+                               filename, strerror(errno));
+               }
+       }
+       if (close(fd) != 0) {
+               die("close of %s failed: %s\n",
+                       filename, strerror(errno));
+       }
+}
+
+static void create_and_enter_userns(void)
+{
+       uid_t uid;
+       gid_t gid;
+
+       uid = getuid();
+       gid = getgid();
+
+       if (unshare(CLONE_NEWUSER) !=0) {
+               die("unshare(CLONE_NEWUSER) failed: %s\n",
+                       strerror(errno));
+       }
+
+       write_file("/proc/self/uid_map", "0 %d 1", uid);
+       write_file("/proc/self/gid_map", "0 %d 1", gid);
+
+       if (setgroups(0, NULL) != 0) {
+               die("setgroups failed: %s\n",
+                       strerror(errno));
+       }
+       if (setgid(0) != 0) {
+               die ("setgid(0) failed %s\n",
+                       strerror(errno));
+       }
+       if (setuid(0) != 0) {
+               die("setuid(0) failed %s\n",
+                       strerror(errno));
+       }
+}
+
+static
+bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
+{
+       pid_t child;
+
+       child = fork();
+       if (child == -1) {
+               die("fork failed: %s\n",
+                       strerror(errno));
+       }
+       if (child != 0) { /* parent */
+               pid_t pid;
+               int status;
+               pid = waitpid(child, &status, 0);
+               if (pid == -1) {
+                       die("waitpid failed: %s\n",
+                               strerror(errno));
+               }
+               if (pid != child) {
+                       die("waited for %d got %d\n",
+                               child, pid);
+               }
+               if (!WIFEXITED(status)) {
+                       die("child did not terminate cleanly\n");
+               }
+               return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false;
+       }
+
+       create_and_enter_userns();
+       if (unshare(CLONE_NEWNS) != 0) {
+               die("unshare(CLONE_NEWNS) failed: %s\n",
+                       strerror(errno));
+       }
+
+       if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
+               die("mount of /tmp failed: %s\n",
+                       strerror(errno));
+       }
+
+       create_and_enter_userns();
+
+       if (unshare(CLONE_NEWNS) != 0) {
+               die("unshare(CLONE_NEWNS) failed: %s\n",
+                       strerror(errno));
+       }
+
+       if (mount("/tmp", "/tmp", "none",
+                 MS_REMOUNT | MS_BIND | remount_flags, NULL) != 0) {
+               /* system("cat /proc/self/mounts"); */
+               die("remount of /tmp failed: %s\n",
+                   strerror(errno));
+       }
+
+       if (mount("/tmp", "/tmp", "none",
+                 MS_REMOUNT | MS_BIND | invalid_flags, NULL) == 0) {
+               /* system("cat /proc/self/mounts"); */
+               die("remount of /tmp with invalid flags "
+                   "succeeded unexpectedly\n");
+       }
+       exit(EXIT_SUCCESS);
+}
+
+static bool test_unpriv_remount_simple(int mount_flags)
+{
+       return test_unpriv_remount(mount_flags, mount_flags, 0);
+}
+
+static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
+{
+       return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
+}
+
+int main(int argc, char **argv)
+{
+       if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
+               die("MS_RDONLY malfunctions\n");
+       }
+       if (!test_unpriv_remount_simple(MS_NODEV)) {
+               die("MS_NODEV malfunctions\n");
+       }
+       if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
+               die("MS_NOSUID malfunctions\n");
+       }
+       if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
+               die("MS_NOEXEC malfunctions\n");
+       }
+       if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
+                                      MS_NOATIME|MS_NODEV))
+       {
+               die("MS_RELATIME malfunctions\n");
+       }
+       if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
+                                      MS_NOATIME|MS_NODEV))
+       {
+               die("MS_STRICTATIME malfunctions\n");
+       }
+       if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
+                                      MS_STRICTATIME|MS_NODEV))
+       {
+               die("MS_RELATIME malfunctions\n");
+       }
+       if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
+                                      MS_NOATIME|MS_NODEV))
+       {
+               die("MS_RELATIME malfunctions\n");
+       }
+       if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
+                                      MS_NOATIME|MS_NODEV))
+       {
+               die("MS_RELATIME malfunctions\n");
+       }
+       if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
+                                      MS_STRICTATIME|MS_NODEV))
+       {
+               die("MS_RELATIME malfunctions\n");
+       }
+       if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
+                                MS_NOATIME|MS_NODEV))
+       {
+               die("Default atime malfunctions\n");
+       }
+       return EXIT_SUCCESS;
+}