Merge branch 'fixes' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur...
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 11 Sep 2011 00:28:46 +0000 (17:28 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 11 Sep 2011 00:28:46 +0000 (17:28 -0700)
* 'fixes' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur/linux-2.6-arm:
  ARM: 7088/1: entry: fix wrong parameter name used in do_thumb_abort
  ARM: 7080/1: l2x0: make sure I&D are not locked down on init
  ARM: 7081/1: mach-integrator: fix the clocksource
  NET: am79c961: fix race in link status code
  ARM: 7067/1: mm: keep significant bits in pfn_valid

75 files changed:
Documentation/feature-removal-schedule.txt
Documentation/hwmon/max16065
Makefile
arch/powerpc/boot/dts/p1023rds.dts
arch/powerpc/configs/85xx/p1023rds_defconfig
arch/powerpc/configs/corenet32_smp_defconfig
arch/powerpc/configs/corenet64_smp_defconfig
arch/powerpc/configs/mpc85xx_defconfig
arch/powerpc/configs/mpc85xx_smp_defconfig
arch/x86/include/asm/pvclock.h
arch/x86/kernel/cpu/perf_event.c
arch/x86/pci/acpi.c
arch/x86/xen/setup.c
arch/x86/xen/smp.c
arch/x86/xen/xen-asm_32.S
drivers/base/regmap/regmap.c
drivers/dma/ste_dma40.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/radeon_clocks.c
drivers/hid/hid-ids.h
drivers/hid/hid-magicmouse.c
drivers/hid/hid-wacom.c
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/max16065.c
drivers/hwmon/pmbus/ucd9000.c
drivers/hwmon/pmbus/ucd9200.c
drivers/i2c/busses/i2c-pxa-pci.c
drivers/i2c/busses/i2c-tegra.c
drivers/iommu/amd_iommu.c
drivers/md/linear.h
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/host.h
drivers/mmc/core/sd.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sh_mobile_sdhi.c
drivers/mtd/ubi/debug.h
drivers/pci/hotplug/pcihp_slot.c
drivers/pci/pci.c
drivers/pci/probe.c
drivers/rtc/rtc-ep93xx.c
drivers/rtc/rtc-lib.c
drivers/rtc/rtc-twl.c
drivers/video/backlight/backlight.c
fs/9p/v9fs_vfs.h
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/9p/vfs_super.c
fs/block_dev.c
fs/ceph/mds_client.c
fs/ceph/super.c
fs/ext4/ext4.h
fs/ext4/inode.c
fs/ext4/page-io.c
fs/namei.c
fs/ubifs/debug.h
fs/xfs/xfs_iops.c
fs/xfs/xfs_super.c
include/linux/perf_event.h
include/linux/regulator/consumer.h
include/net/9p/9p.h
include/net/cfg80211.h
kernel/events/core.c
kernel/sched.c
kernel/time/alarmtimer.c
net/9p/trans_virtio.c
net/ceph/msgpool.c
net/ceph/osd_client.c

index c4a6e14..4dc4654 100644 (file)
@@ -592,3 +592,11 @@ Why:    In 3.0, we can now autodetect internal 3G device and already have
        interface that was used by the acer-wmi driver. It will be replaced by
        an information log when acer-wmi initializes.
 Who:    Lee, Chun-Yi <jlee@novell.com>
+
+----------------------------
+What:  The XFS nodelaylog mount option
+When:  3.3
+Why:   The delaylog mode that has been the default since 2.6.39 has proven
+       stable, and the old code is in the way of additional improvements in
+       the log code.
+Who:   Christoph Hellwig <hch@lst.de>
index 44b4f61..c11f64a 100644 (file)
@@ -62,6 +62,13 @@ can be safely used to identify the chip. You will have to instantiate
 the devices explicitly. Please see Documentation/i2c/instantiating-devices for
 details.
 
+WARNING: Do not access chip registers using the i2cdump command, and do not use
+any of the i2ctools commands on a command register (0xa5 to 0xac). The chips
+supported by this driver interpret any access to a command register (including
+read commands) as a request to execute the command in question. This may result in
+power loss, board resets, and/or Flash corruption. Worst case, your board may
+turn into a brick.
+
 
 Sysfs entries
 -------------
index c3e90c5..03d97aa 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = "Divemaster Edition"
 
 # *DOCUMENTATION*
index bfa96aa..d9b7767 100644 (file)
                        #size-cells = <1>;
                        compatible = "cfi-flash";
                        reg = <0x0 0x0 0x02000000>;
-                       bank-width = <1>;
+                       bank-width = <2>;
                        device-width = <1>;
                        partition@0 {
                                label = "ramdisk";
index 980ff8f..3ff5a81 100644 (file)
@@ -171,3 +171,4 @@ CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_SHA512=y
 CONFIG_CRYPTO_AES=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
index 10562a5..4311d02 100644 (file)
@@ -185,3 +185,4 @@ CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_SHA512=y
 CONFIG_CRYPTO_AES=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
index d322835..c92c204 100644 (file)
@@ -100,5 +100,8 @@ CONFIG_DEBUG_INFO=y
 CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_VIRQ_DEBUG=y
 CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_AES=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_TALITOS=y
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
index fcd85d2..a3467bf 100644 (file)
@@ -139,6 +139,7 @@ CONFIG_SND=y
 CONFIG_SND_INTEL8X0=y
 # CONFIG_SND_PPC is not set
 # CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
 CONFIG_HID_A4TECH=y
 CONFIG_HID_APPLE=y
 CONFIG_HID_BELKIN=y
index 908c941..9693f6e 100644 (file)
@@ -140,6 +140,7 @@ CONFIG_SND=y
 CONFIG_SND_INTEL8X0=y
 # CONFIG_SND_PPC is not set
 # CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
 CONFIG_HID_A4TECH=y
 CONFIG_HID_APPLE=y
 CONFIG_HID_BELKIN=y
index a518c0a..c59cc97 100644 (file)
@@ -44,7 +44,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
                : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
 #elif defined(__x86_64__)
        __asm__ (
-               "mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
+               "mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
                : [lo]"=a"(product),
                  [hi]"=d"(tmp)
                : "0"(delta),
index 4ee3abf..cfa62ec 100644 (file)
@@ -1900,6 +1900,9 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
        perf_callchain_store(entry, regs->ip);
 
+       if (!current->mm)
+               return;
+
        if (perf_callchain_user32(regs, entry))
                return;
 
index c953302..039d913 100644 (file)
@@ -365,8 +365,13 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
         */
        if (bus) {
                struct pci_bus *child;
-               list_for_each_entry(child, &bus->children, node)
-                       pcie_bus_configure_settings(child, child->self->pcie_mpss);
+               list_for_each_entry(child, &bus->children, node) {
+                       struct pci_dev *self = child->self;
+                       if (!self)
+                               continue;
+
+                       pcie_bus_configure_settings(child, self->pcie_mpss);
+               }
        }
 
        if (!bus)
index df118a8..c3b8d44 100644 (file)
@@ -184,6 +184,19 @@ static unsigned long __init xen_set_identity(const struct e820entry *list,
                                        PFN_UP(start_pci), PFN_DOWN(last));
        return identity;
 }
+
+static unsigned long __init xen_get_max_pages(void)
+{
+       unsigned long max_pages = MAX_DOMAIN_PAGES;
+       domid_t domid = DOMID_SELF;
+       int ret;
+
+       ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+       if (ret > 0)
+               max_pages = ret;
+       return min(max_pages, MAX_DOMAIN_PAGES);
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
@@ -292,6 +305,12 @@ char * __init xen_memory_setup(void)
 
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
+       extra_limit = xen_get_max_pages();
+       if (extra_limit >= max_pfn)
+               extra_pages = extra_limit - max_pfn;
+       else
+               extra_pages = 0;
+
        extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
 
        /*
index e79dbb9..d4fc6d4 100644 (file)
@@ -32,6 +32,7 @@
 #include <xen/page.h>
 #include <xen/events.h>
 
+#include <xen/hvc-console.h>
 #include "xen-ops.h"
 #include "mmu.h"
 
@@ -207,6 +208,15 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
        unsigned cpu;
        unsigned int i;
 
+       if (skip_ioapic_setup) {
+               char *m = (max_cpus == 0) ?
+                       "The nosmp parameter is incompatible with Xen; " \
+                       "use Xen dom0_max_vcpus=1 parameter" :
+                       "The noapic parameter is incompatible with Xen";
+
+               xen_raw_printk(m);
+               panic(m);
+       }
        xen_init_lock_cpu(0);
 
        smp_store_cpu_info(0);
index 22a2093..b040b0e 100644 (file)
@@ -113,11 +113,13 @@ xen_iret_start_crit:
 
        /*
         * If there's something pending, mask events again so we can
-        * jump back into xen_hypervisor_callback
+        * jump back into xen_hypervisor_callback. Otherwise do not
+        * touch XEN_vcpu_info_mask.
         */
-       sete XEN_vcpu_info_mask(%eax)
+       jne 1f
+       movb $1, XEN_vcpu_info_mask(%eax)
 
-       popl %eax
+1:     popl %eax
 
        /*
         * From this point on the registers are restored and the stack
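
A C-level sketch of what the hunk above changes (invented names; "pending" stands for the condition tested just before this code, "mask" for the byte at XEN_vcpu_info_mask(%eax)). The old sete rewrote the mask byte on every pass, clearing it when nothing was pending; the jne/movb pair only ever sets it:

    #include <stdio.h>

    static unsigned char mask_old(int pending, unsigned char mask)
    {
            /* sete: the byte is written unconditionally, so an already-set
             * mask gets cleared whenever nothing is pending */
            mask = pending ? 1 : 0;
            return mask;
    }

    static unsigned char mask_new(int pending, unsigned char mask)
    {
            /* jne + movb: set the mask only in the pending case, otherwise
             * leave whatever value is there untouched */
            if (pending)
                    mask = 1;
            return mask;
    }

    int main(void)
    {
            /* nothing pending, mask already set: old code clears it */
            printf("old: %u, new: %u\n", mask_old(0, 1), mask_new(0, 1));
            return 0;
    }
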
index 0eef4da..20663f8 100644 (file)
@@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev,
        map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
        if (map->work_buf == NULL) {
                ret = -ENOMEM;
-               goto err_bus;
+               goto err_map;
        }
 
        return map;
 
-err_bus:
-       module_put(map->bus->owner);
 err_map:
        kfree(map);
 err:
@@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init);
 void regmap_exit(struct regmap *map)
 {
        kfree(map->work_buf);
-       module_put(map->bus->owner);
        kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
index cd3a7c7..467e4dc 100644 (file)
@@ -174,8 +174,10 @@ struct d40_base;
  * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
  * transfer and call client callback.
  * @client: Client owned descriptor list.
+ * @pending_queue: Submitted jobs, to be issued by issue_pending()
  * @active: Active descriptor.
  * @queue: Queued jobs.
+ * @prepare_queue: Prepared jobs.
  * @dma_cfg: The client configuration of this dma channel.
  * @configured: whether the dma_cfg configuration is valid
  * @base: Pointer to the device instance struct.
@@ -203,6 +205,7 @@ struct d40_chan {
        struct list_head                 pending_queue;
        struct list_head                 active;
        struct list_head                 queue;
+       struct list_head                 prepare_queue;
        struct stedma40_chan_cfg         dma_cfg;
        bool                             configured;
        struct d40_base                 *base;
@@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 
                list_for_each_entry_safe(d, _d, &d40c->client, node)
                        if (async_tx_test_ack(&d->txd)) {
-                               d40_pool_lli_free(d40c, d);
                                d40_desc_remove(d);
                                desc = d;
                                memset(desc, 0, sizeof(*desc));
@@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
        return d;
 }
 
+/* remove desc from current queue and add it to the pending_queue */
 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
 {
+       d40_desc_remove(desc);
+       desc->is_in_client_list = false;
        list_add_tail(&desc->node, &d40c->pending_queue);
 }
 
@@ -803,6 +808,7 @@ done:
 static void d40_term_all(struct d40_chan *d40c)
 {
        struct d40_desc *d40d;
+       struct d40_desc *_d;
 
        /* Release active descriptors */
        while ((d40d = d40_first_active_get(d40c))) {
@@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c)
                d40_desc_free(d40c, d40d);
        }
 
+       /* Release client owned descriptors */
+       if (!list_empty(&d40c->client))
+               list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
+                       d40_desc_remove(d40d);
+                       d40_desc_free(d40c, d40d);
+               }
+
+       /* Release descriptors in prepare queue */
+       if (!list_empty(&d40c->prepare_queue))
+               list_for_each_entry_safe(d40d, _d,
+                                        &d40c->prepare_queue, node) {
+                       d40_desc_remove(d40d);
+                       d40_desc_free(d40c, d40d);
+               }
+
        d40c->pending_tx = 0;
        d40c->busy = false;
 }
@@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data)
 
        if (!d40d->cyclic) {
                if (async_tx_test_ack(&d40d->txd)) {
-                       d40_pool_lli_free(d40c, d40d);
                        d40_desc_remove(d40d);
                        d40_desc_free(d40c, d40d);
                } else {
@@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)
        u32 event;
        struct d40_phy_res *phy = d40c->phy_chan;
        bool is_src;
-       struct d40_desc *d;
-       struct d40_desc *_d;
-
 
        /* Terminate all queued and active transfers */
        d40_term_all(d40c);
 
-       /* Release client owned descriptors */
-       if (!list_empty(&d40c->client))
-               list_for_each_entry_safe(d, _d, &d40c->client, node) {
-                       d40_pool_lli_free(d40c, d);
-                       d40_desc_remove(d);
-                       d40_desc_free(d40c, d);
-               }
-
        if (phy == NULL) {
                chan_err(d40c, "phy == null\n");
                return -EINVAL;
@@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
                goto err;
        }
 
+       /*
+        * add descriptor to the prepare queue in order to be able
+        * to free them later in terminate_all
+        */
+       list_add_tail(&desc->node, &chan->prepare_queue);
+
        spin_unlock_irqrestore(&chan->lock, flags);
 
        return &desc->txd;
@@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
                INIT_LIST_HEAD(&d40c->queue);
                INIT_LIST_HEAD(&d40c->pending_queue);
                INIT_LIST_HEAD(&d40c->client);
+               INIT_LIST_HEAD(&d40c->prepare_queue);
 
                tasklet_init(&d40c->tasklet, dma_tasklet,
                             (unsigned long) d40c);
index d8d71a3..dc0a5b5 100644 (file)
@@ -41,6 +41,31 @@ static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
 
+void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
+{
+       u16 ctl, v;
+       int cap, err;
+
+       cap = pci_pcie_cap(rdev->pdev);
+       if (!cap)
+               return;
+
+       err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
+       if (err)
+               return;
+
+       v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
+
+       /* if BIOS or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
+        * to avoid hangs or performance issues
+        */
+       if ((v == 0) || (v == 6) || (v == 7)) {
+               ctl &= ~PCI_EXP_DEVCTL_READRQ;
+               ctl |= (2 << 12);
+               pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
+       }
+}
+
 void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
 {
        /* enable the pflip int */
@@ -1863,6 +1888,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 
        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 
+       evergreen_fix_pci_max_read_req_size(rdev);
+
        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
 
        cc_gc_shader_pipe_config |=
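
For context on the values tested above (a sketch, not part of the patch): the read-request-size field occupies bits 14:12 of the PCIe Device Control register (PCI_EXP_DEVCTL_READRQ is 0x7000), and an encoded value v selects 128 << v bytes. Encodings 6 and 7 are reserved, and the patch also rewrites 0; the replacement encoding 2 therefore corresponds to 512-byte read requests:

    #include <stdio.h>

    int main(void)
    {
            unsigned int v;

            /* the defined MAX_READ_REQUEST_SIZE encodings, 0..5 */
            for (v = 0; v <= 5; v++)
                    printf("encoding %u -> %u bytes\n", v, 128u << v);
            return 0;
    }
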
index a2e00fa..cbf57d7 100644 (file)
@@ -39,6 +39,7 @@ extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
 extern void evergreen_mc_program(struct radeon_device *rdev);
 extern void evergreen_irq_suspend(struct radeon_device *rdev);
 extern int evergreen_mc_init(struct radeon_device *rdev);
+extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
 
 #define EVERGREEN_PFP_UCODE_SIZE 1120
 #define EVERGREEN_PM4_UCODE_SIZE 1376
@@ -669,6 +670,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 
+       evergreen_fix_pci_max_read_req_size(rdev);
+
        mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
        mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
index dcd0863..b6e18c8 100644 (file)
@@ -219,6 +219,9 @@ void radeon_get_clock_info(struct drm_device *dev)
                } else {
                        DRM_INFO("Using generic clock info\n");
 
+                       /* may need to be per card */
+                       rdev->clock.max_pixel_clock = 35000;
+
                        if (rdev->flags & RADEON_IS_IGP) {
                                p1pll->reference_freq = 1432;
                                p2pll->reference_freq = 1432;
index 7d27d2b..7484e1b 100644 (file)
 #define USB_DEVICE_ID_PENPOWER         0x00f4
 
 #define USB_VENDOR_ID_GREENASIA                0x0e8f
+#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD        0x3013
 
 #define USB_VENDOR_ID_GRETAGMACBETH    0x0971
 #define USB_DEVICE_ID_GRETAGMACBETH_HUEY       0x2005
index 0ec91c1..f0fbd7b 100644 (file)
@@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
 #define NO_TOUCHES -1
 #define SINGLE_TOUCH_UP -2
 
+/* Touch surface information. Dimension is in hundredths of a mm, min and max
+ * are in units. */
+#define MOUSE_DIMENSION_X (float)9056
+#define MOUSE_MIN_X -1100
+#define MOUSE_MAX_X 1258
+#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100))
+#define MOUSE_DIMENSION_Y (float)5152
+#define MOUSE_MIN_Y -1589
+#define MOUSE_MAX_Y 2047
+#define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100))
+
+#define TRACKPAD_DIMENSION_X (float)13000
+#define TRACKPAD_MIN_X -2909
+#define TRACKPAD_MAX_X 3167
+#define TRACKPAD_RES_X \
+       ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100))
+#define TRACKPAD_DIMENSION_Y (float)11000
+#define TRACKPAD_MIN_Y -2456
+#define TRACKPAD_MAX_Y 2565
+#define TRACKPAD_RES_Y \
+       ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
+
 /**
  * struct magicmouse_sc - Tracks Magic Mouse-specific data.
  * @input: Input device through which we report events.
@@ -406,17 +428,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
                 * inverse of the reported Y.
                 */
                if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
-                       input_set_abs_params(input, ABS_MT_POSITION_X, -1100,
-                               1358, 4, 0);
-                       input_set_abs_params(input, ABS_MT_POSITION_Y, -1589,
-                               2047, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_X,
+                               MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_Y,
+                               MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
+
+                       input_abs_set_res(input, ABS_MT_POSITION_X,
+                               MOUSE_RES_X);
+                       input_abs_set_res(input, ABS_MT_POSITION_Y,
+                               MOUSE_RES_Y);
                } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
-                       input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0);
-                       input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0);
-                       input_set_abs_params(input, ABS_MT_POSITION_X, -2909,
-                               3167, 4, 0);
-                       input_set_abs_params(input, ABS_MT_POSITION_Y, -2456,
-                               2565, 4, 0);
+                       input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
+                               TRACKPAD_MAX_X, 4, 0);
+                       input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
+                               TRACKPAD_MAX_Y, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_X,
+                               TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_Y,
+                               TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
+
+                       input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
+                       input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
+                       input_abs_set_res(input, ABS_MT_POSITION_X,
+                               TRACKPAD_RES_X);
+                       input_abs_set_res(input, ABS_MT_POSITION_Y,
+                               TRACKPAD_RES_Y);
                }
 
                input_set_events_per_packet(input, 60);
@@ -501,9 +537,17 @@ static int magicmouse_probe(struct hid_device *hdev,
        }
        report->size = 6;
 
+       /*
+        * Some devices respond with 'invalid report id' when the feature
+        * report switching them into multitouch mode is sent to them.
+        *
+        * This results in -EIO from the _raw low-level transport callback,
+        * but there seems to be no other way of switching the mode.
+        * Thus the super-ugly hacky success check below.
+        */
        ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
                        HID_FEATURE_REPORT);
-       if (ret != sizeof(feature)) {
+       if (ret != -EIO && ret != sizeof(feature)) {
                hid_err(hdev, "unable to request touch data (%d)\n", ret);
                goto err_stop_hw;
        }
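
A quick sanity check of the new *_RES_* macros added above (illustration only, with the constants copied from the patch): min and max are raw device units, the dimension is in hundredths of a millimetre, so (max - min) / (dimension / 100) yields units per millimetre, which is the unit input_abs_set_res() expects:

    #include <stdio.h>

    int main(void)
    {
            /* ~26 units/mm for the Magic Mouse X axis */
            float mouse_res_x    = (1258.0f - -1100.0f) / (9056.0f  / 100);
            /* ~47 units/mm for the Magic Trackpad X axis */
            float trackpad_res_x = (3167.0f - -2909.0f) / (13000.0f / 100);

            printf("mouse X: %.1f units/mm, trackpad X: %.1f units/mm\n",
                   mouse_res_x, trackpad_res_x);
            return 0;
    }
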
index 0688832..a597039 100644 (file)
@@ -353,11 +353,7 @@ static int wacom_probe(struct hid_device *hdev,
        if (ret) {
                hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
                         ret);
-               /*
-                * battery attribute is not critical for the tablet, but if it
-                * failed then there is no need to create ac attribute
-                */
-               goto move_on;
+               goto err_battery;
        }
 
        wdata->ac.properties = wacom_ac_props;
@@ -371,14 +367,8 @@ static int wacom_probe(struct hid_device *hdev,
        if (ret) {
                hid_warn(hdev,
                         "can't create ac battery attribute, err: %d\n", ret);
-               /*
-                * ac attribute is not critical for the tablet, but if it
-                * failed then we don't want to battery attribute to exist
-                */
-               power_supply_unregister(&wdata->battery);
+               goto err_ac;
        }
-
-move_on:
 #endif
        hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
        input = hidinput->input;
@@ -416,6 +406,13 @@ move_on:
 
        return 0;
 
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+err_ac:
+       power_supply_unregister(&wdata->battery);
+err_battery:
+       device_remove_file(&hdev->dev, &dev_attr_speed);
+       hid_hw_stop(hdev);
+#endif
 err_free:
        kfree(wdata);
        return ret;
@@ -426,6 +423,7 @@ static void wacom_remove(struct hid_device *hdev)
 #ifdef CONFIG_HID_WACOM_POWER_SUPPLY
        struct wacom_data *wdata = hid_get_drvdata(hdev);
 #endif
+       device_remove_file(&hdev->dev, &dev_attr_speed);
        hid_hw_stop(hdev);
 
 #ifdef CONFIG_HID_WACOM_POWER_SUPPLY
index 4bdb5d4..3146fdc 100644 (file)
@@ -47,6 +47,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
 
        { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
        { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
index d94a24f..dd2d7b9 100644 (file)
@@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range)
 
 static inline int ADC_TO_CURR(int adc, int gain)
 {
-       return adc * 1400000 / gain * 255;
+       return adc * 1400000 / (gain * 255);
 }
 
 /*
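
The change above is an operator-precedence fix: C groups adc * 1400000 / gain * 255 as ((adc * 1400000) / gain) * 255, so the intended divisor of 255 acted as a multiplier and the result was too large by roughly a factor of 255 * 255. A small worked example with made-up input values:

    #include <stdio.h>

    int main(void)
    {
            int adc = 10, gain = 8;

            printf("old: %d\n", adc * 1400000 / gain * 255);   /* 446250000 */
            printf("new: %d\n", adc * 1400000 / (gain * 255)); /* 6862      */
            return 0;
    }
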
index ace1c73..d0ddb60 100644 (file)
@@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client,
        block_buffer[ret] = '\0';
        dev_info(&client->dev, "Device ID %s\n", block_buffer);
 
-       mid = NULL;
-       for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) {
-               mid = &ucd9000_id[i];
+       for (mid = ucd9000_id; mid->name[0]; mid++) {
                if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
                        break;
        }
-       if (!mid || !strlen(mid->name)) {
+       if (!mid->name[0]) {
                dev_err(&client->dev, "Unsupported device\n");
                return -ENODEV;
        }
index ffcc1cf..c65e9da 100644 (file)
@@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client,
        block_buffer[ret] = '\0';
        dev_info(&client->dev, "Device ID %s\n", block_buffer);
 
-       mid = NULL;
-       for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) {
-               mid = &ucd9200_id[i];
+       for (mid = ucd9200_id; mid->name[0]; mid++) {
                if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
                        break;
        }
-       if (!mid || !strlen(mid->name)) {
+       if (!mid->name[0]) {
                dev_err(&client->dev, "Unsupported device\n");
                return -ENODEV;
        }
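
Both the ucd9000 and ucd9200 hunks switch from indexing the id table with ARRAY_SIZE() to walking it until the empty-name terminator, which also lets the terminator double as the "not found" result. A minimal userspace sketch of that loop style (the struct, table contents and device string are invented for the example):

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    struct id_sketch {
            char name[16];
    };

    static const struct id_sketch ids[] = {
            { "ucd9090" },
            { "ucd90120" },
            { "" },                         /* sentinel */
    };

    int main(void)
    {
            const struct id_sketch *mid;
            const char *device = "UCD90120|2.3.4.0000|110603";

            for (mid = ids; mid->name[0]; mid++)
                    if (!strncasecmp(mid->name, device, strlen(mid->name)))
                            break;

            if (mid->name[0])
                    printf("matched %s\n", mid->name);
            else
                    printf("unsupported device\n");
            return 0;
    }
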
index 6659d26..b73da6c 100644 (file)
@@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
                return -EINVAL;
        }
        sds = kzalloc(sizeof(*sds), GFP_KERNEL);
-       if (!sds)
+       if (!sds) {
+               ret = -ENOMEM;
                goto err_mem;
+       }
 
        for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
                sds->pdev[i] = add_i2c_device(dev, i);
                if (IS_ERR(sds->pdev[i])) {
+                       ret = PTR_ERR(sds->pdev[i]);
                        while (--i >= 0)
                                platform_device_unregister(sds->pdev[i]);
                        goto err_dev_add;
index 2440b74..3c94c4a 100644 (file)
@@ -270,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
 
        /* Rounds down to not include partial word at the end of buf */
        words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
-       if (words_to_transfer > tx_fifo_avail)
-               words_to_transfer = tx_fifo_avail;
 
-       i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
-
-       buf += words_to_transfer * BYTES_PER_FIFO_WORD;
-       buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
-       tx_fifo_avail -= words_to_transfer;
+       /* It's very common to have < 4 bytes, so optimize that case. */
+       if (words_to_transfer) {
+               if (words_to_transfer > tx_fifo_avail)
+                       words_to_transfer = tx_fifo_avail;
+
+               /*
+                * Update state before writing to FIFO.  If this causes us
+                * to finish writing all bytes (AKA buf_remaining goes to 0) we
+                * have a potential for an interrupt (PACKET_XFER_COMPLETE is
+                * not maskable).  We need to make sure that the isr sees
+                * buf_remaining as 0 and doesn't call us back re-entrantly.
+                */
+               buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+               tx_fifo_avail -= words_to_transfer;
+               i2c_dev->msg_buf_remaining = buf_remaining;
+               i2c_dev->msg_buf = buf +
+                       words_to_transfer * BYTES_PER_FIFO_WORD;
+               barrier();
+
+               i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+
+               buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+       }
 
        /*
         * If there is a partial word at the end of buf, handle it manually to
@@ -287,14 +303,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
        if (tx_fifo_avail > 0 && buf_remaining > 0) {
                BUG_ON(buf_remaining > 3);
                memcpy(&val, buf, buf_remaining);
+
+               /* Again, update state before writing to FIFO so the isr sees it. */
+               i2c_dev->msg_buf_remaining = 0;
+               i2c_dev->msg_buf = NULL;
+               barrier();
+
                i2c_writel(i2c_dev, val, I2C_TX_FIFO);
-               buf_remaining = 0;
-               tx_fifo_avail--;
        }
 
-       BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
-       i2c_dev->msg_buf_remaining = buf_remaining;
-       i2c_dev->msg_buf = buf;
        return 0;
 }
 
@@ -411,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
                        tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
        }
 
-       if ((status & I2C_INT_PACKET_XFER_COMPLETE) &&
-                       !i2c_dev->msg_buf_remaining)
+       if (status & I2C_INT_PACKET_XFER_COMPLETE) {
+               BUG_ON(i2c_dev->msg_buf_remaining);
                complete(&i2c_dev->msg_complete);
+       }
 
        i2c_writel(i2c_dev, status, I2C_INT_STATUS);
        if (i2c_dev->is_dvc)
@@ -531,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 
 static u32 tegra_i2c_func(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C;
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
 static const struct i2c_algorithm tegra_i2c_algo = {
@@ -719,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev)
 }
 #endif
 
+#if defined(CONFIG_OF)
+/* Match table for of_platform binding */
+static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
+       { .compatible = "nvidia,tegra20-i2c", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
+#else
+#define tegra_i2c_of_match NULL
+#endif
+
 static struct platform_driver tegra_i2c_driver = {
        .probe   = tegra_i2c_probe,
        .remove  = tegra_i2c_remove,
@@ -729,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = {
        .driver  = {
                .name  = "tegra-i2c",
                .owner = THIS_MODULE,
+               .of_match_table = tegra_i2c_of_match,
        },
 };
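
The comments added in tegra_i2c_fill_tx_fifo() above describe a general ordering rule: publish the bookkeeping the ISR reads before the FIFO write that can complete the packet (PACKET_XFER_COMPLETE is not maskable), with a compiler barrier so the stores are not moved past the MMIO access. A stripped-down sketch of that shape, with invented names rather than the driver's own:

    #include <stddef.h>

    typedef unsigned char u8;

    #define barrier() __asm__ __volatile__("" ::: "memory")

    struct xfer {              /* stand-in for the driver state the ISR reads */
            size_t    remaining;
            const u8 *buf;
    };

    /* stand-in for the MMIO write that may finish the packet and fire the IRQ */
    static void write_fifo(struct xfer *dev, const u8 *buf, size_t chunk)
    {
            (void)dev; (void)buf; (void)chunk;
    }

    static void send_chunk(struct xfer *dev, const u8 *buf, size_t chunk)
    {
            dev->remaining -= chunk;        /* ISR must see the updated count  */
            dev->buf        = buf + chunk;  /* ... and the advanced pointer    */
            barrier();                      /* keep the stores before the MMIO */

            write_fifo(dev, buf, chunk);
    }

    int main(void)
    {
            static const u8 data[8];
            struct xfer dev = { sizeof(data), data };

            send_chunk(&dev, data, 4);
            return (int)dev.remaining;      /* 4 */
    }
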
 
index a14f8dc..0e4227f 100644 (file)
@@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd)
  * Writes the command to the IOMMUs command buffer and informs the
  * hardware about the new command.
  */
-static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+static int iommu_queue_command_sync(struct amd_iommu *iommu,
+                                   struct iommu_cmd *cmd,
+                                   bool sync)
 {
        u32 left, tail, head, next_tail;
        unsigned long flags;
@@ -639,13 +641,18 @@ again:
        copy_cmd_to_buffer(iommu, cmd, tail);
 
        /* We need to sync now to make sure all commands are processed */
-       iommu->need_sync = true;
+       iommu->need_sync = sync;
 
        spin_unlock_irqrestore(&iommu->lock, flags);
 
        return 0;
 }
 
+static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+{
+       return iommu_queue_command_sync(iommu, cmd, true);
+}
+
 /*
  * This function queues a completion wait command into the command
  * buffer of an IOMMU
@@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 
        build_completion_wait(&cmd, (u64)&sem);
 
-       ret = iommu_queue_command(iommu, &cmd);
+       ret = iommu_queue_command_sync(iommu, &cmd, false);
        if (ret)
                return ret;
 
@@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain)
 static void domain_flush_devices(struct protection_domain *domain)
 {
        struct iommu_dev_data *dev_data;
-       unsigned long flags;
-
-       spin_lock_irqsave(&domain->lock, flags);
 
        list_for_each_entry(dev_data, &domain->dev_list, list)
                device_flush_dte(dev_data);
-
-       spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 /****************************************************************************
index 0ce29b6..2f2da05 100644 (file)
@@ -10,9 +10,9 @@ typedef struct dev_info dev_info_t;
 
 struct linear_private_data
 {
+       struct rcu_head         rcu;
        sector_t                array_sectors;
        dev_info_t              disks[0];
-       struct rcu_head         rcu;
 };
 
 
index 8e221a2..5404b22 100644 (file)
@@ -848,7 +848,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
        bio->bi_end_io = super_written;
 
        atomic_inc(&mddev->pending_writes);
-       submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
+       submit_bio(WRITE_FLUSH_FUA, bio);
 }
 
 void md_super_wait(mddev_t *mddev)
@@ -1138,8 +1138,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
                        ret = 0;
        }
        rdev->sectors = rdev->sb_start;
+       /* Limit to 4TB as metadata cannot record more than that */
+       if (rdev->sectors >= (2ULL << 32))
+               rdev->sectors = (2ULL << 32) - 2;
 
-       if (rdev->sectors < sb->size * 2 && sb->level > 1)
+       if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
                /* "this cannot possibly happen" ... */
                ret = -EINVAL;
 
@@ -1173,7 +1176,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->clevel[0] = 0;
                mddev->layout = sb->layout;
                mddev->raid_disks = sb->raid_disks;
-               mddev->dev_sectors = sb->size * 2;
+               mddev->dev_sectors = ((sector_t)sb->size) * 2;
                mddev->events = ev1;
                mddev->bitmap_info.offset = 0;
                mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
@@ -1415,6 +1418,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
        rdev->sb_start = calc_dev_sboffset(rdev);
        if (!num_sectors || num_sectors > rdev->sb_start)
                num_sectors = rdev->sb_start;
+       /* Limit to 4TB as metadata cannot record more than that.
+        * 4TB == 2^32 KB, or 2*2^32 sectors.
+        */
+       if (num_sectors >= (2ULL << 32))
+               num_sectors = (2ULL << 32) - 2;
        md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
                       rdev->sb_page);
        md_super_wait(rdev->mddev);
@@ -1738,6 +1746,11 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        sb->level = cpu_to_le32(mddev->level);
        sb->layout = cpu_to_le32(mddev->layout);
 
+       if (test_bit(WriteMostly, &rdev->flags))
+               sb->devflags |= WriteMostly1;
+       else
+               sb->devflags &= ~WriteMostly1;
+
        if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
                sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
                sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
@@ -2561,7 +2574,10 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
        int err = -EINVAL;
        if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
                md_error(rdev->mddev, rdev);
-               err = 0;
+               if (test_bit(Faulty, &rdev->flags))
+                       err = 0;
+               else
+                       err = -EBUSY;
        } else if (cmd_match(buf, "remove")) {
                if (rdev->raid_disk >= 0)
                        err = -EBUSY;
@@ -2584,7 +2600,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                err = 0;
        } else if (cmd_match(buf, "-blocked")) {
                if (!test_bit(Faulty, &rdev->flags) &&
-                   test_bit(BlockedBadBlocks, &rdev->flags)) {
+                   rdev->badblocks.unacked_exist) {
                        /* metadata handler doesn't understand badblocks,
                         * so we need to fail the device
                         */
@@ -5983,6 +5999,8 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)
                return -ENODEV;
 
        md_error(mddev, rdev);
+       if (!test_bit(Faulty, &rdev->flags))
+               return -EBUSY;
        return 0;
 }
 
index 32323f0..f4622dd 100644 (file)
@@ -1099,12 +1099,11 @@ read_again:
                bio_list_add(&conf->pending_bio_list, mbio);
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
-       r1_bio_write_done(r1_bio);
-
-       /* In case raid1d snuck in to freeze_array */
-       wake_up(&conf->wait_barrier);
-
+       /* Mustn't call r1_bio_write_done before this next test,
+        * as it could result in the bio being freed.
+        */
        if (sectors_handled < (bio->bi_size >> 9)) {
+               r1_bio_write_done(r1_bio);
                /* We need another r1_bio.  It has already been counted
                 * in bio->bi_phys_segments
                 */
@@ -1117,6 +1116,11 @@ read_again:
                goto retry_write;
        }
 
+       r1_bio_write_done(r1_bio);
+
+       /* In case raid1d snuck in to freeze_array */
+       wake_up(&conf->wait_barrier);
+
        if (do_sync || !bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
 
index 8b29cd4..d7a8468 100644 (file)
@@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio)
        md_write_end(r10_bio->mddev);
 }
 
+static void one_write_done(r10bio_t *r10_bio)
+{
+       if (atomic_dec_and_test(&r10_bio->remaining)) {
+               if (test_bit(R10BIO_WriteError, &r10_bio->state))
+                       reschedule_retry(r10_bio);
+               else {
+                       close_write(r10_bio);
+                       if (test_bit(R10BIO_MadeGood, &r10_bio->state))
+                               reschedule_retry(r10_bio);
+                       else
+                               raid_end_bio_io(r10_bio);
+               }
+       }
+}
+
 static void raid10_end_write_request(struct bio *bio, int error)
 {
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
         * Let's see if all mirrored write operations have finished
         * already.
         */
-       if (atomic_dec_and_test(&r10_bio->remaining)) {
-               if (test_bit(R10BIO_WriteError, &r10_bio->state))
-                       reschedule_retry(r10_bio);
-               else {
-                       close_write(r10_bio);
-                       if (test_bit(R10BIO_MadeGood, &r10_bio->state))
-                               reschedule_retry(r10_bio);
-                       else
-                               raid_end_bio_io(r10_bio);
-               }
-       }
+       one_write_done(r10_bio);
        if (dec_rdev)
                rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
 }
@@ -1127,20 +1132,12 @@ retry_write:
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
 
-       if (atomic_dec_and_test(&r10_bio->remaining)) {
-               /* This matches the end of raid10_end_write_request() */
-               bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
-                               r10_bio->sectors,
-                               !test_bit(R10BIO_Degraded, &r10_bio->state),
-                               0);
-               md_write_end(mddev);
-               raid_end_bio_io(r10_bio);
-       }
-
-       /* In case raid10d snuck in to freeze_array */
-       wake_up(&conf->wait_barrier);
+       /* Don't remove the bias on 'remaining' (one_write_done) until
+        * after checking if we need to go around again.
+        */
 
        if (sectors_handled < (bio->bi_size >> 9)) {
+               one_write_done(r10_bio);
                /* We need another r10_bio.  It has already been counted
                 * in bio->bi_phys_segments.
                 */
@@ -1154,6 +1151,10 @@ retry_write:
                r10_bio->state = 0;
                goto retry_write;
        }
+       one_write_done(r10_bio);
+
+       /* In case raid10d snuck in to freeze_array */
+       wake_up(&conf->wait_barrier);
 
        if (do_sync || !mddev->bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
index dbae459..43709fa 100644 (file)
@@ -3336,7 +3336,7 @@ static void handle_stripe(struct stripe_head *sh)
 
 finish:
        /* wait for this device to become unblocked */
-       if (unlikely(s.blocked_rdev))
+       if (conf->mddev->external && unlikely(s.blocked_rdev))
                md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
 
        if (s.handle_bad_blocks)
index 91a0a74..b27b940 100644 (file)
@@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
                if (mrq->done)
                        mrq->done(mrq);
 
-               mmc_host_clk_gate(host);
+               mmc_host_clk_release(host);
        }
 }
 
@@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
                        mrq->stop->mrq = mrq;
                }
        }
-       mmc_host_clk_ungate(host);
+       mmc_host_clk_hold(host);
        led_trigger_event(host->led, LED_FULL);
        host->ops->request(host, mrq);
 }
@@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host)
  */
 void mmc_set_chip_select(struct mmc_host *host, int mode)
 {
+       mmc_host_clk_hold(host);
        host->ios.chip_select = mode;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
  * Sets the host clock to the highest possible frequency that
  * is below "hz".
  */
-void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
 {
        WARN_ON(hz < host->f_min);
 
@@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
        mmc_set_ios(host);
 }
 
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+{
+       mmc_host_clk_hold(host);
+       __mmc_set_clock(host, hz);
+       mmc_host_clk_release(host);
+}
+
 #ifdef CONFIG_MMC_CLKGATE
 /*
  * This gates the clock by setting it to 0 Hz.
@@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host)
        if (host->clk_old) {
                BUG_ON(host->ios.clock);
                /* This call will also set host->clk_gated to false */
-               mmc_set_clock(host, host->clk_old);
+               __mmc_set_clock(host, host->clk_old);
        }
 }
 
@@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host)
  */
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 {
+       mmc_host_clk_hold(host);
        host->ios.bus_mode = mode;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
@@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
  */
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
 {
+       mmc_host_clk_hold(host);
        host->ios.bus_width = width;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /**
@@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
 
                ocr &= 3 << bit;
 
+               mmc_host_clk_hold(host);
                host->ios.vdd = bit;
                mmc_set_ios(host);
+               mmc_host_clk_release(host);
        } else {
                pr_warning("%s: host doesn't support card's voltages\n",
                                mmc_hostname(host));
@@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
  */
 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
 {
+       mmc_host_clk_hold(host);
        host->ios.timing = timing;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
@@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
  */
 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
 {
+       mmc_host_clk_hold(host);
        host->ios.drv_type = drv_type;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
@@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host)
 {
        int bit;
 
+       mmc_host_clk_hold(host);
+
        /* If ocr is set, we use it */
        if (host->ocr)
                bit = ffs(host->ocr) - 1;
@@ -1126,10 +1147,14 @@ static void mmc_power_up(struct mmc_host *host)
         * time required to reach a stable voltage.
         */
        mmc_delay(10);
+
+       mmc_host_clk_release(host);
 }
 
 static void mmc_power_off(struct mmc_host *host)
 {
+       mmc_host_clk_hold(host);
+
        host->ios.clock = 0;
        host->ios.vdd = 0;
 
@@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host)
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);
+
+       mmc_host_clk_release(host);
 }
 
 /*
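
Every ios update in this file is now bracketed by mmc_host_clk_hold()/mmc_host_clk_release() so the controller clock is guaranteed to be ungated while the registers are touched. A toy model of that reference counting (invented code; the real helpers also take host->clk_lock and defer gating to a workqueue):

    #include <stdio.h>

    struct host_sketch {
            int clk_requests;
            int clk_gated;
    };

    static void clk_hold(struct host_sketch *h)
    {
            if (h->clk_requests++ == 0 && h->clk_gated)
                    h->clk_gated = 0;       /* first user: ungate the clock  */
    }

    static void clk_release(struct host_sketch *h)
    {
            if (--h->clk_requests == 0)
                    h->clk_gated = 1;       /* last user: allow gating again */
    }

    int main(void)
    {
            struct host_sketch h = { 0, 1 };

            clk_hold(&h);      /* clock is guaranteed running here ...       */
            /* ... update host->ios and call mmc_set_ios() in the real code  */
            clk_release(&h);

            printf("requests=%d gated=%d\n", h.clk_requests, h.clk_gated);
            return 0;
    }
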
index b29d3e8..793d0a0 100644 (file)
@@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)
 }
 
 /**
- *     mmc_host_clk_ungate - ungate hardware MCI clocks
+ *     mmc_host_clk_hold - ungate hardware MCI clocks
  *     @host: host to ungate.
  *
  *     Makes sure the host ios.clock is restored to a non-zero value
  *     past this call. Increase clock reference count and ungate clock
  *     if we're the first user.
  */
-void mmc_host_clk_ungate(struct mmc_host *host)
+void mmc_host_clk_hold(struct mmc_host *host)
 {
        unsigned long flags;
 
@@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
 }
 
 /**
- *     mmc_host_clk_gate - gate off hardware MCI clocks
+ *     mmc_host_clk_release - gate off hardware MCI clocks
  *     @host: host to gate.
  *
  *     Calls the host driver with ios.clock set to zero as often as possible
  *     in order to gate off hardware MCI clocks. Decrease clock reference
  *     count and schedule disabling of clock.
  */
-void mmc_host_clk_gate(struct mmc_host *host)
+void mmc_host_clk_release(struct mmc_host *host)
 {
        unsigned long flags;
 
@@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host)
        host->clk_requests--;
        if (mmc_host_may_gate_card(host->card) &&
            !host->clk_requests)
-               schedule_work(&host->clk_gate_work);
+               queue_work(system_nrt_wq, &host->clk_gate_work);
        spin_unlock_irqrestore(&host->clk_lock, flags);
 }
 
@@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
        if (cancel_work_sync(&host->clk_gate_work))
                mmc_host_clk_gate_delayed(host);
        if (host->clk_gated)
-               mmc_host_clk_ungate(host);
+               mmc_host_clk_hold(host);
        /* There should be only one user now */
        WARN_ON(host->clk_requests > 1);
 }
index de199f9..fb8a5cd 100644 (file)
@@ -16,16 +16,16 @@ int mmc_register_host_class(void);
 void mmc_unregister_host_class(void);
 
 #ifdef CONFIG_MMC_CLKGATE
-void mmc_host_clk_ungate(struct mmc_host *host);
-void mmc_host_clk_gate(struct mmc_host *host);
+void mmc_host_clk_hold(struct mmc_host *host);
+void mmc_host_clk_release(struct mmc_host *host);
 unsigned int mmc_host_clk_rate(struct mmc_host *host);
 
 #else
-static inline void mmc_host_clk_ungate(struct mmc_host *host)
+static inline void mmc_host_clk_hold(struct mmc_host *host)
 {
 }
 
-static inline void mmc_host_clk_gate(struct mmc_host *host)
+static inline void mmc_host_clk_release(struct mmc_host *host)
 {
 }
 
index 633975f..0370e03 100644 (file)
@@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
        return 0;
 }
 
-static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+static void sd_update_bus_speed_mode(struct mmc_card *card)
 {
-       unsigned int bus_speed = 0, timing = 0;
-       int err;
-
        /*
         * If the host doesn't support any of the UHS-I modes, fallback on
         * default speed.
         */
        if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
-           MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
-               return 0;
+           MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
+               card->sd_bus_speed = 0;
+               return;
+       }
 
        if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
            (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
-                       bus_speed = UHS_SDR104_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR104;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
        } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
                   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
-                       bus_speed = UHS_DDR50_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_DDR50;
-                       card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+                       card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
        } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
                    MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
                    SD_MODE_UHS_SDR50)) {
-                       bus_speed = UHS_SDR50_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR50;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
        } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
                    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
                   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
-                       bus_speed = UHS_SDR25_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR25;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
        } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
                    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
                    MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
                    SD_MODE_UHS_SDR12)) {
-                       bus_speed = UHS_SDR12_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR12;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+       }
+}
+
+static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+{
+       int err;
+       unsigned int timing = 0;
+
+       switch (card->sd_bus_speed) {
+       case UHS_SDR104_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR104;
+               card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+               break;
+       case UHS_DDR50_BUS_SPEED:
+               timing = MMC_TIMING_UHS_DDR50;
+               card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+               break;
+       case UHS_SDR50_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR50;
+               card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+               break;
+       case UHS_SDR25_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR25;
+               card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+               break;
+       case UHS_SDR12_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR12;
+               card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+               break;
+       default:
+               return 0;
        }
 
-       card->sd_bus_speed = bus_speed;
-       err = mmc_sd_switch(card, 1, 0, bus_speed, status);
+       err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
        if (err)
                return err;
 
-       if ((status[16] & 0xF) != bus_speed)
+       if ((status[16] & 0xF) != card->sd_bus_speed)
                printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
                        mmc_hostname(card->host));
        else {
@@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
                mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
        }
 
+       /*
+        * Select the bus speed mode depending on host
+        * and card capability.
+        */
+       sd_update_bus_speed_mode(card);
+
        /* Set the driver strength for the card */
        err = sd_select_driver_type(card, status);
        if (err)
                goto out;
 
-       /* Set bus speed mode of the card */
-       err = sd_set_bus_speed_mode(card, status);
+       /* Set current limit for the card */
+       err = sd_set_current_limit(card, status);
        if (err)
                goto out;
 
-       /* Set current limit for the card */
-       err = sd_set_current_limit(card, status);
+       /* Set bus speed mode of the card */
+       err = sd_set_bus_speed_mode(card, status);
        if (err)
                goto out;
 
index 0e9780f..4dc0028 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/gpio.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
index 2bd7bf4..fe886d6 100644 (file)
@@ -302,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
                ctrl &= ~SDHCI_CTRL_8BITBUS;
                break;
        default:
+               ctrl &= ~SDHCI_CTRL_4BITBUS;
+               ctrl &= ~SDHCI_CTRL_8BITBUS;
                break;
        }
 
index 774f643..0c4a672 100644 (file)
@@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
        mmc_data->hclk = clk_get_rate(priv->clk);
        mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
        mmc_data->get_cd = sh_mobile_sdhi_get_cd;
-       if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
-               mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
        mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
        if (p) {
                mmc_data->flags = p->tmio_flags;
+               if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
+                       mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
                mmc_data->ocr_mask = p->tmio_ocr_mask;
                mmc_data->capabilities |= p->tmio_caps;
 
index 65b5b76..64fbb00 100644 (file)
@@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
 
 #define ubi_dbg_msg(fmt, ...) do {                                           \
        if (0)                                                               \
-               pr_debug(fmt "\n", ##__VA_ARGS__);                           \
+               printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__);                  \
 } while (0)
 
 #define dbg_msg(fmt, ...)  ubi_dbg_msg(fmt, ##__VA_ARGS__)
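
The if (0) wrapper above is the usual trick for a debug macro that compiles away completely while still type-checking its arguments; switching the body from pr_debug() back to a plain printk() presumably keeps the dead call from leaving anything behind (for example dynamic-debug metadata). A userspace illustration of the idiom, using printf():

    #include <stdio.h>

    #define dbg_msg(fmt, ...) do {                          \
            if (0)                                          \
                    printf(fmt "\n", ##__VA_ARGS__);        \
    } while (0)

    int main(void)
    {
            /* compiles and checks the arguments, but prints nothing */
            dbg_msg("value is %d", 42);
            return 0;
    }
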
index 753b21a..3ffd9c1 100644 (file)
@@ -169,7 +169,9 @@ void pci_configure_slot(struct pci_dev *dev)
                        (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
                return;
 
-       pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss);
+       if (dev->bus && dev->bus->self)
+               pcie_bus_configure_settings(dev->bus,
+                                           dev->bus->self->pcie_mpss);
 
        memset(&hpp, 0, sizeof(hpp));
        ret = pci_get_hp_params(dev, &hpp);
index 0ce6742..4e84fd4 100644 (file)
@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
 
-enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
 
 /*
  * The default CLS is used if arch didn't set CLS explicitly and not
index 8473727..b1187ff 100644 (file)
@@ -1396,34 +1396,37 @@ static void pcie_write_mps(struct pci_dev *dev, int mps)
 
 static void pcie_write_mrrs(struct pci_dev *dev, int mps)
 {
-       int rc, mrrs;
+       int rc, mrrs, dev_mpss;
 
-       if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
-               int dev_mpss = 128 << dev->pcie_mpss;
+       /* In the "safe" case, do not configure the MRRS.  There appear to be
+        * issues with setting MRRS to 0 on a number of devices.
+        */
 
-               /* For Max performance, the MRRS must be set to the largest
-                * supported value.  However, it cannot be configured larger
-                * than the MPS the device or the bus can support.  This assumes
-                * that the largest MRRS available on the device cannot be
-                * smaller than the device MPSS.
-                */
-               mrrs = mps < dev_mpss ? mps : dev_mpss;
-       } else
-               /* In the "safe" case, configure the MRRS for fairness on the
-                * bus by making all devices have the same size
-                */
-               mrrs = mps;
+       if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
+               return;
 
+       dev_mpss = 128 << dev->pcie_mpss;
+
+       /* For Max performance, the MRRS must be set to the largest supported
+        * value.  However, it cannot be configured larger than the MPS the
+        * device or the bus can support.  This assumes that the largest MRRS
+        * available on the device cannot be smaller than the device MPSS.
+        */
+       mrrs = min(mps, dev_mpss);
 
        /* MRRS is a R/W register.  Invalid values can be written, but a
-        * subsiquent read will verify if the value is acceptable or not.
+        * subsequent read will verify if the value is acceptable or not.
         * If the MRRS value provided is not acceptable (e.g., too large),
         * shrink the value until it is acceptable to the HW.
         */
        while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
+               dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value"
+                        " to %d.  If any issues are encountered, please try "
+                        "running with pci=pcie_bus_safe\n", mrrs);
                rc = pcie_set_readrq(dev, mrrs);
                if (rc)
-                       dev_err(&dev->dev, "Failed attempting to set the MRRS\n");
+                       dev_err(&dev->dev,
+                               "Failed attempting to set the MRRS\n");
 
                mrrs /= 2;
        }
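
A minimal userspace sketch (not kernel code) of the MRRS negotiation performed above: clamp the requested read-request size to the device MPSS, then halve it until the hardware accepts the value. device_accepts_readrq() is a hypothetical stand-in for pcie_set_readrq() plus the read-back check, and the MPS/MPSS values are illustrative:

#include <stdio.h>

static int device_accepts_readrq(int bytes)
{
        /* Pretend the hardware only accepts read requests up to 256 bytes. */
        return bytes <= 256;
}

int main(void)
{
        int mps = 1024;                 /* bus MPS */
        int dev_mpss = 128 << 2;        /* device advertises a 512-byte MPSS */
        int mrrs = mps < dev_mpss ? mps : dev_mpss;   /* clamp to MPSS */

        while (!device_accepts_readrq(mrrs) && mrrs >= 128) {
                printf("MRRS %d rejected, halving\n", mrrs);
                mrrs /= 2;
        }
        printf("final MRRS: %d\n", mrrs);
        return 0;
}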
@@ -1436,13 +1439,13 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
        if (!pci_is_pcie(dev))
                return 0;
 
-       dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
+       dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
                 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
 
        pcie_write_mps(dev, mps);
        pcie_write_mrrs(dev, mps);
 
-       dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
+       dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
                 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
 
        return 0;
@@ -1456,9 +1459,6 @@ void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
 {
        u8 smpss = mpss;
 
-       if (!bus->self)
-               return;
-
        if (!pci_is_pcie(bus->self))
                return;
 
index 335551d..14a42a1 100644 (file)
@@ -36,6 +36,7 @@
  */
 struct ep93xx_rtc {
        void __iomem    *mmio_base;
+       struct rtc_device *rtc;
 };
 
 static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
@@ -130,7 +131,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
 {
        struct ep93xx_rtc *ep93xx_rtc;
        struct resource *res;
-       struct rtc_device *rtc;
        int err;
 
        ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
@@ -151,12 +151,12 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
                return -ENXIO;
 
        pdev->dev.platform_data = ep93xx_rtc;
-       platform_set_drvdata(pdev, rtc);
+       platform_set_drvdata(pdev, ep93xx_rtc);
 
-       rtc = rtc_device_register(pdev->name,
+       ep93xx_rtc->rtc = rtc_device_register(pdev->name,
                                &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
-       if (IS_ERR(rtc)) {
-               err = PTR_ERR(rtc);
+       if (IS_ERR(ep93xx_rtc->rtc)) {
+               err = PTR_ERR(ep93xx_rtc->rtc);
                goto exit;
        }
 
@@ -167,7 +167,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
        return 0;
 
 fail:
-       rtc_device_unregister(rtc);
+       rtc_device_unregister(ep93xx_rtc->rtc);
 exit:
        platform_set_drvdata(pdev, NULL);
        pdev->dev.platform_data = NULL;
@@ -176,11 +176,11 @@ exit:
 
 static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
 {
-       struct rtc_device *rtc = platform_get_drvdata(pdev);
+       struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
 
        sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
        platform_set_drvdata(pdev, NULL);
-       rtc_device_unregister(rtc);
+       rtc_device_unregister(ep93xx_rtc->rtc);
        pdev->dev.platform_data = NULL;
 
        return 0;
index 075f170..c4cf057 100644 (file)
@@ -85,6 +85,8 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
        time -= tm->tm_hour * 3600;
        tm->tm_min = time / 60;
        tm->tm_sec = time - tm->tm_min * 60;
+
+       tm->tm_isdst = 0;
 }
 EXPORT_SYMBOL(rtc_time_to_tm);
 
index 9a81f77..20687d5 100644 (file)
@@ -362,14 +362,6 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
        int res;
        u8 rd_reg;
 
-#ifdef CONFIG_LOCKDEP
-       /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
-        * we don't want and can't tolerate.  Although it might be
-        * friendlier not to borrow this thread context...
-        */
-       local_irq_enable();
-#endif
-
        res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
        if (res)
                goto out;
@@ -428,24 +420,12 @@ static struct rtc_class_ops twl_rtc_ops = {
 static int __devinit twl_rtc_probe(struct platform_device *pdev)
 {
        struct rtc_device *rtc;
-       int ret = 0;
+       int ret = -EINVAL;
        int irq = platform_get_irq(pdev, 0);
        u8 rd_reg;
 
        if (irq <= 0)
-               return -EINVAL;
-
-       rtc = rtc_device_register(pdev->name,
-                                 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
-       if (IS_ERR(rtc)) {
-               ret = PTR_ERR(rtc);
-               dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
-                       PTR_ERR(rtc));
-               goto out0;
-
-       }
-
-       platform_set_drvdata(pdev, rtc);
+               goto out1;
 
        ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
        if (ret < 0)
@@ -462,14 +442,6 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
        if (ret < 0)
                goto out1;
 
-       ret = request_irq(irq, twl_rtc_interrupt,
-                               IRQF_TRIGGER_RISING,
-                               dev_name(&rtc->dev), rtc);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "IRQ is not free.\n");
-               goto out1;
-       }
-
        if (twl_class_is_6030()) {
                twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
                        REG_INT_MSK_LINE_A);
@@ -480,28 +452,44 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
        /* Check RTC module status, Enable if it is off */
        ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
        if (ret < 0)
-               goto out2;
+               goto out1;
 
        if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
                dev_info(&pdev->dev, "Enabling TWL-RTC.\n");
                rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
                ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
                if (ret < 0)
-                       goto out2;
+                       goto out1;
        }
 
        /* init cached IRQ enable bits */
        ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
        if (ret < 0)
+               goto out1;
+
+       rtc = rtc_device_register(pdev->name,
+                                 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
+       if (IS_ERR(rtc)) {
+               ret = PTR_ERR(rtc);
+               dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
+                       PTR_ERR(rtc));
+               goto out1;
+       }
+
+       ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
+                                  IRQF_TRIGGER_RISING,
+                                  dev_name(&rtc->dev), rtc);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "IRQ is not free.\n");
                goto out2;
+       }
 
-       return ret;
+       platform_set_drvdata(pdev, rtc);
+       return 0;
 
 out2:
-       free_irq(irq, rtc);
-out1:
        rtc_device_unregister(rtc);
-out0:
+out1:
        return ret;
 }
 
index 80d292f..7363c1b 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/backlight.h>
 #endif
 
-static const char const *backlight_types[] = {
+static const char *const backlight_types[] = {
        [BACKLIGHT_RAW] = "raw",
        [BACKLIGHT_PLATFORM] = "platform",
        [BACKLIGHT_FIRMWARE] = "firmware",
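
The hunk above fixes the qualifier placement: "const char *const" declares an array of constant pointers to constant strings, while the old "const char const *" merely repeated the const on the pointed-to chars and left the pointers writable. A small standalone illustration, with a hypothetical array name:

#include <stdio.h>

/* Constant pointers to constant chars: neither the slots nor the
 * strings they point at can be modified. */
static const char *const backlight_types_example[] = {
        "raw", "platform", "firmware",
};

int main(void)
{
        printf("%s\n", backlight_types_example[1]);
        return 0;
}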
index 46ce357..410ffd6 100644 (file)
@@ -54,9 +54,9 @@ extern struct kmem_cache *v9fs_inode_cache;
 
 struct inode *v9fs_alloc_inode(struct super_block *sb);
 void v9fs_destroy_inode(struct inode *inode);
-struct inode *v9fs_get_inode(struct super_block *sb, int mode);
+struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t);
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
-                   struct inode *inode, int mode);
+                   struct inode *inode, int mode, dev_t);
 void v9fs_evict_inode(struct inode *inode);
 ino_t v9fs_qid2ino(struct p9_qid *qid);
 void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
@@ -83,4 +83,6 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
        v9inode->cache_validity |= V9FS_INO_INVALID_ATTR;
        return;
 }
+
+int v9fs_open_to_dotl_flags(int flags);
 #endif
index 3c173fc..62857a8 100644 (file)
@@ -65,7 +65,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
        v9inode = V9FS_I(inode);
        v9ses = v9fs_inode2v9ses(inode);
        if (v9fs_proto_dotl(v9ses))
-               omode = file->f_flags;
+               omode = v9fs_open_to_dotl_flags(file->f_flags);
        else
                omode = v9fs_uflags2omode(file->f_flags,
                                        v9fs_proto_dotu(v9ses));
@@ -169,7 +169,18 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
 
        /* convert posix lock to p9 tlock args */
        memset(&flock, 0, sizeof(flock));
-       flock.type = fl->fl_type;
+       /* map the lock type */
+       switch (fl->fl_type) {
+       case F_RDLCK:
+               flock.type = P9_LOCK_TYPE_RDLCK;
+               break;
+       case F_WRLCK:
+               flock.type = P9_LOCK_TYPE_WRLCK;
+               break;
+       case F_UNLCK:
+               flock.type = P9_LOCK_TYPE_UNLCK;
+               break;
+       }
        flock.start = fl->fl_start;
        if (fl->fl_end == OFFSET_MAX)
                flock.length = 0;
@@ -245,7 +256,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
 
        /* convert posix lock to p9 tgetlock args */
        memset(&glock, 0, sizeof(glock));
-       glock.type = fl->fl_type;
+       glock.type  = P9_LOCK_TYPE_UNLCK;
        glock.start = fl->fl_start;
        if (fl->fl_end == OFFSET_MAX)
                glock.length = 0;
@@ -257,17 +268,26 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
        res = p9_client_getlock_dotl(fid, &glock);
        if (res < 0)
                return res;
-       if (glock.type != F_UNLCK) {
-               fl->fl_type = glock.type;
+       /* map 9p lock type to os lock type */
+       switch (glock.type) {
+       case P9_LOCK_TYPE_RDLCK:
+               fl->fl_type = F_RDLCK;
+               break;
+       case P9_LOCK_TYPE_WRLCK:
+               fl->fl_type = F_WRLCK;
+               break;
+       case P9_LOCK_TYPE_UNLCK:
+               fl->fl_type = F_UNLCK;
+               break;
+       }
+       if (glock.type != P9_LOCK_TYPE_UNLCK) {
                fl->fl_start = glock.start;
                if (glock.length == 0)
                        fl->fl_end = OFFSET_MAX;
                else
                        fl->fl_end = glock.start + glock.length - 1;
                fl->fl_pid = glock.proc_id;
-       } else
-               fl->fl_type = F_UNLCK;
-
+       }
        return res;
 }
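
A minimal userspace sketch of the POSIX-to-9P lock-type mapping introduced above, and its reverse; the P9_LOCK_TYPE_* values mirror the constants added to include/net/9p/9p.h later in this series, and the helper names are illustrative:

#include <fcntl.h>
#include <stdio.h>

#define P9_LOCK_TYPE_RDLCK 0
#define P9_LOCK_TYPE_WRLCK 1
#define P9_LOCK_TYPE_UNLCK 2

static int posix_to_p9(short fl_type)
{
        switch (fl_type) {
        case F_RDLCK: return P9_LOCK_TYPE_RDLCK;
        case F_WRLCK: return P9_LOCK_TYPE_WRLCK;
        default:      return P9_LOCK_TYPE_UNLCK;
        }
}

static short p9_to_posix(int type)
{
        switch (type) {
        case P9_LOCK_TYPE_RDLCK: return F_RDLCK;
        case P9_LOCK_TYPE_WRLCK: return F_WRLCK;
        default:                 return F_UNLCK;
        }
}

int main(void)
{
        printf("F_WRLCK -> %d -> %d\n",
               posix_to_p9(F_WRLCK), p9_to_posix(posix_to_p9(F_WRLCK)));
        return 0;
}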
 
index 8bb5507..e3c03db 100644 (file)
@@ -95,15 +95,18 @@ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
 /**
  * p9mode2unixmode- convert plan9 mode bits to unix mode bits
  * @v9ses: v9fs session information
- * @mode: mode to convert
+ * @stat: p9_wstat from which the mode needs to be derived
+ * @rdev: major and minor numbers in the case of device files.
  *
  */
-
-static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
+static int p9mode2unixmode(struct v9fs_session_info *v9ses,
+                          struct p9_wstat *stat, dev_t *rdev)
 {
        int res;
+       int mode = stat->mode;
 
-       res = mode & 0777;
+       res = mode & S_IALLUGO;
+       *rdev = 0;
 
        if ((mode & P9_DMDIR) == P9_DMDIR)
                res |= S_IFDIR;
@@ -116,9 +119,26 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
                 && (v9ses->nodev == 0))
                res |= S_IFIFO;
        else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses))
-                && (v9ses->nodev == 0))
-               res |= S_IFBLK;
-       else
+                && (v9ses->nodev == 0)) {
+               char type = 0, ext[32];
+               int major = -1, minor = -1;
+
+               strncpy(ext, stat->extension, sizeof(ext));
+               sscanf(ext, "%c %u %u", &type, &major, &minor);
+               switch (type) {
+               case 'c':
+                       res |= S_IFCHR;
+                       break;
+               case 'b':
+                       res |= S_IFBLK;
+                       break;
+               default:
+                       P9_DPRINTK(P9_DEBUG_ERROR,
+                               "Unknown special type %c %s\n", type,
+                               stat->extension);
+               };
+               *rdev = MKDEV(major, minor);
+       } else
                res |= S_IFREG;
 
        if (v9fs_proto_dotu(v9ses)) {
@@ -131,7 +151,6 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
                if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
                        res |= S_ISVTX;
        }
-
        return res;
 }
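
A minimal userspace sketch of the device-extension parsing folded into p9mode2unixmode() above: a 9P extension string such as "c 5 1" is parsed into a type character plus major/minor numbers, which then form the rdev. makedev() stands in for the kernel's MKDEV(), and the sample string is illustrative:

#include <stdio.h>
#include <string.h>
#include <sys/sysmacros.h>
#include <sys/types.h>

int main(void)
{
        const char *extension = "c 5 1";   /* character device 5:1 */
        char ext[32], type = 0;
        unsigned int major = 0, minor = 0;
        dev_t rdev;

        strncpy(ext, extension, sizeof(ext) - 1);
        ext[sizeof(ext) - 1] = '\0';
        sscanf(ext, "%c %u %u", &type, &major, &minor);
        rdev = makedev(major, minor);

        printf("type=%c dev=%u:%u rdev=%lu\n",
               type, major, minor, (unsigned long)rdev);
        return 0;
}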
 
@@ -242,13 +261,13 @@ void v9fs_destroy_inode(struct inode *inode)
 }
 
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
-                   struct inode *inode, int mode)
+                   struct inode *inode, int mode, dev_t rdev)
 {
        int err = 0;
 
        inode_init_owner(inode, NULL, mode);
        inode->i_blocks = 0;
-       inode->i_rdev = 0;
+       inode->i_rdev = rdev;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        inode->i_mapping->a_ops = &v9fs_addr_operations;
 
@@ -335,7 +354,7 @@ error:
  *
  */
 
-struct inode *v9fs_get_inode(struct super_block *sb, int mode)
+struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t rdev)
 {
        int err;
        struct inode *inode;
@@ -348,7 +367,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
                P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
                return ERR_PTR(-ENOMEM);
        }
-       err = v9fs_init_inode(v9ses, inode, mode);
+       err = v9fs_init_inode(v9ses, inode, mode, rdev);
        if (err) {
                iput(inode);
                return ERR_PTR(err);
@@ -435,11 +454,12 @@ void v9fs_evict_inode(struct inode *inode)
 static int v9fs_test_inode(struct inode *inode, void *data)
 {
        int umode;
+       dev_t rdev;
        struct v9fs_inode *v9inode = V9FS_I(inode);
        struct p9_wstat *st = (struct p9_wstat *)data;
        struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
 
-       umode = p9mode2unixmode(v9ses, st->mode);
+       umode = p9mode2unixmode(v9ses, st, &rdev);
        /* don't match inode of different type */
        if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
                return 0;
@@ -473,6 +493,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
                                   struct p9_wstat *st,
                                   int new)
 {
+       dev_t rdev;
        int retval, umode;
        unsigned long i_ino;
        struct inode *inode;
@@ -496,8 +517,8 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
         * later.
         */
        inode->i_ino = i_ino;
-       umode = p9mode2unixmode(v9ses, st->mode);
-       retval = v9fs_init_inode(v9ses, inode, umode);
+       umode = p9mode2unixmode(v9ses, st, &rdev);
+       retval = v9fs_init_inode(v9ses, inode, umode, rdev);
        if (retval)
                goto error;
 
@@ -532,6 +553,19 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
 }
 
 /**
+ * v9fs_at_to_dotl_flags- convert Linux specific AT flags to
+ * plan 9 AT flag.
+ * @flags: flags to convert
+ */
+static int v9fs_at_to_dotl_flags(int flags)
+{
+       int rflags = 0;
+       if (flags & AT_REMOVEDIR)
+               rflags |= P9_DOTL_AT_REMOVEDIR;
+       return rflags;
+}
+
+/**
  * v9fs_remove - helper function to remove files and directories
  * @dir: directory inode that is being deleted
  * @dentry:  dentry that is being deleted
@@ -558,7 +592,8 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
                return retval;
        }
        if (v9fs_proto_dotl(v9ses))
-               retval = p9_client_unlinkat(dfid, dentry->d_name.name, flags);
+               retval = p9_client_unlinkat(dfid, dentry->d_name.name,
+                                           v9fs_at_to_dotl_flags(flags));
        if (retval == -EOPNOTSUPP) {
                /* Try the one based on path */
                v9fid = v9fs_fid_clone(dentry);
@@ -645,13 +680,11 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
                P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
                goto error;
        }
-       d_instantiate(dentry, inode);
        err = v9fs_fid_add(dentry, fid);
        if (err < 0)
                goto error;
-
+       d_instantiate(dentry, inode);
        return ofid;
-
 error:
        if (ofid)
                p9_client_clunk(ofid);
@@ -792,6 +825,7 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
                                      struct nameidata *nameidata)
 {
+       struct dentry *res;
        struct super_block *sb;
        struct v9fs_session_info *v9ses;
        struct p9_fid *dfid, *fid;
@@ -823,22 +857,35 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 
                return ERR_PTR(result);
        }
-
-       inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+       /*
+        * Make sure we don't use a wrong inode due to parallel
+        * unlink. In cached mode, create calls request a new
+        * inode; with cache disabled, lookup should do this.
+        */
+       if (v9ses->cache)
+               inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+       else
+               inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
        if (IS_ERR(inode)) {
                result = PTR_ERR(inode);
                inode = NULL;
                goto error;
        }
-
        result = v9fs_fid_add(dentry, fid);
        if (result < 0)
                goto error_iput;
-
 inst_out:
-       d_add(dentry, inode);
-       return NULL;
-
+       /*
+        * If we had a rename on the server and a parallel lookup
+        * for the new name, then make sure we instantiate with
+        * the new name. For example: a lookup for a/b while, on the
+        * server, somebody moved b under k and the client did a
+        * parallel lookup for k/b.
+        */
+       res = d_materialise_unique(dentry, inode);
+       if (!IS_ERR(res))
+               return res;
+       result = PTR_ERR(res);
 error_iput:
        iput(inode);
 error:
@@ -1002,7 +1049,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
                return PTR_ERR(st);
 
        v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
-               generic_fillattr(dentry->d_inode, stat);
+       generic_fillattr(dentry->d_inode, stat);
 
        p9stat_free(st);
        kfree(st);
@@ -1086,6 +1133,7 @@ void
 v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
        struct super_block *sb)
 {
+       mode_t mode;
        char ext[32];
        char tag_name[14];
        unsigned int i_nlink;
@@ -1121,31 +1169,9 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
                                inode->i_nlink = i_nlink;
                }
        }
-       inode->i_mode = p9mode2unixmode(v9ses, stat->mode);
-       if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) {
-               char type = 0;
-               int major = -1;
-               int minor = -1;
-
-               strncpy(ext, stat->extension, sizeof(ext));
-               sscanf(ext, "%c %u %u", &type, &major, &minor);
-               switch (type) {
-               case 'c':
-                       inode->i_mode &= ~S_IFBLK;
-                       inode->i_mode |= S_IFCHR;
-                       break;
-               case 'b':
-                       break;
-               default:
-                       P9_DPRINTK(P9_DEBUG_ERROR,
-                               "Unknown special type %c %s\n", type,
-                               stat->extension);
-               };
-               inode->i_rdev = MKDEV(major, minor);
-               init_special_inode(inode, inode->i_mode, inode->i_rdev);
-       } else
-               inode->i_rdev = 0;
-
+       mode = stat->mode & S_IALLUGO;
+       mode |= inode->i_mode & ~S_IALLUGO;
+       inode->i_mode = mode;
        i_size_write(inode, stat->length);
 
        /* not real number of blocks, but 512 byte ones ... */
@@ -1411,6 +1437,8 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
 
 int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
 {
+       int umode;
+       dev_t rdev;
        loff_t i_size;
        struct p9_wstat *st;
        struct v9fs_session_info *v9ses;
@@ -1419,6 +1447,12 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
        st = p9_client_stat(fid);
        if (IS_ERR(st))
                return PTR_ERR(st);
+       /*
+        * Don't update inode if the file type is different
+        */
+       umode = p9mode2unixmode(v9ses, st, &rdev);
+       if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
+               goto out;
 
        spin_lock(&inode->i_lock);
        /*
@@ -1430,6 +1464,7 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
        if (v9ses->cache)
                inode->i_size = i_size;
        spin_unlock(&inode->i_lock);
+out:
        p9stat_free(st);
        kfree(st);
        return 0;
index b6c8ed2..aded79f 100644 (file)
@@ -153,7 +153,8 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
         * later.
         */
        inode->i_ino = i_ino;
-       retval = v9fs_init_inode(v9ses, inode, st->st_mode);
+       retval = v9fs_init_inode(v9ses, inode,
+                                st->st_mode, new_decode_dev(st->st_rdev));
        if (retval)
                goto error;
 
@@ -190,6 +191,58 @@ v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
        return inode;
 }
 
+struct dotl_openflag_map {
+       int open_flag;
+       int dotl_flag;
+};
+
+static int v9fs_mapped_dotl_flags(int flags)
+{
+       int i;
+       int rflags = 0;
+       struct dotl_openflag_map dotl_oflag_map[] = {
+               { O_CREAT,      P9_DOTL_CREATE },
+               { O_EXCL,       P9_DOTL_EXCL },
+               { O_NOCTTY,     P9_DOTL_NOCTTY },
+               { O_TRUNC,      P9_DOTL_TRUNC },
+               { O_APPEND,     P9_DOTL_APPEND },
+               { O_NONBLOCK,   P9_DOTL_NONBLOCK },
+               { O_DSYNC,      P9_DOTL_DSYNC },
+               { FASYNC,       P9_DOTL_FASYNC },
+               { O_DIRECT,     P9_DOTL_DIRECT },
+               { O_LARGEFILE,  P9_DOTL_LARGEFILE },
+               { O_DIRECTORY,  P9_DOTL_DIRECTORY },
+               { O_NOFOLLOW,   P9_DOTL_NOFOLLOW },
+               { O_NOATIME,    P9_DOTL_NOATIME },
+               { O_CLOEXEC,    P9_DOTL_CLOEXEC },
+               { O_SYNC,       P9_DOTL_SYNC},
+       };
+       for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
+               if (flags & dotl_oflag_map[i].open_flag)
+                       rflags |= dotl_oflag_map[i].dotl_flag;
+       }
+       return rflags;
+}
+
+/**
+ * v9fs_open_to_dotl_flags- convert Linux specific open flags to
+ * plan 9 open flag.
+ * @flags: flags to convert
+ */
+int v9fs_open_to_dotl_flags(int flags)
+{
+       int rflags = 0;
+
+       /*
+        * We have same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY
+        * and P9_DOTL_NOACCESS
+        */
+       rflags |= flags & O_ACCMODE;
+       rflags |= v9fs_mapped_dotl_flags(flags);
+
+       return rflags;
+}
+
 /**
  * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
  * @dir: directory inode that is being created
@@ -258,7 +311,8 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
                           "Failed to get acl values in creat %d\n", err);
                goto error;
        }
-       err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
+       err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
+                                   mode, gid, &qid);
        if (err < 0) {
                P9_DPRINTK(P9_DEBUG_VFS,
                                "p9_client_open_dotl failed in creat %d\n",
@@ -281,10 +335,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
                P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
                goto error;
        }
-       d_instantiate(dentry, inode);
        err = v9fs_fid_add(dentry, fid);
        if (err < 0)
                goto error;
+       d_instantiate(dentry, inode);
 
        /* Now set the ACL based on the default value */
        v9fs_set_create_acl(dentry, &dacl, &pacl);
@@ -403,10 +457,10 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
                                err);
                        goto error;
                }
-               d_instantiate(dentry, inode);
                err = v9fs_fid_add(dentry, fid);
                if (err < 0)
                        goto error;
+               d_instantiate(dentry, inode);
                fid = NULL;
        } else {
                /*
@@ -414,7 +468,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
                 * inode with stat. We need to get an inode
                 * so that we can set the acl with dentry
                 */
-               inode = v9fs_get_inode(dir->i_sb, mode);
+               inode = v9fs_get_inode(dir->i_sb, mode, 0);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        goto error;
@@ -540,6 +594,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
 void
 v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
 {
+       mode_t mode;
        struct v9fs_inode *v9inode = V9FS_I(inode);
 
        if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
@@ -552,11 +607,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
                inode->i_uid = stat->st_uid;
                inode->i_gid = stat->st_gid;
                inode->i_nlink = stat->st_nlink;
-               inode->i_mode = stat->st_mode;
-               inode->i_rdev = new_decode_dev(stat->st_rdev);
 
-               if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
-                       init_special_inode(inode, inode->i_mode, inode->i_rdev);
+               mode = stat->st_mode & S_IALLUGO;
+               mode |= inode->i_mode & ~S_IALLUGO;
+               inode->i_mode = mode;
 
                i_size_write(inode, stat->st_size);
                inode->i_blocks = stat->st_blocks;
@@ -657,14 +711,14 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
                                        err);
                        goto error;
                }
-               d_instantiate(dentry, inode);
                err = v9fs_fid_add(dentry, fid);
                if (err < 0)
                        goto error;
+               d_instantiate(dentry, inode);
                fid = NULL;
        } else {
                /* Not in cached mode. No need to populate inode with stat */
-               inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
+               inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        goto error;
@@ -810,17 +864,17 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
                                err);
                        goto error;
                }
-               d_instantiate(dentry, inode);
                err = v9fs_fid_add(dentry, fid);
                if (err < 0)
                        goto error;
+               d_instantiate(dentry, inode);
                fid = NULL;
        } else {
                /*
                 * Not in cached mode. No need to populate inode with stat.
                 * socket syscall returns a fd, so we need instantiate
                 */
-               inode = v9fs_get_inode(dir->i_sb, mode);
+               inode = v9fs_get_inode(dir->i_sb, mode, rdev);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        goto error;
@@ -886,6 +940,11 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
        st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
        if (IS_ERR(st))
                return PTR_ERR(st);
+       /*
+        * Don't update inode if the file type is different
+        */
+       if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
+               goto out;
 
        spin_lock(&inode->i_lock);
        /*
@@ -897,6 +956,7 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
        if (v9ses->cache)
                inode->i_size = i_size;
        spin_unlock(&inode->i_lock);
+out:
        kfree(st);
        return 0;
 }
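
A minimal userspace sketch of the table-driven open-flag translation added in v9fs_mapped_dotl_flags()/v9fs_open_to_dotl_flags() above: copy the access-mode bits through, then walk a {Linux flag, 9P2000.L flag} table and OR in the matches. The constants and table entries here are a trimmed, illustrative subset:

#include <fcntl.h>
#include <stdio.h>

#define P9_DOTL_CREATE 00000100
#define P9_DOTL_TRUNC  00001000
#define P9_DOTL_APPEND 00002000

struct flag_map { int linux_flag; int dotl_flag; };

static int map_open_flags(int flags)
{
        static const struct flag_map tbl[] = {
                { O_CREAT,  P9_DOTL_CREATE },
                { O_TRUNC,  P9_DOTL_TRUNC },
                { O_APPEND, P9_DOTL_APPEND },
        };
        int rflags = flags & O_ACCMODE;   /* access bits map 1:1 */
        size_t i;

        for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                if (flags & tbl[i].linux_flag)
                        rflags |= tbl[i].dotl_flag;
        return rflags;
}

int main(void)
{
        printf("0%o\n", map_open_flags(O_WRONLY | O_CREAT | O_TRUNC));
        return 0;
}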
index feef6cd..c70251d 100644 (file)
@@ -149,7 +149,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
        else
                sb->s_d_op = &v9fs_dentry_operations;
 
-       inode = v9fs_get_inode(sb, S_IFDIR | mode);
+       inode = v9fs_get_inode(sb, S_IFDIR | mode, 0);
        if (IS_ERR(inode)) {
                retval = PTR_ERR(inode);
                goto release_sb;
index ff77262..95f786e 100644 (file)
@@ -1429,6 +1429,11 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                WARN_ON_ONCE(bdev->bd_holders);
                sync_blockdev(bdev);
                kill_bdev(bdev);
+               /* ->release can cause the old bdi to disappear,
+                * so must switch it out first
+                */
+               bdev_inode_switch_bdi(bdev->bd_inode,
+                                       &default_backing_dev_info);
        }
        if (bdev->bd_contains == bdev) {
                if (disk->fops->release)
@@ -1442,8 +1447,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                disk_put_part(bdev->bd_part);
                bdev->bd_part = NULL;
                bdev->bd_disk = NULL;
-               bdev_inode_switch_bdi(bdev->bd_inode,
-                                       &default_backing_dev_info);
                if (bdev != bdev->bd_contains)
                        victim = bdev->bd_contains;
                bdev->bd_contains = NULL;
index fee028b..86c59e1 100644 (file)
@@ -1595,7 +1595,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
                r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
                dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
                     *ppath);
-       } else if (rpath) {
+       } else if (rpath || rino) {
                *ino = rino;
                *ppath = rpath;
                *pathlen = strlen(rpath);
index d47c5ec..88bacaf 100644 (file)
@@ -813,8 +813,8 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
        fsc = create_fs_client(fsopt, opt);
        if (IS_ERR(fsc)) {
                res = ERR_CAST(fsc);
-               kfree(fsopt);
-               kfree(opt);
+               destroy_mount_options(fsopt);
+               ceph_destroy_options(opt);
                goto out_final;
        }
 
index e717dfd..b7d7bd0 100644 (file)
@@ -175,6 +175,7 @@ struct mpage_da_data {
  */
 #define        EXT4_IO_END_UNWRITTEN   0x0001
 #define EXT4_IO_END_ERROR      0x0002
+#define EXT4_IO_END_QUEUED     0x0004
 
 struct ext4_io_page {
        struct page     *p_page;
index c4da98a..18d2558 100644 (file)
@@ -121,9 +121,6 @@ void ext4_evict_inode(struct inode *inode)
 
        trace_ext4_evict_inode(inode);
 
-       mutex_lock(&inode->i_mutex);
-       ext4_flush_completed_IO(inode);
-       mutex_unlock(&inode->i_mutex);
        ext4_ioend_wait(inode);
 
        if (inode->i_nlink) {
index 78839af..92f38ee 100644 (file)
@@ -142,7 +142,23 @@ static void ext4_end_io_work(struct work_struct *work)
        unsigned long           flags;
        int                     ret;
 
-       mutex_lock(&inode->i_mutex);
+       if (!mutex_trylock(&inode->i_mutex)) {
+               /*
+                * Requeue the work instead of waiting so that the work
+                * items queued after this can be processed.
+                */
+               queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work);
+               /*
+                * To prevent the ext4-dio-unwritten thread from repeatedly
+                * requeueing end_io requests and occupying the CPU for too long,
+                * yield the cpu if it sees an end_io request that has already
+                * been requeued.
+                */
+               if (io->flag & EXT4_IO_END_QUEUED)
+                       yield();
+               io->flag |= EXT4_IO_END_QUEUED;
+               return;
+       }
        ret = ext4_end_io_nolock(io);
        if (ret < 0) {
                mutex_unlock(&inode->i_mutex);
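
A minimal userspace sketch of the "trylock or requeue" pattern used above: if the lock is contended, the work item is put back on the queue, and the worker yields once the item has already been requeued rather than blocking. requeue_work() and the QUEUED flag are illustrative stand-ins for the workqueue calls and EXT4_IO_END_QUEUED:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define WORK_QUEUED 0x1

struct work { int flags; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void requeue_work(struct work *w)
{
        printf("requeueing work %p\n", (void *)w);
}

static void end_io_work(struct work *w)
{
        if (pthread_mutex_trylock(&lock) != 0) {
                requeue_work(w);                /* don't block the worker */
                if (w->flags & WORK_QUEUED)
                        sched_yield();          /* already requeued once: yield */
                w->flags |= WORK_QUEUED;
                return;
        }
        printf("doing the real completion work\n");
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        struct work w = { 0 };
        end_io_work(&w);
        return 0;
}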
index 2826db3..b52bc68 100644 (file)
@@ -727,25 +727,22 @@ static int follow_automount(struct path *path, unsigned flags,
        if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT))
                return -EISDIR; /* we actually want to stop here */
 
-       /*
-        * We don't want to mount if someone's just doing a stat and they've
-        * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and
-        * appended a '/' to the name.
+       /* We don't want to mount if someone's just doing a stat -
+        * unless they're stat'ing a directory and appended a '/' to
+        * the name.
+        *
+        * We do, however, want to mount if someone wants to open or
+        * create a file of any type under the mountpoint, wants to
+        * traverse through the mountpoint or wants to open the
+        * mounted directory.  Also, autofs may mark negative dentries
+        * as being automount points.  These will need the attentions
+        * of the daemon to instantiate them before they can be used.
         */
-       if (!(flags & LOOKUP_FOLLOW)) {
-               /* We do, however, want to mount if someone wants to open or
-                * create a file of any type under the mountpoint, wants to
-                * traverse through the mountpoint or wants to open the mounted
-                * directory.
-                * Also, autofs may mark negative dentries as being automount
-                * points.  These will need the attentions of the daemon to
-                * instantiate them before they can be used.
-                */
-               if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
-                            LOOKUP_OPEN | LOOKUP_CREATE)) &&
-                   path->dentry->d_inode)
-                       return -EISDIR;
-       }
+       if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
+                    LOOKUP_OPEN | LOOKUP_CREATE)) &&
+           path->dentry->d_inode)
+               return -EISDIR;
+
        current->total_link_count++;
        if (current->total_link_count >= 40)
                return -ELOOP;
index 45174b5..feb361e 100644 (file)
@@ -335,9 +335,9 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
 #define DBGKEY(key)  ((char *)(key))
 #define DBGKEY1(key) ((char *)(key))
 
-#define ubifs_dbg_msg(fmt, ...) do {               \
-       if (0)                                     \
-               pr_debug(fmt "\n", ##__VA_ARGS__); \
+#define ubifs_dbg_msg(fmt, ...) do {                        \
+       if (0)                                              \
+               printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
 } while (0)
 
 #define dbg_dump_stack()
index b9c172b..673704f 100644 (file)
@@ -70,9 +70,8 @@ xfs_synchronize_times(
 }
 
 /*
- * If the linux inode is valid, mark it dirty.
- * Used when committing a dirty inode into a transaction so that
- * the inode will get written back by the linux code
+ * If the linux inode is valid, mark it dirty, else mark the dirty state
+ * in the XFS inode to make sure we pick it up when reclaiming the inode.
  */
 void
 xfs_mark_inode_dirty_sync(
@@ -82,6 +81,10 @@ xfs_mark_inode_dirty_sync(
 
        if (!(inode->i_state & (I_WILL_FREE|I_FREEING)))
                mark_inode_dirty_sync(inode);
+       else {
+               barrier();
+               ip->i_update_core = 1;
+       }
 }
 
 void
@@ -92,6 +95,11 @@ xfs_mark_inode_dirty(
 
        if (!(inode->i_state & (I_WILL_FREE|I_FREEING)))
                mark_inode_dirty(inode);
+       else {
+               barrier();
+               ip->i_update_core = 1;
+       }
+
 }
 
 /*
index 9a72dda..2366c54 100644 (file)
@@ -356,6 +356,8 @@ xfs_parseargs(
                        mp->m_flags |= XFS_MOUNT_DELAYLOG;
                } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
                        mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
+                       xfs_warn(mp,
+       "nodelaylog is deprecated and will be removed in Linux 3.3");
                } else if (!strcmp(this_char, MNTOPT_DISCARD)) {
                        mp->m_flags |= XFS_MOUNT_DISCARD;
                } else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
@@ -877,33 +879,17 @@ xfs_log_inode(
        struct xfs_trans        *tp;
        int                     error;
 
-       xfs_iunlock(ip, XFS_ILOCK_SHARED);
        tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
        error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
-
        if (error) {
                xfs_trans_cancel(tp, 0);
-               /* we need to return with the lock hold shared */
-               xfs_ilock(ip, XFS_ILOCK_SHARED);
                return error;
        }
 
        xfs_ilock(ip, XFS_ILOCK_EXCL);
-
-       /*
-        * Note - it's possible that we might have pushed ourselves out of the
-        * way during trans_reserve which would flush the inode.  But there's
-        * no guarantee that the inode buffer has actually gone out yet (it's
-        * delwri).  Plus the buffer could be pinned anyway if it's part of
-        * an inode in another recent transaction.  So we play it safe and
-        * fire off the transaction anyway.
-        */
-       xfs_trans_ijoin(tp, ip);
+       xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       error = xfs_trans_commit(tp, 0);
-       xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
-
-       return error;
+       return xfs_trans_commit(tp, 0);
 }
 
 STATIC int
@@ -918,7 +904,9 @@ xfs_fs_write_inode(
        trace_xfs_write_inode(ip);
 
        if (XFS_FORCED_SHUTDOWN(mp))
-               return XFS_ERROR(EIO);
+               return -XFS_ERROR(EIO);
+       if (!ip->i_update_core)
+               return 0;
 
        if (wbc->sync_mode == WB_SYNC_ALL) {
                /*
@@ -929,12 +917,10 @@ xfs_fs_write_inode(
                 * of synchronous log foces dramatically.
                 */
                xfs_ioend_wait(ip);
-               xfs_ilock(ip, XFS_ILOCK_SHARED);
-               if (ip->i_update_core) {
-                       error = xfs_log_inode(ip);
-                       if (error)
-                               goto out_unlock;
-               }
+               error = xfs_log_inode(ip);
+               if (error)
+                       goto out;
+               return 0;
        } else {
                /*
                 * We make this non-blocking if the inode is contended, return
index 245bafd..c816075 100644 (file)
@@ -944,8 +944,10 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void __perf_event_task_sched_in(struct task_struct *task);
-extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void __perf_event_task_sched_in(struct task_struct *prev,
+                                      struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *prev,
+                                       struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -1059,17 +1061,20 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 
 extern struct jump_label_key perf_sched_events;
 
-static inline void perf_event_task_sched_in(struct task_struct *task)
+static inline void perf_event_task_sched_in(struct task_struct *prev,
+                                           struct task_struct *task)
 {
        if (static_branch(&perf_sched_events))
-               __perf_event_task_sched_in(task);
+               __perf_event_task_sched_in(prev, task);
 }
 
-static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+static inline void perf_event_task_sched_out(struct task_struct *prev,
+                                            struct task_struct *next)
 {
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
-       __perf_event_task_sched_out(task, next);
+       if (static_branch(&perf_sched_events))
+               __perf_event_task_sched_out(prev, next);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1139,10 +1144,11 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *task)                     { }
+perf_event_task_sched_in(struct task_struct *prev,
+                        struct task_struct *task)                      { }
 static inline void
-perf_event_task_sched_out(struct task_struct *task,
-                           struct task_struct *next)                   { }
+perf_event_task_sched_out(struct task_struct *prev,
+                         struct task_struct *next)                     { }
 static inline int perf_event_init_task(struct task_struct *child)      { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)     { }
 static inline void perf_event_free_task(struct task_struct *task)      { }
index 26f6ea4..b47771a 100644 (file)
@@ -123,7 +123,7 @@ struct regulator_bulk_data {
        const char *supply;
        struct regulator *consumer;
 
-       /* Internal use */
+       /* private: Internal use */
        int ret;
 };
 
index 342dcf1..a6326ef 100644 (file)
@@ -288,6 +288,35 @@ enum p9_perm_t {
        P9_DMSETVTX = 0x00010000,
 };
 
+/* 9p2000.L open flags */
+#define P9_DOTL_RDONLY        00000000
+#define P9_DOTL_WRONLY        00000001
+#define P9_DOTL_RDWR          00000002
+#define P9_DOTL_NOACCESS      00000003
+#define P9_DOTL_CREATE        00000100
+#define P9_DOTL_EXCL          00000200
+#define P9_DOTL_NOCTTY        00000400
+#define P9_DOTL_TRUNC         00001000
+#define P9_DOTL_APPEND        00002000
+#define P9_DOTL_NONBLOCK      00004000
+#define P9_DOTL_DSYNC         00010000
+#define P9_DOTL_FASYNC        00020000
+#define P9_DOTL_DIRECT        00040000
+#define P9_DOTL_LARGEFILE     00100000
+#define P9_DOTL_DIRECTORY     00200000
+#define P9_DOTL_NOFOLLOW      00400000
+#define P9_DOTL_NOATIME       01000000
+#define P9_DOTL_CLOEXEC       02000000
+#define P9_DOTL_SYNC          04000000
+
+/* 9p2000.L at flags */
+#define P9_DOTL_AT_REMOVEDIR           0x200
+
+/* 9p2000.L lock type */
+#define P9_LOCK_TYPE_RDLCK 0
+#define P9_LOCK_TYPE_WRLCK 1
+#define P9_LOCK_TYPE_UNLCK 2
+
 /**
  * enum p9_qid_t - QID types
  * @P9_QTDIR: directory
index 408ae48..401d73b 100644 (file)
@@ -1744,6 +1744,8 @@ struct wiphy_wowlan_support {
  *     by default for perm_addr. In this case, the mask should be set to
  *     all-zeroes. In this case it is assumed that the device can handle
  *     the same number of arbitrary MAC addresses.
+ * @registered: protects ->resume and ->suspend sysfs callbacks against
+ *     hardware unregistration
  * @debugfsdir: debugfs directory used for this wiphy, will be renamed
  *     automatically on wiphy renames
  * @dev: (virtual) struct device for this wiphy
index b8785e2..0f85778 100644 (file)
@@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
        local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+                                        struct task_struct *next)
 {
-       perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+       struct perf_cgroup *cgrp1;
+       struct perf_cgroup *cgrp2 = NULL;
+
+       /*
+        * we come here when we know perf_cgroup_events > 0
+        */
+       cgrp1 = perf_cgroup_from_task(task);
+
+       /*
+        * next is NULL when called from perf_event_enable_on_exec()
+        * that will systematically cause a cgroup_switch()
+        */
+       if (next)
+               cgrp2 = perf_cgroup_from_task(next);
+
+       /*
+        * only schedule out current cgroup events if we know
+        * that we are switching to a different cgroup. Otherwise,
+        * do not touch the cgroup events.
+        */
+       if (cgrp1 != cgrp2)
+               perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+                                       struct task_struct *task)
 {
-       perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+       struct perf_cgroup *cgrp1;
+       struct perf_cgroup *cgrp2 = NULL;
+
+       /*
+        * we come here when we know perf_cgroup_events > 0
+        */
+       cgrp1 = perf_cgroup_from_task(task);
+
+       /* prev can never be NULL */
+       cgrp2 = perf_cgroup_from_task(prev);
+
+       /*
+        * only need to schedule in cgroup events if we are changing
+        * cgroup during ctxsw. Cgroup events were not scheduled
+        * out during ctxsw if that was not the case.
+        */
+       if (cgrp1 != cgrp2)
+               perf_cgroup_switch(task, PERF_CGROUP_SWIN);
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
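
A minimal userspace sketch of the optimisation above: a cgroup switch is only performed when the previous and next tasks actually belong to different cgroups, and a NULL next (the enable-on-exec path) always forces the switch. All types and names here are illustrative:

#include <stdio.h>

struct cgroup { int id; };
struct task { struct cgroup *cgrp; };

static void cgroup_switch(struct task *t, const char *dir)
{
        printf("switching cgroup events (%s) for task %p\n", dir, (void *)t);
}

static void sched_out(struct task *task, struct task *next)
{
        struct cgroup *c1 = task->cgrp;
        /* next is NULL on the enable-on-exec path: always switch */
        struct cgroup *c2 = next ? next->cgrp : NULL;

        if (c1 != c2)
                cgroup_switch(task, "out");
}

int main(void)
{
        struct cgroup a = { 1 }, b = { 2 };
        struct task t1 = { &a }, t2 = { &a }, t3 = { &b };

        sched_out(&t1, &t2);    /* same cgroup: no switch */
        sched_out(&t1, &t3);    /* different cgroup: switch */
        return 0;
}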
@@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+                                        struct task_struct *next)
 {
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+                                       struct task_struct *task)
 {
 }
 
@@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
         * cgroup event are system-wide mode only
         */
        if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-               perf_cgroup_sched_out(task);
+               perf_cgroup_sched_out(task, next);
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void __perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+                               struct task_struct *task)
 {
        struct perf_event_context *ctx;
        int ctxn;
@@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
         * cgroup event are system-wide mode only
         */
        if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-               perf_cgroup_sched_in(task);
+               perf_cgroup_sched_in(prev, task);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
         * ctxswin cgroup events which are already scheduled
         * in.
         */
-       perf_cgroup_sched_out(current);
+       perf_cgroup_sched_out(current, NULL);
 
        raw_spin_lock(&ctx->lock);
        task_ctx_sched_out(ctx);
@@ -3353,8 +3396,8 @@ static int perf_event_index(struct perf_event *event)
 }
 
 static void calc_timer_values(struct perf_event *event,
-                               u64 *running,
-                               u64 *enabled)
+                               u64 *enabled,
+                               u64 *running)
 {
        u64 now, ctx_time;
 
index ccacdbd..ec5f472 100644 (file)
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-       perf_event_task_sched_in(current);
+       perf_event_task_sched_in(prev, current);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
@@ -4279,9 +4279,9 @@ pick_next_task(struct rq *rq)
 }
 
 /*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
 {
        struct task_struct *prev, *next;
        unsigned long *switch_count;
@@ -4322,16 +4322,6 @@ need_resched:
                                if (to_wakeup)
                                        try_to_wake_up_local(to_wakeup);
                        }
-
-                       /*
-                        * If we are going to sleep and we have plugged IO
-                        * queued, make sure to submit it to avoid deadlocks.
-                        */
-                       if (blk_needs_flush_plug(prev)) {
-                               raw_spin_unlock(&rq->lock);
-                               blk_schedule_flush_plug(prev);
-                               raw_spin_lock(&rq->lock);
-                       }
                }
                switch_count = &prev->nvcsw;
        }
@@ -4369,6 +4359,26 @@ need_resched:
        if (need_resched())
                goto need_resched;
 }
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+       if (!tsk->state)
+               return;
+       /*
+        * If we are going to sleep and we have plugged IO queued,
+        * make sure to submit it to avoid deadlocks.
+        */
+       if (blk_needs_flush_plug(tsk))
+               blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void schedule(void)
+{
+       struct task_struct *tsk = current;
+
+       sched_submit_work(tsk);
+       __schedule();
+}
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
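
A minimal userspace sketch of the schedule()/__schedule() split above: the public entry point first submits any plugged I/O for a task that is about to sleep, then calls the core routine, so the flush no longer happens while the runqueue lock is held. All names here are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct task { bool running; bool has_plugged_io; };

static void flush_plugged_io(struct task *t)
{
        t->has_plugged_io = false;
        printf("flushed plugged I/O\n");
}

static void core_schedule(void)
{
        printf("core scheduler runs with its lock held\n");
}

static void submit_work(struct task *t)
{
        if (t->running)
                return;                 /* only when about to sleep */
        if (t->has_plugged_io)
                flush_plugged_io(t);    /* avoid I/O deadlocks */
}

static void schedule_entry(struct task *t)
{
        submit_work(t);                 /* before taking any scheduler locks */
        core_schedule();
}

int main(void)
{
        struct task t = { .running = false, .has_plugged_io = true };
        schedule_entry(&t);
        return 0;
}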
@@ -4435,7 +4445,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
        do {
                add_preempt_count_notrace(PREEMPT_ACTIVE);
-               schedule();
+               __schedule();
                sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
                /*
@@ -4463,7 +4473,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
        do {
                add_preempt_count(PREEMPT_ACTIVE);
                local_irq_enable();
-               schedule();
+               __schedule();
                local_irq_disable();
                sub_preempt_count(PREEMPT_ACTIVE);
 
@@ -5588,7 +5598,7 @@ static inline int should_resched(void)
 static void __cond_resched(void)
 {
        add_preempt_count(PREEMPT_ACTIVE);
-       schedule();
+       __schedule();
        sub_preempt_count(PREEMPT_ACTIVE);
 }
 
@@ -7443,6 +7453,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
                        struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
                        if (sd && (sd->flags & SD_OVERLAP))
                                free_sched_groups(sd->groups, 0);
+                       kfree(*per_cpu_ptr(sdd->sd, j));
                        kfree(*per_cpu_ptr(sdd->sg, j));
                        kfree(*per_cpu_ptr(sdd->sgp, j));
                }
index 59f369f..ea5e1a9 100644 (file)
@@ -441,6 +441,8 @@ static int alarm_timer_create(struct k_itimer *new_timer)
 static void alarm_timer_get(struct k_itimer *timr,
                                struct itimerspec *cur_setting)
 {
+       memset(cur_setting, 0, sizeof(struct itimerspec));
+
        cur_setting->it_interval =
                        ktime_to_timespec(timr->it.alarmtimer.period);
        cur_setting->it_value =
@@ -479,11 +481,17 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
        if (!rtcdev)
                return -ENOTSUPP;
 
-       /* Save old values */
-       old_setting->it_interval =
-                       ktime_to_timespec(timr->it.alarmtimer.period);
-       old_setting->it_value =
-                       ktime_to_timespec(timr->it.alarmtimer.node.expires);
+       /*
+        * XXX HACK! Currently we can DOS a system if the interval
+        * period on alarmtimers is too small. Cap the interval here
+        * to 100us and solve this properly in a future patch! -jstultz
+        */
+       if ((new_setting->it_interval.tv_sec == 0) &&
+                       (new_setting->it_interval.tv_nsec < 100000))
+               new_setting->it_interval.tv_nsec = 100000;
+
+       if (old_setting)
+               alarm_timer_get(timr, old_setting);
 
        /* If the timer was already set, cancel it */
        alarm_cancel(&timr->it.alarmtimer);
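
A minimal userspace sketch of the interval cap added above: any periodic interval below 100us is raised to 100us so a tiny period cannot be used to flood the system with alarm expirations:

#include <stdio.h>
#include <time.h>

#define MIN_INTERVAL_NSEC 100000L   /* 100us, as in the patch */

static void clamp_interval(struct timespec *it)
{
        if (it->tv_sec == 0 && it->tv_nsec < MIN_INTERVAL_NSEC)
                it->tv_nsec = MIN_INTERVAL_NSEC;
}

int main(void)
{
        struct timespec iv = { .tv_sec = 0, .tv_nsec = 5000 };

        clamp_interval(&iv);
        printf("interval: %ld ns\n", iv.tv_nsec);
        return 0;
}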
index 175b513..e317583 100644 (file)
@@ -263,7 +263,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 {
        int in, out, inp, outp;
        struct virtio_chan *chan = client->trans;
-       char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
        unsigned long flags;
        size_t pdata_off = 0;
        struct trans_rpage_info *rpinfo = NULL;
@@ -346,7 +345,8 @@ req_retry_pinned:
                 * Arrange in such a way that server places header in the
                 * alloced memory and payload onto the user buffer.
                 */
-               inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11);
+               inp = pack_sg_list(chan->sg, out,
+                                  VIRTQUEUE_NUM, req->rc->sdata, 11);
                /*
                 * Running executables in the filesystem may result in
                 * a read request with kernel buffer as opposed to user buffer.
@@ -366,8 +366,8 @@ req_retry_pinned:
                }
                in += inp;
        } else {
-               in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
-                               req->rc->capacity);
+               in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
+                                 req->rc->sdata, req->rc->capacity);
        }
 
        err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
@@ -592,7 +592,14 @@ static struct p9_trans_module p9_virtio_trans = {
        .close = p9_virtio_close,
        .request = p9_virtio_request,
        .cancel = p9_virtio_cancel,
-       .maxsize = PAGE_SIZE*VIRTQUEUE_NUM,
+
+       /*
+        * We leave one entry for input and one entry for response
+        * headers. We also skip one more entry to accommodate
+        * addresses that are not at a page boundary, which can result
+        * in an extra page in zero copy.
+        */
+       .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
        .pref = P9_TRANS_PREF_PAYLOAD_SEP,
        .def = 0,
        .owner = THIS_MODULE,
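
Worked out with this driver's VIRTQUEUE_NUM of 128 and 4 KiB pages (both assumptions about the configuration), the per-request cap drops from 128 * 4096 = 524288 bytes to (128 - 3) * 4096 = 512000 bytes: three scatterlist entries are held back for the request and response headers plus a possible extra page from an unaligned buffer.
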
index d5f2d97..1f4cb30 100644 (file)
@@ -7,27 +7,37 @@
 
 #include <linux/ceph/msgpool.h>
 
-static void *alloc_fn(gfp_t gfp_mask, void *arg)
+static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
 {
        struct ceph_msgpool *pool = arg;
-       void *p;
+       struct ceph_msg *msg;
 
-       p = ceph_msg_new(0, pool->front_len, gfp_mask);
-       if (!p)
-               pr_err("msgpool %s alloc failed\n", pool->name);
-       return p;
+       msg = ceph_msg_new(0, pool->front_len, gfp_mask);
+       if (!msg) {
+               dout("msgpool_alloc %s failed\n", pool->name);
+       } else {
+               dout("msgpool_alloc %s %p\n", pool->name, msg);
+               msg->pool = pool;
+       }
+       return msg;
 }
 
-static void free_fn(void *element, void *arg)
+static void msgpool_free(void *element, void *arg)
 {
-       ceph_msg_put(element);
+       struct ceph_msgpool *pool = arg;
+       struct ceph_msg *msg = element;
+
+       dout("msgpool_release %s %p\n", pool->name, msg);
+       msg->pool = NULL;
+       ceph_msg_put(msg);
 }
 
 int ceph_msgpool_init(struct ceph_msgpool *pool,
                      int front_len, int size, bool blocking, const char *name)
 {
+       dout("msgpool %s init\n", name);
        pool->front_len = front_len;
-       pool->pool = mempool_create(size, alloc_fn, free_fn, pool);
+       pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
        if (!pool->pool)
                return -ENOMEM;
        pool->name = name;
@@ -36,14 +46,17 @@ int ceph_msgpool_init(struct ceph_msgpool *pool,
 
 void ceph_msgpool_destroy(struct ceph_msgpool *pool)
 {
+       dout("msgpool %s destroy\n", pool->name);
        mempool_destroy(pool->pool);
 }
 
 struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
                                  int front_len)
 {
+       struct ceph_msg *msg;
+
        if (front_len > pool->front_len) {
-               pr_err("msgpool_get pool %s need front %d, pool size is %d\n",
+               dout("msgpool_get %s need front %d, pool size is %d\n",
                       pool->name, front_len, pool->front_len);
                WARN_ON(1);
 
@@ -51,14 +64,19 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
                return ceph_msg_new(0, front_len, GFP_NOFS);
        }
 
-       return mempool_alloc(pool->pool, GFP_NOFS);
+       msg = mempool_alloc(pool->pool, GFP_NOFS);
+       dout("msgpool_get %s %p\n", pool->name, msg);
+       return msg;
 }
 
 void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
 {
+       dout("msgpool_put %s %p\n", pool->name, msg);
+
        /* reset msg front_len; user may have changed it */
        msg->front.iov_len = pool->front_len;
        msg->hdr.front_len = cpu_to_le32(pool->front_len);
 
        kref_init(&msg->kref);  /* retake single ref */
+       mempool_free(msg, pool->pool);
 }
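
For context (not part of the patch): the pool now hands out preallocated ceph_msg objects through the kernel mempool API, which keeps a minimum number of objects in reserve so allocations can make progress under memory pressure. A minimal, self-contained sketch of that pattern with illustrative names:

    #include <linux/mempool.h>
    #include <linux/slab.h>

    struct demo_obj { int id; };

    static void *demo_alloc(gfp_t gfp, void *pool_data)
    {
            return kmalloc(sizeof(struct demo_obj), gfp);
    }

    static void demo_free(void *element, void *pool_data)
    {
            kfree(element);
    }

    static mempool_t *demo_pool_create(void)
    {
            /* keep at least 8 objects reserved for the pool;
             * callers then pair mempool_alloc(pool, GFP_NOFS)
             * with mempool_free(obj, pool), as ceph_msgpool_put() does above */
            return mempool_create(8, demo_alloc, demo_free, NULL);
    }
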
index ce310ee..16836a7 100644 (file)
@@ -685,6 +685,18 @@ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
        put_osd(osd);
 }
 
+static void remove_all_osds(struct ceph_osd_client *osdc)
+{
+       dout("__remove_old_osds %p\n", osdc);
+       mutex_lock(&osdc->request_mutex);
+       while (!RB_EMPTY_ROOT(&osdc->osds)) {
+               struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
+                                               struct ceph_osd, o_node);
+               __remove_osd(osdc, osd);
+       }
+       mutex_unlock(&osdc->request_mutex);
+}
+
 static void __move_osd_to_lru(struct ceph_osd_client *osdc,
                              struct ceph_osd *osd)
 {
@@ -701,14 +713,14 @@ static void __remove_osd_from_lru(struct ceph_osd *osd)
                list_del_init(&osd->o_osd_lru);
 }
 
-static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all)
+static void remove_old_osds(struct ceph_osd_client *osdc)
 {
        struct ceph_osd *osd, *nosd;
 
        dout("__remove_old_osds %p\n", osdc);
        mutex_lock(&osdc->request_mutex);
        list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
-               if (!remove_all && time_before(jiffies, osd->lru_ttl))
+               if (time_before(jiffies, osd->lru_ttl))
                        break;
                __remove_osd(osdc, osd);
        }
@@ -751,6 +763,7 @@ static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
        struct rb_node *parent = NULL;
        struct ceph_osd *osd = NULL;
 
+       dout("__insert_osd %p osd%d\n", new, new->o_osd);
        while (*p) {
                parent = *p;
                osd = rb_entry(parent, struct ceph_osd, o_node);
@@ -1144,7 +1157,7 @@ static void handle_osds_timeout(struct work_struct *work)
 
        dout("osds timeout\n");
        down_read(&osdc->map_sem);
-       remove_old_osds(osdc, 0);
+       remove_old_osds(osdc);
        up_read(&osdc->map_sem);
 
        schedule_delayed_work(&osdc->osds_timeout_work,
@@ -1862,8 +1875,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
                ceph_osdmap_destroy(osdc->osdmap);
                osdc->osdmap = NULL;
        }
-       remove_old_osds(osdc, 1);
-       WARN_ON(!RB_EMPTY_ROOT(&osdc->osds));
+       remove_all_osds(osdc);
        mempool_destroy(osdc->req_mempool);
        ceph_msgpool_destroy(&osdc->msgpool_op);
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);