Merge tag 'locks-v5.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/jlayton/linux
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sun, 29 Dec 2019 17:50:57 +0000 (09:50 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sun, 29 Dec 2019 17:50:57 +0000 (09:50 -0800)
Pull /proc/locks formatting fix from Jeff Layton:
 "This is a trivial fix for a _very_ long standing bug in /proc/locks
  formatting. Ordinarily, I'd wait for the merge window for something
  like this, but it is making it difficult to validate some overlayfs
  fixes.

  I've also gone ahead and marked this for stable"

* tag 'locks-v5.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/jlayton/linux:
  locks: print unsigned ino in /proc/locks
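
  For reference, the fix itself is a one-character format change in
  fs/locks.c's lock_get_status(). A hedged sketch of the before/after,
  reconstructed from the subject line rather than quoted from the patch:
  inode->i_ino is an unsigned long, so printing it signed made large inode
  numbers (such as the synthetic ones overlayfs hands out) appear negative
  in /proc/locks:

  -	seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
  +	seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
   		   MAJOR(inode->i_sb->s_dev),
   		   MINOR(inode->i_sb->s_dev), inode->i_ino);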

56 files changed:
Documentation/dev-tools/kunit/start.rst
MAINTAINERS
block/compat_ioctl.c
drivers/ata/ahci_brcm.c
drivers/ata/libahci_platform.c
drivers/ata/libata-core.c
drivers/ata/sata_fsl.c
drivers/ata/sata_mv.c
drivers/ata/sata_nv.c
drivers/block/pktcdvd.c
drivers/devfreq/Kconfig
drivers/gpio/Kconfig
drivers/gpio/gpio-aspeed-sgpio.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-xgs-iproc.c
drivers/gpio/gpio-xtensa.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_frontbuffer.c
drivers/gpu/drm/i915/display/intel_frontbuffer.h
drivers/gpu/drm/i915/display/intel_overlay.c
drivers/gpu/drm/i915/gem/i915_gem_clflush.c
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_pmu.h
drivers/gpu/drm/i915/i915_vma.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/target/target_core_iblock.c
fs/cifs/cifsglob.h
fs/cifs/readdir.c
fs/cifs/smb2file.c
fs/io-wq.c
fs/io_uring.c
include/linux/ahci_platform.h
include/linux/libata.h
tools/testing/kunit/kunit.py
tools/testing/kunit/kunit_kernel.py
tools/testing/kunit/kunit_tool_test.py
tools/testing/selftests/filesystems/epoll/Makefile
tools/testing/selftests/firmware/fw_lib.sh
tools/testing/selftests/livepatch/functions.sh
tools/testing/selftests/livepatch/test-state.sh
tools/testing/selftests/rseq/param_test.c
tools/testing/selftests/rseq/rseq.h
tools/testing/selftests/rseq/settings [new file with mode: 0644]

diff --git a/Documentation/dev-tools/kunit/start.rst b/Documentation/dev-tools/kunit/start.rst
index 9d6db89..4e1d24d 100644
@@ -24,19 +24,16 @@ The wrapper can be run with:
 For more information on this wrapper (also called kunit_tool) checkout the
 :doc:`kunit-tool` page.
 
-Creating a kunitconfig
-======================
+Creating a .kunitconfig
+=======================
 The Python script is a thin wrapper around Kbuild. As such, it needs to be
-configured with a ``kunitconfig`` file. This file essentially contains the
+configured with a ``.kunitconfig`` file. This file essentially contains the
 regular Kernel config, with the specific test targets as well.
 
 .. code-block:: bash
 
-       git clone -b master https://kunit.googlesource.com/kunitconfig $PATH_TO_KUNITCONFIG_REPO
        cd $PATH_TO_LINUX_REPO
-       ln -s $PATH_TO_KUNIT_CONFIG_REPO/kunitconfig kunitconfig
-
-You may want to add kunitconfig to your local gitignore.
+       cp arch/um/configs/kunit_defconfig .kunitconfig
 
 Verifying KUnit Works
 ---------------------
@@ -151,7 +148,7 @@ and the following to ``drivers/misc/Makefile``:
 
        obj-$(CONFIG_MISC_EXAMPLE_TEST) += example-test.o
 
-Now add it to your ``kunitconfig``:
+Now add it to your ``.kunitconfig``:
 
 .. code-block:: none
 
diff --git a/MAINTAINERS b/MAINTAINERS
index ffa3371..e09bd92 100644
@@ -7034,6 +7034,7 @@ L:        linux-acpi@vger.kernel.org
 S:     Maintained
 F:     Documentation/firmware-guide/acpi/gpio-properties.rst
 F:     drivers/gpio/gpiolib-acpi.c
+F:     drivers/gpio/gpiolib-acpi.h
 
 GPIO IR Transmitter
 M:     Sean Young <sean@mess.org>
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 6ca015f..3ed7a0f 100644
@@ -6,6 +6,7 @@
 #include <linux/compat.h>
 #include <linux/elevator.h>
 #include <linux/hdreg.h>
+#include <linux/pr.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/types.h>
@@ -354,6 +355,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
         * but we call blkdev_ioctl, which gets the lock for us
         */
        case BLKRRPART:
+       case BLKREPORTZONE:
+       case BLKRESETZONE:
+       case BLKOPENZONE:
+       case BLKCLOSEZONE:
+       case BLKFINISHZONE:
+       case BLKGETZONESZ:
+       case BLKGETNRZONES:
                return blkdev_ioctl(bdev, mode, cmd,
                                (unsigned long)compat_ptr(arg));
        case BLKBSZSET_32:
@@ -401,6 +409,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        case BLKTRACETEARDOWN: /* compatible */
                ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
                return ret;
+       case IOC_PR_REGISTER:
+       case IOC_PR_RESERVE:
+       case IOC_PR_RELEASE:
+       case IOC_PR_PREEMPT:
+       case IOC_PR_PREEMPT_ABORT:
+       case IOC_PR_CLEAR:
+               return blkdev_ioctl(bdev, mode, cmd,
+                               (unsigned long)compat_ptr(arg));
        default:
                if (disk->fops->compat_ioctl)
                        ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
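
The commands forwarded above all take pointer arguments whose layout is
identical for 32-bit and 64-bit userspace, so the compat path only has to
widen the 32-bit user pointer with compat_ptr() and defer to the native
handler. A minimal sketch of the idiom (hypothetical driver and command,
not from this patch):

	static long mydrv_compat_ioctl(struct file *file, unsigned int cmd,
				       unsigned long arg)
	{
		switch (cmd) {
		case MYDRV_GET_INFO:	/* same struct layout on 32/64-bit */
			/* compat_ptr() turns the 32-bit user pointer value
			 * into a void __user * the native handler accepts.
			 */
			return mydrv_ioctl(file, cmd,
					   (unsigned long)compat_ptr(arg));
		default:
			return -ENOIOCTLCMD;
		}
	}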
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index f41744b..66a570d 100644
@@ -76,8 +76,7 @@ enum brcm_ahci_version {
 };
 
 enum brcm_ahci_quirks {
-       BRCM_AHCI_QUIRK_NO_NCQ          = BIT(0),
-       BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1),
+       BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(0),
 };
 
 struct brcm_ahci_priv {
@@ -213,19 +212,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
                        brcm_sata_phy_disable(priv, i);
 }
 
-static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
+static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv,
                                  struct brcm_ahci_priv *priv)
 {
-       void __iomem *ahci;
-       struct resource *res;
        u32 impl;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci");
-       ahci = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(ahci))
-               return 0;
-
-       impl = readl(ahci + HOST_PORTS_IMPL);
+       impl = readl(hpriv->mmio + HOST_PORTS_IMPL);
 
        if (fls(impl) > SATA_TOP_MAX_PHYS)
                dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n",
@@ -233,9 +225,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
        else if (!impl)
                dev_info(priv->dev, "no ports found\n");
 
-       devm_iounmap(&pdev->dev, ahci);
-       devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
-
        return impl;
 }
 
@@ -285,6 +274,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev,
        /* Perform the SATA PHY reset sequence */
        brcm_sata_phy_disable(priv, ap->port_no);
 
+       /* Reset the SATA clock */
+       ahci_platform_disable_clks(hpriv);
+       msleep(10);
+
+       ahci_platform_enable_clks(hpriv);
+       msleep(10);
+
        /* Bring the PHY back on */
        brcm_sata_phy_enable(priv, ap->port_no);
 
@@ -347,11 +343,10 @@ static int brcm_ahci_suspend(struct device *dev)
        struct ata_host *host = dev_get_drvdata(dev);
        struct ahci_host_priv *hpriv = host->private_data;
        struct brcm_ahci_priv *priv = hpriv->plat_data;
-       int ret;
 
-       ret = ahci_platform_suspend(dev);
        brcm_sata_phys_disable(priv);
-       return ret;
+
+       return ahci_platform_suspend(dev);
 }
 
 static int brcm_ahci_resume(struct device *dev)
@@ -359,11 +354,44 @@ static int brcm_ahci_resume(struct device *dev)
        struct ata_host *host = dev_get_drvdata(dev);
        struct ahci_host_priv *hpriv = host->private_data;
        struct brcm_ahci_priv *priv = hpriv->plat_data;
+       int ret;
+
+       /* Make sure clocks are turned on before re-configuration */
+       ret = ahci_platform_enable_clks(hpriv);
+       if (ret)
+               return ret;
 
        brcm_sata_init(priv);
        brcm_sata_phys_enable(priv);
        brcm_sata_alpm_init(hpriv);
-       return ahci_platform_resume(dev);
+
+       /* Since we had to enable clocks earlier on, we cannot use
+        * ahci_platform_resume() as-is since a second call to
+        * ahci_platform_enable_resources() would bump up the resources
+        * (regulators, clocks, PHYs) count artificially so we copy the part
+        * after ahci_platform_enable_resources().
+        */
+       ret = ahci_platform_enable_phys(hpriv);
+       if (ret)
+               goto out_disable_phys;
+
+       ret = ahci_platform_resume_host(dev);
+       if (ret)
+               goto out_disable_platform_phys;
+
+       /* We resumed so update PM runtime state */
+       pm_runtime_disable(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       return 0;
+
+out_disable_platform_phys:
+       ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+       brcm_sata_phys_disable(priv);
+       ahci_platform_disable_clks(hpriv);
+       return ret;
 }
 #endif
 
@@ -410,44 +438,71 @@ static int brcm_ahci_probe(struct platform_device *pdev)
        if (!IS_ERR_OR_NULL(priv->rcdev))
                reset_control_deassert(priv->rcdev);
 
-       if ((priv->version == BRCM_SATA_BCM7425) ||
-               (priv->version == BRCM_SATA_NSP)) {
-               priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
+       hpriv = ahci_platform_get_resources(pdev, 0);
+       if (IS_ERR(hpriv)) {
+               ret = PTR_ERR(hpriv);
+               goto out_reset;
+       }
+
+       hpriv->plat_data = priv;
+       hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO;
+
+       switch (priv->version) {
+       case BRCM_SATA_BCM7425:
+               hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE;
+               /* fall through */
+       case BRCM_SATA_NSP:
+               hpriv->flags |= AHCI_HFLAG_NO_NCQ;
                priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
+               break;
+       default:
+               break;
        }
 
+       ret = ahci_platform_enable_clks(hpriv);
+       if (ret)
+               goto out_reset;
+
+       /* Must be first so as to configure endianness including that
+        * of the standard AHCI register space.
+        */
        brcm_sata_init(priv);
 
-       priv->port_mask = brcm_ahci_get_portmask(pdev, priv);
-       if (!priv->port_mask)
-               return -ENODEV;
+       /* Initializes priv->port_mask which is used below */
+       priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
+       if (!priv->port_mask) {
+               ret = -ENODEV;
+               goto out_disable_clks;
+       }
 
+       /* Must be done before ahci_platform_enable_phys() */
        brcm_sata_phys_enable(priv);
 
-       hpriv = ahci_platform_get_resources(pdev, 0);
-       if (IS_ERR(hpriv))
-               return PTR_ERR(hpriv);
-       hpriv->plat_data = priv;
-       hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
-
        brcm_sata_alpm_init(hpriv);
 
-       ret = ahci_platform_enable_resources(hpriv);
+       ret = ahci_platform_enable_phys(hpriv);
        if (ret)
-               return ret;
-
-       if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
-               hpriv->flags |= AHCI_HFLAG_NO_NCQ;
-       hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO;
+               goto out_disable_phys;
 
        ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
                                      &ahci_platform_sht);
        if (ret)
-               return ret;
+               goto out_disable_platform_phys;
 
        dev_info(dev, "Broadcom AHCI SATA3 registered\n");
 
        return 0;
+
+out_disable_platform_phys:
+       ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+       brcm_sata_phys_disable(priv);
+out_disable_clks:
+       ahci_platform_disable_clks(hpriv);
+out_reset:
+       if (!IS_ERR_OR_NULL(priv->rcdev))
+               reset_control_assert(priv->rcdev);
+       return ret;
 }
 
 static int brcm_ahci_remove(struct platform_device *pdev)
@@ -457,12 +512,12 @@ static int brcm_ahci_remove(struct platform_device *pdev)
        struct brcm_ahci_priv *priv = hpriv->plat_data;
        int ret;
 
+       brcm_sata_phys_disable(priv);
+
        ret = ata_platform_remove_one(pdev);
        if (ret)
                return ret;
 
-       brcm_sata_phys_disable(priv);
-
        return 0;
 }
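
The probe rework above is a standard goto-unwind conversion: every early
return taken after a resource has been acquired is replaced with a jump to
a label that releases, in reverse order, exactly what has been acquired so
far, with the reset line (deasserted first) re-asserted last. A generic
sketch of the shape, with hypothetical helpers:

	static int probe(struct platform_device *pdev)
	{
		int ret;

		ret = acquire_a(pdev);		/* e.g. deassert reset */
		if (ret)
			return ret;
		ret = acquire_b(pdev);		/* e.g. enable clocks */
		if (ret)
			goto out_a;
		ret = acquire_c(pdev);		/* e.g. enable PHYs */
		if (ret)
			goto out_b;
		return 0;

	out_b:					/* unwind in reverse order */
		release_b(pdev);
	out_a:
		release_a(pdev);
		return ret;
	}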
 
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 8befce0..129556f 100644
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops);
  * RETURNS:
  * 0 on success otherwise a negative error code
  */
-static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
 {
        int rc, i;
 
@@ -74,6 +74,7 @@ disable_phys:
        }
        return rc;
 }
+EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);
 
 /**
  * ahci_platform_disable_phys - Disable PHYs
@@ -81,7 +82,7 @@ disable_phys:
  *
  * This function disables all PHYs found in hpriv->phys.
  */
-static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
 {
        int i;
 
@@ -90,6 +91,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
                phy_exit(hpriv->phys[i]);
        }
 }
+EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
 
 /**
  * ahci_platform_enable_clks - Enable platform clocks
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e9017c5..6f4ab5c 100644
@@ -5329,6 +5329,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 }
 
 /**
+ *     ata_qc_get_active - get bitmask of active qcs
+ *     @ap: port in question
+ *
+ *     LOCKING:
+ *     spin_lock_irqsave(host lock)
+ *
+ *     RETURNS:
+ *     Bitmask of active qcs
+ */
+u64 ata_qc_get_active(struct ata_port *ap)
+{
+       u64 qc_active = ap->qc_active;
+
+       /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
+       if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
+               qc_active |= (1 << 0);
+               qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
+       }
+
+       return qc_active;
+}
+EXPORT_SYMBOL_GPL(ata_qc_get_active);
+
+/**
  *     ata_qc_complete_multiple - Complete multiple qcs successfully
  *     @ap: port in question
  *     @qc_active: new qc_active mask
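
ata_qc_get_active() exists because the hardware reports the internal
command in tag 0 while the software mask keeps it at bit ATA_TAG_INTERNAL.
Callers that XOR a hardware completion mask against the active mask must
therefore use the hardware view, as the sata_fsl/sata_mv/sata_nv hunks
below now do. Sketch of the caller-side idiom (done_mask source is
hypothetical):

	/* One bit per completed hardware tag, read from the controller. */
	u32 done_mask = read_completion_bits(ap);	/* hypothetical */

	/* XOR clears the completed tags, leaving the new active mask
	 * that ata_qc_complete_multiple() expects.
	 */
	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);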
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 9239615..d55ee24 100644
@@ -1280,7 +1280,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
                                     i, ioread32(hcr_base + CC),
                                     ioread32(hcr_base + CA));
                }
-               ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+               ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
                return;
 
        } else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) {
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 277f119..d7228f8 100644
@@ -2829,7 +2829,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
        }
 
        if (work_done) {
-               ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+               ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
 
                /* Update the software queue position index in hardware */
                writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index f3e62f5..eb9dc14 100644
@@ -984,7 +984,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
                                        check_commands = 0;
                                check_commands &= ~(1 << pos);
                        }
-                       ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+                       ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
                }
        }
 
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ee67bf9..861fc65 100644
@@ -2707,7 +2707,7 @@ static const struct block_device_operations pktcdvd_ops = {
        .release =              pkt_close,
        .ioctl =                pkt_ioctl,
 #ifdef CONFIG_COMPAT
-       .ioctl =                pkt_compat_ioctl,
+       .compat_ioctl =         pkt_compat_ioctl,
 #endif
        .check_events =         pkt_check_events,
 };
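
The bug fixed here is easy to miss because C permits duplicate designated
initializers: the last one silently wins, so the table compiled cleanly
with .ioctl pointing at the compat handler and .compat_ioctl never set.
GCC only diagnoses this with -Woverride-init. Sketch of what the broken
initializer effectively did:

	static const struct block_device_operations ops = {
		.ioctl = pkt_ioctl,
		.ioctl = pkt_compat_ioctl,	/* overrides the line above;
						 * .compat_ioctl stays NULL */
	};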
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index defe1d4..3553583 100644
@@ -83,7 +83,6 @@ config ARM_EXYNOS_BUS_DEVFREQ
        select DEVFREQ_GOV_PASSIVE
        select DEVFREQ_EVENT_EXYNOS_PPMU
        select PM_DEVFREQ_EVENT
-       select PM_OPP
        help
          This adds the common DEVFREQ driver for Exynos Memory bus. Exynos
          Memory bus has one more group of memory bus (e.g, MIF and INT block).
@@ -98,7 +97,7 @@ config ARM_TEGRA_DEVFREQ
                ARCH_TEGRA_132_SOC || ARCH_TEGRA_124_SOC || \
                ARCH_TEGRA_210_SOC || \
                COMPILE_TEST
-       select PM_OPP
+       depends on COMMON_CLK
        help
          This adds the DEVFREQ driver for the Tegra family of SoCs.
          It reads ACTMON counters of memory controllers and adjusts the
@@ -109,7 +108,6 @@ config ARM_TEGRA20_DEVFREQ
        depends on (TEGRA_MC && TEGRA20_EMC) || COMPILE_TEST
        depends on COMMON_CLK
        select DEVFREQ_GOV_SIMPLE_ONDEMAND
-       select PM_OPP
        help
          This adds the DEVFREQ driver for the Tegra20 family of SoCs.
          It reads Memory Controller counters and adjusts the operating
@@ -121,7 +119,6 @@ config ARM_RK3399_DMC_DEVFREQ
        select DEVFREQ_EVENT_ROCKCHIP_DFI
        select DEVFREQ_GOV_SIMPLE_ONDEMAND
        select PM_DEVFREQ_EVENT
-       select PM_OPP
        help
           This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller).
           It sets the frequency for the memory controller and reads the usage counts
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8adffd4..6ab25fe 100644
@@ -553,8 +553,8 @@ config GPIO_TEGRA
 
 config GPIO_TEGRA186
        tristate "NVIDIA Tegra186 GPIO support"
-       default ARCH_TEGRA_186_SOC
-       depends on ARCH_TEGRA_186_SOC || COMPILE_TEST
+       default ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC
+       depends on ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || COMPILE_TEST
        depends on OF_GPIO
        select GPIOLIB_IRQCHIP
        select IRQ_DOMAIN_HIERARCHY
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 7e99860..8319812 100644
@@ -107,7 +107,7 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
                return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
        default:
                /* actually, if code runs to here, it's an error case */
-               BUG_ON(1);
+               BUG();
        }
 }
 
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 56d647a..c4fdc19 100644
@@ -226,7 +226,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
        int direction;
 
        mutex_lock(&chip->lock);
-       direction = !chip->lines[offset].dir;
+       direction = chip->lines[offset].dir;
        mutex_unlock(&chip->lock);
 
        return direction;
@@ -395,7 +395,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
        struct gpio_chip *gc;
        struct device *dev;
        const char *name;
-       int rv, base;
+       int rv, base, i;
        u16 ngpio;
 
        dev = &pdev->dev;
@@ -447,6 +447,9 @@ static int gpio_mockup_probe(struct platform_device *pdev)
        if (!chip->lines)
                return -ENOMEM;
 
+       for (i = 0; i < gc->ngpio; i++)
+               chip->lines[i].dir = GPIO_LINE_DIRECTION_IN;
+
        if (device_property_read_bool(dev, "named-gpio-lines")) {
                rv = gpio_mockup_name_lines(dev, chip);
                if (rv)
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index f1e164c..5ae30de 100644
@@ -346,6 +346,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        gc = &mpc8xxx_gc->gc;
+       gc->parent = &pdev->dev;
 
        if (of_property_read_bool(np, "little-endian")) {
                ret = bgpio_init(gc, &pdev->dev, 4,
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 6652bee..9853547 100644
@@ -568,16 +568,18 @@ static void pca953x_irq_mask(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-       chip->irq_mask[d->hwirq / BANK_SZ] &= ~BIT(d->hwirq % BANK_SZ);
+       clear_bit(hwirq, chip->irq_mask);
 }
 
 static void pca953x_irq_unmask(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-       chip->irq_mask[d->hwirq / BANK_SZ] |= BIT(d->hwirq % BANK_SZ);
+       set_bit(hwirq, chip->irq_mask);
 }
 
 static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -635,8 +637,7 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
-       int bank_nb = d->hwirq / BANK_SZ;
-       u8 mask = BIT(d->hwirq % BANK_SZ);
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
        if (!(type & IRQ_TYPE_EDGE_BOTH)) {
                dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
@@ -644,15 +645,8 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
                return -EINVAL;
        }
 
-       if (type & IRQ_TYPE_EDGE_FALLING)
-               chip->irq_trig_fall[bank_nb] |= mask;
-       else
-               chip->irq_trig_fall[bank_nb] &= ~mask;
-
-       if (type & IRQ_TYPE_EDGE_RISING)
-               chip->irq_trig_raise[bank_nb] |= mask;
-       else
-               chip->irq_trig_raise[bank_nb] &= ~mask;
+       assign_bit(hwirq, chip->irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING);
+       assign_bit(hwirq, chip->irq_trig_raise, type & IRQ_TYPE_EDGE_RISING);
 
        return 0;
 }
@@ -661,10 +655,10 @@ static void pca953x_irq_shutdown(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
-       u8 mask = BIT(d->hwirq % BANK_SZ);
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-       chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask;
-       chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask;
+       clear_bit(hwirq, chip->irq_trig_raise);
+       clear_bit(hwirq, chip->irq_trig_fall);
 }
 
 static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pending)
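
The conversion above swaps open-coded bank/bit arithmetic for the generic
bitmap helpers, which take the bit index directly. Roughly, for a map
declared with DECLARE_BITMAP() (sizing constant assumed here), the old and
new forms correspond as follows:

	DECLARE_BITMAP(irq_mask, MAX_LINE);	/* was: u8 irq_mask[NBANK] */

	/* was: mask[hwirq / BANK_SZ] |= BIT(hwirq % BANK_SZ);  */
	set_bit(hwirq, irq_mask);

	/* was: mask[hwirq / BANK_SZ] &= ~BIT(hwirq % BANK_SZ); */
	clear_bit(hwirq, irq_mask);

	/* assign_bit(nr, map, val) is set_bit() when val is nonzero,
	 * clear_bit() otherwise -- used above for the trigger flags.
	 */
	assign_bit(hwirq, irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING);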
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index 773e5c2..b21c2e4 100644
@@ -280,7 +280,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __exit iproc_gpio_remove(struct platform_device *pdev)
+static int iproc_gpio_remove(struct platform_device *pdev)
 {
        struct iproc_gpio_chip *chip;
 
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index 08d7c3b..c8af34a 100644
@@ -44,15 +44,14 @@ static inline unsigned long enable_cp(unsigned long *cpenable)
        unsigned long flags;
 
        local_irq_save(flags);
-       RSR_CPENABLE(*cpenable);
-       WSR_CPENABLE(*cpenable | BIT(XCHAL_CP_ID_XTIOP));
-
+       *cpenable = xtensa_get_sr(cpenable);
+       xtensa_set_sr(*cpenable | BIT(XCHAL_CP_ID_XTIOP), cpenable);
        return flags;
 }
 
 static inline void disable_cp(unsigned long flags, unsigned long cpenable)
 {
-       WSR_CPENABLE(cpenable);
+       xtensa_set_sr(cpenable, cpenable);
        local_irq_restore(flags);
 }
 
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 9913886..78a16e4 100644
@@ -220,6 +220,14 @@ int gpiod_get_direction(struct gpio_desc *desc)
        chip = gpiod_to_chip(desc);
        offset = gpio_chip_hwgpio(desc);
 
+       /*
+        * Open drain emulation using input mode may incorrectly report
+        * input here, fix that up.
+        */
+       if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) &&
+           test_bit(FLAG_IS_OUT, &desc->flags))
+               return 0;
+
        if (!chip->get_direction)
                return -ENOTSUPP;
 
@@ -4472,8 +4480,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
 
                if (chip->ngpio <= p->chip_hwnum) {
                        dev_err(dev,
-                               "requested GPIO %d is out of range [0..%d] for chip %s\n",
-                               idx, chip->ngpio, chip->label);
+                               "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n",
+                               idx, p->chip_hwnum, chip->ngpio - 1,
+                               chip->label);
                        return ERR_PTR(-EINVAL);
                }
 
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 6f5e3bd..effc425 100644
@@ -15112,7 +15112,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                return ret;
 
        fb_obj_bump_render_priority(obj);
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
 
        if (!new_plane_state->base.fence) { /* implicit fencing */
                struct dma_fence *fence;
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 84b164f..6cb02c9 100644
@@ -229,11 +229,11 @@ static void frontbuffer_release(struct kref *ref)
                vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
        spin_unlock(&obj->vma.lock);
 
-       obj->frontbuffer = NULL;
+       RCU_INIT_POINTER(obj->frontbuffer, NULL);
        spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
 
        i915_gem_object_put(obj);
-       kfree(front);
+       kfree_rcu(front, rcu);
 }
 
 struct intel_frontbuffer *
@@ -242,11 +242,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct intel_frontbuffer *front;
 
-       spin_lock(&i915->fb_tracking.lock);
-       front = obj->frontbuffer;
-       if (front)
-               kref_get(&front->ref);
-       spin_unlock(&i915->fb_tracking.lock);
+       front = __intel_frontbuffer_get(obj);
        if (front)
                return front;
 
@@ -262,13 +258,13 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
                         i915_active_may_sleep(frontbuffer_retire));
 
        spin_lock(&i915->fb_tracking.lock);
-       if (obj->frontbuffer) {
+       if (rcu_access_pointer(obj->frontbuffer)) {
                kfree(front);
-               front = obj->frontbuffer;
+               front = rcu_dereference_protected(obj->frontbuffer, true);
                kref_get(&front->ref);
        } else {
                i915_gem_object_get(obj);
-               obj->frontbuffer = front;
+               rcu_assign_pointer(obj->frontbuffer, front);
        }
        spin_unlock(&i915->fb_tracking.lock);
 
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index adc64d6..6d41f53 100644
 #include <linux/atomic.h>
 #include <linux/kref.h>
 
+#include "gem/i915_gem_object_types.h"
 #include "i915_active.h"
 
 struct drm_i915_private;
-struct drm_i915_gem_object;
 
 enum fb_op_origin {
        ORIGIN_GTT,
@@ -45,6 +45,7 @@ struct intel_frontbuffer {
        atomic_t bits;
        struct i915_active write;
        struct drm_i915_gem_object *obj;
+       struct rcu_head rcu;
 };
 
 void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
@@ -54,6 +55,35 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
 void intel_frontbuffer_flip(struct drm_i915_private *i915,
                            unsigned frontbuffer_bits);
 
+void intel_frontbuffer_put(struct intel_frontbuffer *front);
+
+static inline struct intel_frontbuffer *
+__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
+{
+       struct intel_frontbuffer *front;
+
+       if (likely(!rcu_access_pointer(obj->frontbuffer)))
+               return NULL;
+
+       rcu_read_lock();
+       do {
+               front = rcu_dereference(obj->frontbuffer);
+               if (!front)
+                       break;
+
+               if (unlikely(!kref_get_unless_zero(&front->ref)))
+                       continue;
+
+               if (likely(front == rcu_access_pointer(obj->frontbuffer)))
+                       break;
+
+               intel_frontbuffer_put(front);
+       } while (1);
+       rcu_read_unlock();
+
+       return front;
+}
+
 struct intel_frontbuffer *
 intel_frontbuffer_get(struct drm_i915_gem_object *obj);
 
@@ -119,6 +149,4 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
                             struct intel_frontbuffer *new,
                             unsigned int frontbuffer_bits);
 
-void intel_frontbuffer_put(struct intel_frontbuffer *front);
-
 #endif /* __INTEL_FRONTBUFFER_H__ */
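
__intel_frontbuffer_get() above is an instance of the classic RCU
lookup-with-refcount pattern, paired with kfree_rcu() on the release side:
take a reference only if the count is still nonzero, then re-check that
the published pointer did not change underneath. The same idiom in generic
form (names and the ref/put helpers are hypothetical; needs
linux/rcupdate.h and linux/kref.h):

	struct obj *obj_get(struct obj __rcu **slot)
	{
		struct obj *o;

		rcu_read_lock();
		do {
			o = rcu_dereference(*slot);
			if (!o)
				break;
			/* Refuse a 0 -> 1 transition: the object may
			 * already be waiting for kfree_rcu().
			 */
			if (unlikely(!kref_get_unless_zero(&o->ref)))
				continue;
			/* Re-check: the slot may have been cleared and
			 * repopulated while we took the reference.
			 */
			if (likely(o == rcu_access_pointer(*slot)))
				break;
			obj_put(o);	/* hypothetical kref_put wrapper */
		} while (1);
		rcu_read_unlock();

		return o;
	}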
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 848ce07..8a98a1a 100644
@@ -279,12 +279,21 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
                                       struct i915_vma *vma)
 {
        enum pipe pipe = overlay->crtc->pipe;
+       struct intel_frontbuffer *from = NULL, *to = NULL;
 
        WARN_ON(overlay->old_vma);
 
-       intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
-                               vma ? vma->obj->frontbuffer : NULL,
-                               INTEL_FRONTBUFFER_OVERLAY(pipe));
+       if (overlay->vma)
+               from = intel_frontbuffer_get(overlay->vma->obj);
+       if (vma)
+               to = intel_frontbuffer_get(vma->obj);
+
+       intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+       if (to)
+               intel_frontbuffer_put(to);
+       if (from)
+               intel_frontbuffer_put(from);
 
        intel_frontbuffer_flip_prepare(overlay->i915,
                                       INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -766,7 +775,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                ret = PTR_ERR(vma);
                goto out_pin_section;
        }
-       intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);
+       i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
 
        if (!overlay->active) {
                u32 oconfig;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index b9f504b..18ee708 100644
@@ -20,7 +20,8 @@ static void __do_clflush(struct drm_i915_gem_object *obj)
 {
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        drm_clflush_sg(obj->mm.pages);
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 }
 
 static int clflush_work(struct dma_fence_work *base)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 9937b4c..f86400a 100644
@@ -664,7 +664,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        i915_gem_object_unlock(obj);
 
        if (write_domain)
-               intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+               i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
 out_unpin:
        i915_gem_object_unpin_pages(obj);
@@ -784,7 +784,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
        }
 
 out:
-       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
        obj->mm.dirty = true;
        /* return with the pages pinned */
        return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index a50296c..a596548 100644
@@ -280,7 +280,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
                for_each_ggtt_vma(vma, obj)
                        intel_gt_flush_ggtt_writes(vma->vm->gt);
 
-               intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+               i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 
                for_each_ggtt_vma(vma, obj) {
                        if (vma->iomap)
@@ -308,6 +308,30 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
        obj->write_domain = 0;
 }
 
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+                                        enum fb_op_origin origin)
+{
+       struct intel_frontbuffer *front;
+
+       front = __intel_frontbuffer_get(obj);
+       if (front) {
+               intel_frontbuffer_flush(front, origin);
+               intel_frontbuffer_put(front);
+       }
+}
+
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+                                             enum fb_op_origin origin)
+{
+       struct intel_frontbuffer *front;
+
+       front = __intel_frontbuffer_get(obj);
+       if (front) {
+               intel_frontbuffer_invalidate(front, origin);
+               intel_frontbuffer_put(front);
+       }
+}
+
 void i915_gem_init__objects(struct drm_i915_private *i915)
 {
        INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 458cd51..4b93591 100644
@@ -13,8 +13,8 @@
 
 #include <drm/i915_drm.h>
 
+#include "display/intel_frontbuffer.h"
 #include "i915_gem_object_types.h"
-
 #include "i915_gem_gtt.h"
 
 void i915_gem_init__objects(struct drm_i915_private *i915);
@@ -463,4 +463,25 @@ int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                                  unsigned int flags,
                                  const struct i915_sched_attr *attr);
 
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+                                        enum fb_op_origin origin);
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+                                             enum fb_op_origin origin);
+
+static inline void
+i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+                                 enum fb_op_origin origin)
+{
+       if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+               __i915_gem_object_flush_frontbuffer(obj, origin);
+}
+
+static inline void
+i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+                                      enum fb_op_origin origin)
+{
+       if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+               __i915_gem_object_invalidate_frontbuffer(obj, origin);
+}
+
 #endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 9600837..e3f3944 100644
@@ -150,7 +150,7 @@ struct drm_i915_gem_object {
         */
        u16 write_domain;
 
-       struct intel_frontbuffer *frontbuffer;
+       struct intel_frontbuffer __rcu *frontbuffer;
 
        /** Current tiling stride for the object, if it's tiled. */
        unsigned int tiling_and_stride;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index a459a42..7e64b7d 100644
@@ -94,8 +94,9 @@ static int __gt_park(struct intel_wakeref *wf)
                intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
        }
 
+       /* Defer dropping the display power well for 100ms, it's slow! */
        GEM_BUG_ON(!wakeref);
-       intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+       intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
 
        i915_globals_park();
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d034fa4..905890e 100644
@@ -161,7 +161,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
         * We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
-       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
        if (copy_from_user(vaddr, user_data, args->size))
                return -EFAULT;
@@ -169,7 +169,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
        drm_clflush_virt_range(vaddr, args->size);
        intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
 
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        return 0;
 }
 
@@ -589,7 +589,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                goto out_unpin;
        }
 
-       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
@@ -631,7 +631,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                user_data += page_length;
                offset += page_length;
        }
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 
        i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
@@ -721,7 +721,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
                offset = 0;
        }
 
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        i915_gem_object_unlock_fence(obj, fence);
 
        return ret;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 2814218..6f09aa0 100644
@@ -144,61 +144,40 @@ static inline s64 ktime_since(const ktime_t kt)
        return ktime_to_ns(ktime_sub(ktime_get(), kt));
 }
 
-static u64 __pmu_estimate_rc6(struct i915_pmu *pmu)
-{
-       u64 val;
-
-       /*
-        * We think we are runtime suspended.
-        *
-        * Report the delta from when the device was suspended to now,
-        * on top of the last known real value, as the approximated RC6
-        * counter value.
-        */
-       val = ktime_since(pmu->sleep_last);
-       val += pmu->sample[__I915_SAMPLE_RC6].cur;
-
-       pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
-
-       return val;
-}
-
-static u64 __pmu_update_rc6(struct i915_pmu *pmu, u64 val)
-{
-       /*
-        * If we are coming back from being runtime suspended we must
-        * be careful not to report a larger value than returned
-        * previously.
-        */
-       if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
-               pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
-               pmu->sample[__I915_SAMPLE_RC6].cur = val;
-       } else {
-               val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
-       }
-
-       return val;
-}
-
 static u64 get_rc6(struct intel_gt *gt)
 {
        struct drm_i915_private *i915 = gt->i915;
        struct i915_pmu *pmu = &i915->pmu;
        unsigned long flags;
+       bool awake = false;
        u64 val;
 
-       val = 0;
        if (intel_gt_pm_get_if_awake(gt)) {
                val = __get_rc6(gt);
                intel_gt_pm_put_async(gt);
+               awake = true;
        }
 
        spin_lock_irqsave(&pmu->lock, flags);
 
-       if (val)
-               val = __pmu_update_rc6(pmu, val);
+       if (awake) {
+               pmu->sample[__I915_SAMPLE_RC6].cur = val;
+       } else {
+               /*
+                * We think we are runtime suspended.
+                *
+                * Report the delta from when the device was suspended to now,
+                * on top of the last known real value, as the approximated RC6
+                * counter value.
+                */
+               val = ktime_since(pmu->sleep_last);
+               val += pmu->sample[__I915_SAMPLE_RC6].cur;
+       }
+
+       if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
+               val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
        else
-               val = __pmu_estimate_rc6(pmu);
+               pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;
 
        spin_unlock_irqrestore(&pmu->lock, flags);
 
@@ -210,20 +189,11 @@ static void park_rc6(struct drm_i915_private *i915)
        struct i915_pmu *pmu = &i915->pmu;
 
        if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
-               __pmu_update_rc6(pmu, __get_rc6(&i915->gt));
+               pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
 
        pmu->sleep_last = ktime_get();
 }
 
-static void unpark_rc6(struct drm_i915_private *i915)
-{
-       struct i915_pmu *pmu = &i915->pmu;
-
-       /* Estimate how long we slept and accumulate that into rc6 counters */
-       if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
-               __pmu_estimate_rc6(pmu);
-}
-
 #else
 
 static u64 get_rc6(struct intel_gt *gt)
@@ -232,7 +202,6 @@ static u64 get_rc6(struct intel_gt *gt)
 }
 
 static void park_rc6(struct drm_i915_private *i915) {}
-static void unpark_rc6(struct drm_i915_private *i915) {}
 
 #endif
 
@@ -281,8 +250,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
         */
        __i915_pmu_maybe_start_timer(pmu);
 
-       unpark_rc6(i915);
-
        spin_unlock_irq(&pmu->lock);
 }
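
The rework above replaces the estimate/update helper pair with a single
clamp: whichever path produced val (a real hardware read or a sleep-time
estimate), the counter handed to userspace never goes backwards. Reduced
to its core (a sketch, not the driver code):

	static u64 clamp_monotonic(u64 val, u64 *last_reported)
	{
		/* Never report less than userspace has already seen. */
		if (val < *last_reported)
			return *last_reported;
		*last_reported = val;	/* advance the high-water mark */
		return val;
	}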
 
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index bf52e39..6c1647c 100644
@@ -18,7 +18,7 @@ enum {
        __I915_SAMPLE_FREQ_ACT = 0,
        __I915_SAMPLE_FREQ_REQ,
        __I915_SAMPLE_RC6,
-       __I915_SAMPLE_RC6_ESTIMATED,
+       __I915_SAMPLE_RC6_LAST_REPORTED,
        __I915_NUM_PMU_SAMPLERS
 };
 
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e5512f2..01c8222 100644
@@ -1104,8 +1104,14 @@ int i915_vma_move_to_active(struct i915_vma *vma,
                return err;
 
        if (flags & EXEC_OBJECT_WRITE) {
-               if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
-                       i915_active_add_request(&obj->frontbuffer->write, rq);
+               struct intel_frontbuffer *front;
+
+               front = __intel_frontbuffer_get(obj);
+               if (unlikely(front)) {
+                       if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
+                               i915_active_add_request(&front->write, rq);
+                       intel_frontbuffer_put(front);
+               }
 
                dma_resv_add_excl_fence(vma->resv, &rq->fence);
                obj->write_domain = I915_GEM_DOMAIN_RENDER;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index c4e4b01..4bc794d 100644
@@ -121,7 +121,8 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
                "cdev 0x%p, p# %u.\n", cdev, cdev->nports);
        cxgbi_hbas_remove(cdev);
        cxgbi_device_portmap_cleanup(cdev);
-       cxgbi_ppm_release(cdev->cdev2ppm(cdev));
+       if (cdev->cdev2ppm)
+               cxgbi_ppm_release(cdev->cdev2ppm(cdev));
        if (cdev->pmap.max_connect)
                cxgbi_free_big_mem(cdev->pmap.port_csk);
        kfree(cdev);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2e6a68d..a5ecbce 100644
@@ -5385,7 +5385,6 @@ static const struct file_operations lpfc_debugfs_ras_log = {
        .read =         lpfc_debugfs_read,
        .release =      lpfc_debugfs_ras_log_release,
 };
-#endif
 
 #undef lpfc_debugfs_op_dumpHBASlim
 static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
@@ -5557,7 +5556,7 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
        .write =        lpfc_idiag_extacc_write,
        .release =      lpfc_idiag_cmd_release,
 };
-
+#endif
 
 /* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
  * @phba: Pointer to HBA context object.
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6298b17..6a04fdb 100644
@@ -5883,7 +5883,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
                        break;
                default:
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                                       "1804 Invalid asynchrous event code: "
+                                       "1804 Invalid asynchronous event code: "
                                        "x%x\n", bf_get(lpfc_trailer_code,
                                        &cq_event->cqe.mcqe_cmpl));
                        break;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index c82b579..625c046 100644
@@ -8555,7 +8555,7 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
        psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
        spin_unlock_irq(&phba->hbalock);
 
-       /* wake up worker thread to post asynchronlous mailbox command */
+       /* wake up worker thread to post asynchronous mailbox command */
        lpfc_worker_wake_up(phba);
 }
 
@@ -8823,7 +8823,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
                return rc;
        }
 
-       /* Now, interrupt mode asynchrous mailbox command */
+       /* Now, interrupt mode asynchronous mailbox command */
        rc = lpfc_mbox_cmd_check(phba, mboxq);
        if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -13112,11 +13112,11 @@ lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
 }
 
 /**
- * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
  * @phba: Pointer to HBA context object.
  * @cqe: Pointer to mailbox completion queue entry.
  *
- * This routine process a mailbox completion queue entry with asynchrous
+ * This routine process a mailbox completion queue entry with asynchronous
  * event.
  *
  * Return: true if work posted to worker thread, otherwise false.
@@ -13270,7 +13270,7 @@ out_no_mqe_complete:
  * @cqe: Pointer to mailbox completion queue entry.
  *
  * This routine process a mailbox completion queue entry, it invokes the
- * proper mailbox complete handling or asynchrous event handling routine
+ * proper mailbox complete handling or asynchronous event handling routine
  * according to the MCQE's async bit.
  *
  * Return: true if work posted to worker thread, otherwise false.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 848fbec..45fd8df 100644
@@ -5248,7 +5248,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                                        &ct->chain_buffer_dma);
                        if (!ct->chain_buffer) {
                                ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
-                               _base_release_memory_pools(ioc);
                                goto out;
                        }
                }
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 6949ea8..51ffd5c 100644
@@ -646,7 +646,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
        }
 
        bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
-       bip_set_seed(bip, bio->bi_iter.bi_sector);
+       /* virtual start sector must be in integrity interval units */
+       bip_set_seed(bip, bio->bi_iter.bi_sector >>
+                                 (bi->interval_exp - SECTOR_SHIFT));
 
        pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
                 (unsigned long long)bip->bip_iter.bi_sector);
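
The shift converts a 512-byte sector number into protection-interval
units. Worked example (values assumed for illustration): with a 4 KiB
integrity interval, bi->interval_exp is 12 and SECTOR_SHIFT is 9, so the
seed is the sector divided by 8. Seeding with the raw bi_sector, as
before, produced wrong expected reference tags whenever the interval is
larger than 512 bytes:

	/* Sketch: 4 KiB protection interval (interval_exp == 12). */
	sector_t lba = 4096;			/* in 512B sectors */
	u32 seed = lba >> (12 - SECTOR_SHIFT);	/* 4096 >> 3 == 512 */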
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index ce9bac7..40705e8 100644
@@ -1693,6 +1693,7 @@ struct cifs_fattr {
        struct timespec64 cf_atime;
        struct timespec64 cf_mtime;
        struct timespec64 cf_ctime;
+       u32             cf_cifstag;
 };
 
 static inline void free_dfs_info_param(struct dfs_info3_param *param)
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 3925a7b..d17587c 100644
@@ -139,6 +139,28 @@ retry:
        dput(dentry);
 }
 
+static bool reparse_file_needs_reval(const struct cifs_fattr *fattr)
+{
+       if (!(fattr->cf_cifsattrs & ATTR_REPARSE))
+               return false;
+       /*
+        * The DFS tags should be only intepreted by server side as per
+        * The DFS tags should be only interpreted by the server side as per
+        *
+        * Besides, if cf_cifstag is unset (0), then we still need it to be
+        * revalidated to know exactly what reparse point it is.
+        */
+       switch (fattr->cf_cifstag) {
+       case IO_REPARSE_TAG_DFS:
+       case IO_REPARSE_TAG_DFSR:
+       case IO_REPARSE_TAG_SYMLINK:
+       case IO_REPARSE_TAG_NFS:
+       case 0:
+               return true;
+       }
+       return false;
+}
+
 static void
 cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
 {
@@ -158,7 +180,7 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
         * is a symbolic link, DFS referral or a reparse point with a direct
         * access like junctions, deduplicated files, NFS symlinks.
         */
-       if (fattr->cf_cifsattrs & ATTR_REPARSE)
+       if (reparse_file_needs_reval(fattr))
                fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
 
        /* non-unix readdir doesn't provide nlink */
@@ -194,19 +216,37 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
        }
 }
 
+static void __dir_info_to_fattr(struct cifs_fattr *fattr, const void *info)
+{
+       const FILE_DIRECTORY_INFO *fi = info;
+
+       memset(fattr, 0, sizeof(*fattr));
+       fattr->cf_cifsattrs = le32_to_cpu(fi->ExtFileAttributes);
+       fattr->cf_eof = le64_to_cpu(fi->EndOfFile);
+       fattr->cf_bytes = le64_to_cpu(fi->AllocationSize);
+       fattr->cf_createtime = le64_to_cpu(fi->CreationTime);
+       fattr->cf_atime = cifs_NTtimeToUnix(fi->LastAccessTime);
+       fattr->cf_ctime = cifs_NTtimeToUnix(fi->ChangeTime);
+       fattr->cf_mtime = cifs_NTtimeToUnix(fi->LastWriteTime);
+}
+
 void
 cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
                       struct cifs_sb_info *cifs_sb)
 {
-       memset(fattr, 0, sizeof(*fattr));
-       fattr->cf_cifsattrs = le32_to_cpu(info->ExtFileAttributes);
-       fattr->cf_eof = le64_to_cpu(info->EndOfFile);
-       fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
-       fattr->cf_createtime = le64_to_cpu(info->CreationTime);
-       fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
-       fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
-       fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
+       __dir_info_to_fattr(fattr, info);
+       cifs_fill_common_info(fattr, cifs_sb);
+}
 
+static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr,
+                                      SEARCH_ID_FULL_DIR_INFO *info,
+                                      struct cifs_sb_info *cifs_sb)
+{
+       __dir_info_to_fattr(fattr, info);
+
+       /* See MS-FSCC 2.4.18 FileIdFullDirectoryInformation */
+       if (fattr->cf_cifsattrs & ATTR_REPARSE)
+               fattr->cf_cifstag = le32_to_cpu(info->EaSize);
        cifs_fill_common_info(fattr, cifs_sb);
 }
 
@@ -755,6 +795,11 @@ static int cifs_filldir(char *find_entry, struct file *file,
                                       (FIND_FILE_STANDARD_INFO *)find_entry,
                                       cifs_sb);
                break;
+       case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+               cifs_fulldir_info_to_fattr(&fattr,
+                                          (SEARCH_ID_FULL_DIR_INFO *)find_entry,
+                                          cifs_sb);
+               break;
        default:
                cifs_dir_info_to_fattr(&fattr,
                                       (FILE_DIRECTORY_INFO *)find_entry,
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 8b0b512..afe1f03 100644
@@ -67,7 +67,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
                goto out;
 
 
-        if (oparms->tcon->use_resilient) {
+       if (oparms->tcon->use_resilient) {
                /* default timeout is 0, servers pick default (120 seconds) */
                nr_ioctl_req.Timeout =
                        cpu_to_le32(oparms->tcon->handle_timeout);
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 11e80b7..541c8a3 100644
@@ -92,7 +92,6 @@ struct io_wqe {
        struct io_wqe_acct acct[2];
 
        struct hlist_nulls_head free_list;
-       struct hlist_nulls_head busy_list;
        struct list_head all_list;
 
        struct io_wq *wq;
@@ -327,7 +326,6 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
        if (worker->flags & IO_WORKER_F_FREE) {
                worker->flags &= ~IO_WORKER_F_FREE;
                hlist_nulls_del_init_rcu(&worker->nulls_node);
-               hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
        }
 
        /*
@@ -365,7 +363,6 @@ static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
 {
        if (!(worker->flags & IO_WORKER_F_FREE)) {
                worker->flags |= IO_WORKER_F_FREE;
-               hlist_nulls_del_init_rcu(&worker->nulls_node);
                hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
        }
 
@@ -432,6 +429,8 @@ next:
                if (signal_pending(current))
                        flush_signals(current);
 
+               cond_resched();
+
                spin_lock_irq(&worker->lock);
                worker->cur_work = work;
                spin_unlock_irq(&worker->lock);
@@ -798,10 +797,6 @@ void io_wq_cancel_all(struct io_wq *wq)
 
        set_bit(IO_WQ_BIT_CANCEL, &wq->state);
 
-       /*
-        * Browse both lists, as there's a gap between handing work off
-        * to a worker and the worker putting itself on the busy_list
-        */
        rcu_read_lock();
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];
@@ -1049,7 +1044,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
                spin_lock_init(&wqe->lock);
                INIT_WQ_LIST(&wqe->work_list);
                INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
-               INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
                INIT_LIST_HEAD(&wqe->all_list);
        }
 
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6f084e3..562e3a1 100644
@@ -330,6 +330,26 @@ struct io_timeout {
        struct file                     *file;
        u64                             addr;
        int                             flags;
+       unsigned                        count;
+};
+
+struct io_rw {
+       /* NOTE: kiocb has the file as the first member, so don't do it here */
+       struct kiocb                    kiocb;
+       u64                             addr;
+       u64                             len;
+};
+
+struct io_connect {
+       struct file                     *file;
+       struct sockaddr __user          *addr;
+       int                             addr_len;
+};
+
+struct io_sr_msg {
+       struct file                     *file;
+       struct user_msghdr __user       *msg;
+       int                             msg_flags;
 };
 
 struct io_async_connect {
@@ -351,7 +371,6 @@ struct io_async_rw {
 };
 
 struct io_async_ctx {
-       struct io_uring_sqe             sqe;
        union {
                struct io_async_rw      rw;
                struct io_async_msghdr  msg;
@@ -369,15 +388,16 @@ struct io_async_ctx {
 struct io_kiocb {
        union {
                struct file             *file;
-               struct kiocb            rw;
+               struct io_rw            rw;
                struct io_poll_iocb     poll;
                struct io_accept        accept;
                struct io_sync          sync;
                struct io_cancel        cancel;
                struct io_timeout       timeout;
+               struct io_connect       connect;
+               struct io_sr_msg        sr_msg;
        };
 
-       const struct io_uring_sqe       *sqe;
        struct io_async_ctx             *io;
        struct file                     *ring_file;
        int                             ring_fd;
@@ -411,7 +431,6 @@ struct io_kiocb {
 #define REQ_F_INFLIGHT         16384   /* on inflight list */
 #define REQ_F_COMP_LOCKED      32768   /* completion under lock */
 #define REQ_F_HARDLINK         65536   /* doesn't sever on completion < 0 */
-#define REQ_F_PREPPED          131072  /* request already opcode prepared */
        u64                     user_data;
        u32                     result;
        u32                     sequence;
@@ -609,33 +628,31 @@ static inline bool io_prep_async_work(struct io_kiocb *req,
 {
        bool do_hashed = false;
 
-       if (req->sqe) {
-               switch (req->opcode) {
-               case IORING_OP_WRITEV:
-               case IORING_OP_WRITE_FIXED:
-                       /* only regular files should be hashed for writes */
-                       if (req->flags & REQ_F_ISREG)
-                               do_hashed = true;
-                       /* fall-through */
-               case IORING_OP_READV:
-               case IORING_OP_READ_FIXED:
-               case IORING_OP_SENDMSG:
-               case IORING_OP_RECVMSG:
-               case IORING_OP_ACCEPT:
-               case IORING_OP_POLL_ADD:
-               case IORING_OP_CONNECT:
-                       /*
-                        * We know REQ_F_ISREG is not set on some of these
-                        * opcodes, but this enables us to keep the check in
-                        * just one place.
-                        */
-                       if (!(req->flags & REQ_F_ISREG))
-                               req->work.flags |= IO_WQ_WORK_UNBOUND;
-                       break;
-               }
-               if (io_req_needs_user(req))
-                       req->work.flags |= IO_WQ_WORK_NEEDS_USER;
+       switch (req->opcode) {
+       case IORING_OP_WRITEV:
+       case IORING_OP_WRITE_FIXED:
+               /* only regular files should be hashed for writes */
+               if (req->flags & REQ_F_ISREG)
+                       do_hashed = true;
+               /* fall-through */
+       case IORING_OP_READV:
+       case IORING_OP_READ_FIXED:
+       case IORING_OP_SENDMSG:
+       case IORING_OP_RECVMSG:
+       case IORING_OP_ACCEPT:
+       case IORING_OP_POLL_ADD:
+       case IORING_OP_CONNECT:
+               /*
+                * We know REQ_F_ISREG is not set on some of these
+                * opcodes, but this enables us to keep the check in
+                * just one place.
+                */
+               if (!(req->flags & REQ_F_ISREG))
+                       req->work.flags |= IO_WQ_WORK_UNBOUND;
+               break;
        }
+       if (io_req_needs_user(req))
+               req->work.flags |= IO_WQ_WORK_NEEDS_USER;
 
        *link = io_prep_linked_timeout(req);
        return do_hashed;
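
For context on the hunk above: io-wq "hashed" work is serialized against other work hashing to the same key, so buffered writes to one regular file do not run concurrently, while requests that may block indefinitely on non-regular files go to the unbound worker pool. A self-contained userspace sketch of that classification, using hypothetical opcode and flag names rather than the kernel's io-wq API:

#include <stdbool.h>
#include <stdio.h>

enum toy_opcode { OP_READV, OP_WRITEV, OP_SENDMSG, OP_RECVMSG, OP_ACCEPT };

#define WORK_HASHED	(1u << 0)	/* serialize with same-file writes */
#define WORK_UNBOUND	(1u << 1)	/* may block; use the unbound pool */

/* Mirrors the switch above: only writes to regular files are hashed,
 * and any opcode not backed by a regular file runs unbound. */
static unsigned classify_work(enum toy_opcode op, bool is_regular_file)
{
	unsigned flags = 0;

	switch (op) {
	case OP_WRITEV:
		if (is_regular_file)
			flags |= WORK_HASHED;
		/* fall through: writes share the unbound check below */
	case OP_READV:
	case OP_SENDMSG:
	case OP_RECVMSG:
	case OP_ACCEPT:
		if (!is_regular_file)
			flags |= WORK_UNBOUND;
		break;
	}
	return flags;
}

int main(void)
{
	printf("regular-file write: %#x\n", classify_work(OP_WRITEV, true));
	printf("socket recvmsg:     %#x\n", classify_work(OP_RECVMSG, false));
	return 0;
}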
@@ -1180,7 +1197,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 
        ret = 0;
        list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
-               struct kiocb *kiocb = &req->rw;
+               struct kiocb *kiocb = &req->rw.kiocb;
 
                /*
                 * Move completed entries to our local list. If we find a
@@ -1335,7 +1352,7 @@ static inline void req_set_fail_links(struct io_kiocb *req)
 
 static void io_complete_rw_common(struct kiocb *kiocb, long res)
 {
-       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
@@ -1347,7 +1364,7 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res)
 
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
-       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
        io_complete_rw_common(kiocb, res);
        io_put_req(req);
@@ -1355,7 +1372,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 
 static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
 {
-       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
        struct io_kiocb *nxt = NULL;
 
        io_complete_rw_common(kiocb, res);
@@ -1366,7 +1383,7 @@ static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
 
 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 {
-       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
@@ -1400,7 +1417,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
 
                list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
                                                list);
-               if (list_req->rw.ki_filp != req->rw.ki_filp)
+               if (list_req->file != req->file)
                        ctx->poll_multi_file = true;
        }
 
@@ -1471,11 +1488,11 @@ static bool io_file_supports_async(struct file *file)
        return false;
 }
 
-static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                     bool force_nonblock)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_ring_ctx *ctx = req->ctx;
-       struct kiocb *kiocb = &req->rw;
+       struct kiocb *kiocb = &req->rw.kiocb;
        unsigned ioprio;
        int ret;
 
@@ -1524,6 +1541,12 @@ static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
                        return -EINVAL;
                kiocb->ki_complete = io_complete_rw;
        }
+
+       req->rw.addr = READ_ONCE(sqe->addr);
+       req->rw.len = READ_ONCE(sqe->len);
+       /* we own ->private, reuse it for the buffer index */
+       req->rw.kiocb.private = (void *) (unsigned long)
+                                       READ_ONCE(sqe->buf_index);
        return 0;
 }
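
The ->private reuse above packs the 16-bit buf_index into a pointer-sized field by widening through unsigned long; io_import_fixed() later reverses the cast. A minimal round-trip sketch of the trick, with a hypothetical struct name:

#include <assert.h>
#include <stdio.h>

struct toy_kiocb {
	void *private;			/* stand-in for kiocb->private */
};

int main(void)
{
	struct toy_kiocb kiocb;
	unsigned buf_index = 42;	/* sqe->buf_index is a u16 */

	/* store: widen through unsigned long so the cast is lossless */
	kiocb.private = (void *)(unsigned long)buf_index;
	/* load: reverse the conversion to recover the index */
	unsigned recovered = (unsigned long)kiocb.private;

	assert(recovered == buf_index);
	printf("buf_index round-trips as %u\n", recovered);
	return 0;
}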
 
@@ -1557,11 +1580,11 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
                io_rw_done(kiocb, ret);
 }
 
-static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw,
-                              const struct io_uring_sqe *sqe,
+static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
                               struct iov_iter *iter)
 {
-       size_t len = READ_ONCE(sqe->len);
+       struct io_ring_ctx *ctx = req->ctx;
+       size_t len = req->rw.len;
        struct io_mapped_ubuf *imu;
        unsigned index, buf_index;
        size_t offset;
@@ -1571,13 +1594,13 @@ static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw,
        if (unlikely(!ctx->user_bufs))
                return -EFAULT;
 
-       buf_index = READ_ONCE(sqe->buf_index);
+       buf_index = (unsigned long) req->rw.kiocb.private;
        if (unlikely(buf_index >= ctx->nr_user_bufs))
                return -EFAULT;
 
        index = array_index_nospec(buf_index, ctx->nr_user_bufs);
        imu = &ctx->user_bufs[index];
-       buf_addr = READ_ONCE(sqe->addr);
+       buf_addr = req->rw.addr;
 
        /* overflow */
        if (buf_addr + len < buf_addr)
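
The check just above (which rejects the range with -EFAULT) is the standard unsigned-wraparound test: if adding len wrapped past the top of the address space, the sum compares less than the base. A compilable illustration:

#include <stdint.h>
#include <stdio.h>

/* True when addr + len wraps around the top of the address space. */
static int range_overflows(uint64_t addr, uint64_t len)
{
	return addr + len < addr;	/* unsigned arithmetic wraps mod 2^64 */
}

int main(void)
{
	printf("%d\n", range_overflows(UINT64_MAX - 4, 16));	/* 1: wraps */
	printf("%d\n", range_overflows(4096, 512));		/* 0: fits  */
	return 0;
}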
@@ -1634,25 +1657,20 @@ static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw,
 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
                               struct iovec **iovec, struct iov_iter *iter)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
-       void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
-       size_t sqe_len = READ_ONCE(sqe->len);
+       void __user *buf = u64_to_user_ptr(req->rw.addr);
+       size_t sqe_len = req->rw.len;
        u8 opcode;
 
-       /*
-        * We're reading ->opcode for the second time, but the first read
-        * doesn't care whether it's _FIXED or not, so it doesn't matter
-        * whether ->opcode changes concurrently. The first read does care
-        * about whether it is a READ or a WRITE, so we don't trust this read
-        * for that purpose and instead let the caller pass in the read/write
-        * flag.
-        */
        opcode = req->opcode;
        if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
                *iovec = NULL;
-               return io_import_fixed(req->ctx, rw, sqe, iter);
+               return io_import_fixed(req, rw, iter);
        }
 
+       /* buffer index only valid with fixed read/write */
+       if (req->rw.kiocb.private)
+               return -EINVAL;
+
        if (req->io) {
                struct io_async_rw *iorw = &req->io->rw;
 
@@ -1750,13 +1768,7 @@ static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
 static int io_alloc_async_ctx(struct io_kiocb *req)
 {
        req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
-       if (req->io) {
-               memcpy(&req->io->sqe, req->sqe, sizeof(req->io->sqe));
-               req->sqe = &req->io->sqe;
-               return 0;
-       }
-
-       return 1;
+       return req->io == NULL;
 }
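
With the SQE now copied field-by-field at prep time, the async context no longer embeds an SQE copy, and the allocator collapses to the usual "nonzero on failure" idiom. A tiny compilable version of the same pattern, with hypothetical names:

#include <stdlib.h>

struct toy_req {
	void *io;
};

/* Returns nonzero exactly when the allocation failed, matching the
 * "return req->io == NULL" idiom in the hunk above. */
static int alloc_async_ctx(struct toy_req *req, size_t size)
{
	req->io = malloc(size);
	return req->io == NULL;
}

int main(void)
{
	struct toy_req req;

	if (alloc_async_ctx(&req, 128))
		return 1;	/* -ENOMEM in the kernel */
	free(req.io);
	return 0;
}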
 
 static void io_rw_async(struct io_wq_work **workptr)
@@ -1782,46 +1794,52 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
        return 0;
 }
 
-static int io_read_prep(struct io_kiocb *req, struct iovec **iovec,
-                       struct iov_iter *iter, bool force_nonblock)
+static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                       bool force_nonblock)
 {
+       struct io_async_ctx *io;
+       struct iov_iter iter;
        ssize_t ret;
 
-       ret = io_prep_rw(req, force_nonblock);
+       ret = io_prep_rw(req, sqe, force_nonblock);
        if (ret)
                return ret;
 
        if (unlikely(!(req->file->f_mode & FMODE_READ)))
                return -EBADF;
 
-       return io_import_iovec(READ, req, iovec, iter);
+       if (!req->io)
+               return 0;
+
+       io = req->io;
+       io->rw.iov = io->rw.fast_iov;
+       req->io = NULL;
+       ret = io_import_iovec(READ, req, &io->rw.iov, &iter);
+       req->io = io;
+       if (ret < 0)
+               return ret;
+
+       io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
+       return 0;
 }
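
io_read_prep() here (and the matching io_write_prep() below) imports the user iovec once, then parks it in the preallocated async context via io_req_map_rw(), so a punted request never re-reads SQE memory. A hedged userspace sketch of that park-the-iovec step, assuming a small inline array like the kernel's fast_iov:

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

#define FAST_IOV 8

struct toy_async_rw {
	struct iovec fast_iov[FAST_IOV];
	struct iovec *iov;	/* points at fast_iov or a heap copy */
	unsigned nr_segs;
};

/* Small vectors reuse the inline array; larger ones get a heap copy.
 * Either way the request owns a stable copy it can retry from. */
static int map_rw(struct toy_async_rw *io, const struct iovec *src,
		  unsigned nr)
{
	if (nr <= FAST_IOV) {
		memcpy(io->fast_iov, src, nr * sizeof(*src));
		io->iov = io->fast_iov;
	} else {
		io->iov = malloc(nr * sizeof(*src));
		if (!io->iov)
			return -1;	/* -ENOMEM in the kernel */
		memcpy(io->iov, src, nr * sizeof(*src));
	}
	io->nr_segs = nr;
	return 0;
}

int main(void)
{
	char buf[16];
	struct iovec v = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct toy_async_rw io;

	return map_rw(&io, &v, 1);	/* fits in fast_iov, returns 0 */
}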
 
 static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
                   bool force_nonblock)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
-       struct kiocb *kiocb = &req->rw;
+       struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter iter;
-       struct file *file;
        size_t iov_count;
        ssize_t io_size, ret;
 
-       if (!req->io) {
-               ret = io_read_prep(req, &iovec, &iter, force_nonblock);
-               if (ret < 0)
-                       return ret;
-       } else {
-               ret = io_import_iovec(READ, req, &iovec, &iter);
-               if (ret < 0)
-                       return ret;
-       }
+       ret = io_import_iovec(READ, req, &iovec, &iter);
+       if (ret < 0)
+               return ret;
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
-               req->rw.ki_flags &= ~IOCB_NOWAIT;
+               req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
 
-       file = req->file;
        io_size = ret;
        if (req->flags & REQ_F_LINK)
                req->result = io_size;
@@ -1830,20 +1848,20 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
         * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
         * we know to async punt it even if it was opened O_NONBLOCK
         */
-       if (force_nonblock && !io_file_supports_async(file)) {
+       if (force_nonblock && !io_file_supports_async(req->file)) {
                req->flags |= REQ_F_MUST_PUNT;
                goto copy_iov;
        }
 
        iov_count = iov_iter_count(&iter);
-       ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
+       ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
        if (!ret) {
                ssize_t ret2;
 
-               if (file->f_op->read_iter)
-                       ret2 = call_read_iter(file, kiocb, &iter);
+               if (req->file->f_op->read_iter)
+                       ret2 = call_read_iter(req->file, kiocb, &iter);
                else
-                       ret2 = loop_rw_iter(READ, file, kiocb, &iter);
+                       ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
 
                /*
                 * In case of a short read, punt to async. This can happen
@@ -1875,46 +1893,52 @@ out_free:
        return ret;
 }
 
-static int io_write_prep(struct io_kiocb *req, struct iovec **iovec,
-                        struct iov_iter *iter, bool force_nonblock)
+static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                        bool force_nonblock)
 {
+       struct io_async_ctx *io;
+       struct iov_iter iter;
        ssize_t ret;
 
-       ret = io_prep_rw(req, force_nonblock);
+       ret = io_prep_rw(req, sqe, force_nonblock);
        if (ret)
                return ret;
 
        if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
                return -EBADF;
 
-       return io_import_iovec(WRITE, req, iovec, iter);
+       if (!req->io)
+               return 0;
+
+       io = req->io;
+       io->rw.iov = io->rw.fast_iov;
+       req->io = NULL;
+       ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter);
+       req->io = io;
+       if (ret < 0)
+               return ret;
+
+       io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
+       return 0;
 }
 
 static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
                    bool force_nonblock)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
-       struct kiocb *kiocb = &req->rw;
+       struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter iter;
-       struct file *file;
        size_t iov_count;
        ssize_t ret, io_size;
 
-       if (!req->io) {
-               ret = io_write_prep(req, &iovec, &iter, force_nonblock);
-               if (ret < 0)
-                       return ret;
-       } else {
-               ret = io_import_iovec(WRITE, req, &iovec, &iter);
-               if (ret < 0)
-                       return ret;
-       }
+       ret = io_import_iovec(WRITE, req, &iovec, &iter);
+       if (ret < 0)
+               return ret;
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
-               req->rw.ki_flags &= ~IOCB_NOWAIT;
+               req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
 
-       file = kiocb->ki_filp;
        io_size = ret;
        if (req->flags & REQ_F_LINK)
                req->result = io_size;
@@ -1934,7 +1958,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
                goto copy_iov;
 
        iov_count = iov_iter_count(&iter);
-       ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
+       ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
        if (!ret) {
                ssize_t ret2;
 
@@ -1946,17 +1970,17 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
                 * we return to userspace.
                 */
                if (req->flags & REQ_F_ISREG) {
-                       __sb_start_write(file_inode(file)->i_sb,
+                       __sb_start_write(file_inode(req->file)->i_sb,
                                                SB_FREEZE_WRITE, true);
-                       __sb_writers_release(file_inode(file)->i_sb,
+                       __sb_writers_release(file_inode(req->file)->i_sb,
                                                SB_FREEZE_WRITE);
                }
                kiocb->ki_flags |= IOCB_WRITE;
 
-               if (file->f_op->write_iter)
-                       ret2 = call_write_iter(file, kiocb, &iter);
+               if (req->file->f_op->write_iter)
+                       ret2 = call_write_iter(req->file, kiocb, &iter);
                else
-                       ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
+                       ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
                if (!force_nonblock || ret2 != -EAGAIN) {
                        kiocb_done(kiocb, ret2, nxt, req->in_async);
                } else {
@@ -1989,13 +2013,10 @@ static int io_nop(struct io_kiocb *req)
        return 0;
 }
 
-static int io_prep_fsync(struct io_kiocb *req)
+static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (req->flags & REQ_F_PREPPED)
-               return 0;
        if (!req->file)
                return -EBADF;
 
@@ -2010,7 +2031,6 @@ static int io_prep_fsync(struct io_kiocb *req)
 
        req->sync.off = READ_ONCE(sqe->off);
        req->sync.len = READ_ONCE(sqe->len);
-       req->flags |= REQ_F_PREPPED;
        return 0;
 }
 
@@ -2036,7 +2056,7 @@ static void io_fsync_finish(struct io_wq_work **workptr)
        if (io_req_cancelled(req))
                return;
 
-       ret = vfs_fsync_range(req->rw.ki_filp, req->sync.off,
+       ret = vfs_fsync_range(req->file, req->sync.off,
                                end > 0 ? end : LLONG_MAX,
                                req->sync.flags & IORING_FSYNC_DATASYNC);
        if (ret < 0)
@@ -2051,11 +2071,6 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
                    bool force_nonblock)
 {
        struct io_wq_work *work, *old_work;
-       int ret;
-
-       ret = io_prep_fsync(req);
-       if (ret)
-               return ret;
 
        /* fsync always requires a blocking context */
        if (force_nonblock) {
@@ -2071,13 +2086,10 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
        return 0;
 }
 
-static int io_prep_sfr(struct io_kiocb *req)
+static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (req->flags & REQ_F_PREPPED)
-               return 0;
        if (!req->file)
                return -EBADF;
 
@@ -2089,7 +2101,6 @@ static int io_prep_sfr(struct io_kiocb *req)
        req->sync.off = READ_ONCE(sqe->off);
        req->sync.len = READ_ONCE(sqe->len);
        req->sync.flags = READ_ONCE(sqe->sync_range_flags);
-       req->flags |= REQ_F_PREPPED;
        return 0;
 }
 
@@ -2102,7 +2113,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
        if (io_req_cancelled(req))
                return;
 
-       ret = sync_file_range(req->rw.ki_filp, req->sync.off, req->sync.len,
+       ret = sync_file_range(req->file, req->sync.off, req->sync.len,
                                req->sync.flags);
        if (ret < 0)
                req_set_fail_links(req);
@@ -2116,11 +2127,6 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
                              bool force_nonblock)
 {
        struct io_wq_work *work, *old_work;
-       int ret;
-
-       ret = io_prep_sfr(req);
-       if (ret)
-               return ret;
 
        /* sync_file_range always requires a blocking context */
        if (force_nonblock) {
@@ -2149,19 +2155,23 @@ static void io_sendrecv_async(struct io_wq_work **workptr)
 }
 #endif
 
-static int io_sendmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
-       struct user_msghdr __user *msg;
-       unsigned flags;
+       struct io_sr_msg *sr = &req->sr_msg;
+       struct io_async_ctx *io = req->io;
+
+       sr->msg_flags = READ_ONCE(sqe->msg_flags);
+       sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+
+       if (!io)
+               return 0;
 
-       flags = READ_ONCE(sqe->msg_flags);
-       msg = (struct user_msghdr __user *)(unsigned long) READ_ONCE(sqe->addr);
        io->msg.iov = io->msg.fast_iov;
-       return sendmsg_copy_msghdr(&io->msg.msg, msg, flags, &io->msg.iov);
+       return sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+                                       &io->msg.iov);
 #else
-       return 0;
+       return -EOPNOTSUPP;
 #endif
 }
 
@@ -2169,7 +2179,6 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                      bool force_nonblock)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_async_msghdr *kmsg = NULL;
        struct socket *sock;
        int ret;
@@ -2183,12 +2192,6 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                struct sockaddr_storage addr;
                unsigned flags;
 
-               flags = READ_ONCE(sqe->msg_flags);
-               if (flags & MSG_DONTWAIT)
-                       req->flags |= REQ_F_NOWAIT;
-               else if (force_nonblock)
-                       flags |= MSG_DONTWAIT;
-
                if (req->io) {
                        kmsg = &req->io->msg;
                        kmsg->msg.msg_name = &addr;
@@ -2197,13 +2200,24 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                                kmsg->iov = kmsg->fast_iov;
                        kmsg->msg.msg_iter.iov = kmsg->iov;
                } else {
+                       struct io_sr_msg *sr = &req->sr_msg;
+
                        kmsg = &io.msg;
                        kmsg->msg.msg_name = &addr;
-                       ret = io_sendmsg_prep(req, &io);
+
+                       io.msg.iov = io.msg.fast_iov;
+                       ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
+                                       sr->msg_flags, &io.msg.iov);
                        if (ret)
-                               goto out;
+                               return ret;
                }
 
+               flags = req->sr_msg.msg_flags;
+               if (flags & MSG_DONTWAIT)
+                       req->flags |= REQ_F_NOWAIT;
+               else if (force_nonblock)
+                       flags |= MSG_DONTWAIT;
+
                ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
                if (force_nonblock && ret == -EAGAIN) {
                        if (req->io)
@@ -2218,7 +2232,6 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                        ret = -EINTR;
        }
 
-out:
        if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
                kfree(kmsg->iov);
        io_cqring_add_event(req, ret);
@@ -2231,20 +2244,24 @@ out:
 #endif
 }
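
The send path (and the receive path below) now resolves flags after prep, with one rule: a user-supplied MSG_DONTWAIT marks the request so it is never punted to a worker, while a nonblocking submission adds MSG_DONTWAIT itself so the call fails fast with -EAGAIN. A compilable sketch of just that flag logic; the request flag here is a hypothetical stand-in:

#include <stdbool.h>
#include <sys/socket.h>

#define REQ_F_NOWAIT	(1u << 0)	/* hypothetical stand-in */

static unsigned resolve_msg_flags(unsigned msg_flags, bool force_nonblock,
				  unsigned *req_flags)
{
	if (msg_flags & MSG_DONTWAIT)
		*req_flags |= REQ_F_NOWAIT;	/* user said: don't punt */
	else if (force_nonblock)
		msg_flags |= MSG_DONTWAIT;	/* we said: fail fast */
	return msg_flags;
}

int main(void)
{
	unsigned req_flags = 0;
	unsigned flags = resolve_msg_flags(0, true, &req_flags);

	return !(flags & MSG_DONTWAIT);	/* nonblocking submit forces it */
}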
 
-static int io_recvmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_recvmsg_prep(struct io_kiocb *req,
+                          const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
-       struct user_msghdr __user *msg;
-       unsigned flags;
+       struct io_sr_msg *sr = &req->sr_msg;
+       struct io_async_ctx *io = req->io;
+
+       sr->msg_flags = READ_ONCE(sqe->msg_flags);
+       sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+
+       if (!io)
+               return 0;
 
-       flags = READ_ONCE(sqe->msg_flags);
-       msg = (struct user_msghdr __user *)(unsigned long) READ_ONCE(sqe->addr);
        io->msg.iov = io->msg.fast_iov;
-       return recvmsg_copy_msghdr(&io->msg.msg, msg, flags, &io->msg.uaddr,
-                                       &io->msg.iov);
+       return recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+                                       &io->msg.uaddr, &io->msg.iov);
 #else
-       return 0;
+       return -EOPNOTSUPP;
 #endif
 }
 
@@ -2252,7 +2269,6 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                      bool force_nonblock)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_async_msghdr *kmsg = NULL;
        struct socket *sock;
        int ret;
@@ -2262,19 +2278,10 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
 
        sock = sock_from_file(req->file, &ret);
        if (sock) {
-               struct user_msghdr __user *msg;
                struct io_async_ctx io;
                struct sockaddr_storage addr;
                unsigned flags;
 
-               flags = READ_ONCE(sqe->msg_flags);
-               if (flags & MSG_DONTWAIT)
-                       req->flags |= REQ_F_NOWAIT;
-               else if (force_nonblock)
-                       flags |= MSG_DONTWAIT;
-
-               msg = (struct user_msghdr __user *) (unsigned long)
-                       READ_ONCE(sqe->addr);
                if (req->io) {
                        kmsg = &req->io->msg;
                        kmsg->msg.msg_name = &addr;
@@ -2283,14 +2290,27 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                                kmsg->iov = kmsg->fast_iov;
                        kmsg->msg.msg_iter.iov = kmsg->iov;
                } else {
+                       struct io_sr_msg *sr = &req->sr_msg;
+
                        kmsg = &io.msg;
                        kmsg->msg.msg_name = &addr;
-                       ret = io_recvmsg_prep(req, &io);
+
+                       io.msg.iov = io.msg.fast_iov;
+                       ret = recvmsg_copy_msghdr(&io.msg.msg, sr->msg,
+                                       sr->msg_flags, &io.msg.uaddr,
+                                       &io.msg.iov);
                        if (ret)
-                               goto out;
+                               return ret;
                }
 
-               ret = __sys_recvmsg_sock(sock, &kmsg->msg, msg, kmsg->uaddr, flags);
+               flags = req->sr_msg.msg_flags;
+               if (flags & MSG_DONTWAIT)
+                       req->flags |= REQ_F_NOWAIT;
+               else if (force_nonblock)
+                       flags |= MSG_DONTWAIT;
+
+               ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
+                                               kmsg->uaddr, flags);
                if (force_nonblock && ret == -EAGAIN) {
                        if (req->io)
                                return -EAGAIN;
@@ -2304,7 +2324,6 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                        ret = -EINTR;
        }
 
-out:
        if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
                kfree(kmsg->iov);
        io_cqring_add_event(req, ret);
@@ -2317,25 +2336,19 @@ out:
 #endif
 }
 
-static int io_accept_prep(struct io_kiocb *req)
+static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_accept *accept = &req->accept;
 
-       if (req->flags & REQ_F_PREPPED)
-               return 0;
-
        if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
                return -EINVAL;
        if (sqe->ioprio || sqe->len || sqe->buf_index)
                return -EINVAL;
 
-       accept->addr = (struct sockaddr __user *)
-                               (unsigned long) READ_ONCE(sqe->addr);
-       accept->addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2);
+       accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
        accept->flags = READ_ONCE(sqe->accept_flags);
-       req->flags |= REQ_F_PREPPED;
        return 0;
 #else
        return -EOPNOTSUPP;
@@ -2383,10 +2396,6 @@ static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
 #if defined(CONFIG_NET)
        int ret;
 
-       ret = io_accept_prep(req);
-       if (ret)
-               return ret;
-
        ret = __io_accept(req, nxt, force_nonblock);
        if (ret == -EAGAIN && force_nonblock) {
                req->work.func = io_accept_finish;
@@ -2400,18 +2409,27 @@ static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
 #endif
 }
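
The accept prep hunk above (and connect prep below) swaps the open-coded (type __user *)(unsigned long) double cast for the kernel's u64_to_user_ptr() helper. A userspace approximation of what that boils down to; the __user annotation has no userspace equivalent, so a plain pointer stands in:

#include <assert.h>
#include <stdint.h>

/* Userspace stand-in for u64_to_user_ptr(): widening through
 * unsigned long is exactly what the helper hides. */
#define u64_to_ptr(x) ((void *)(unsigned long)(x))

int main(void)
{
	int addr_len = 0;
	uint64_t raw = (uint64_t)(unsigned long)&addr_len; /* as stored in sqe->addr2 */

	assert(u64_to_ptr(raw) == (void *)&addr_len);
	return 0;
}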
 
-static int io_connect_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
-       struct sockaddr __user *addr;
-       int addr_len;
+       struct io_connect *conn = &req->connect;
+       struct io_async_ctx *io = req->io;
+
+       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+               return -EINVAL;
+       if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
+               return -EINVAL;
 
-       addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
-       addr_len = READ_ONCE(sqe->addr2);
-       return move_addr_to_kernel(addr, addr_len, &io->connect.address);
+       conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       conn->addr_len = READ_ONCE(sqe->addr2);
+
+       if (!io)
+               return 0;
+
+       return move_addr_to_kernel(conn->addr, conn->addr_len,
+                                       &io->connect.address);
 #else
-       return 0;
+       return -EOPNOTSUPP;
 #endif
 }
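
io_connect_prep() now just records the user pointer and length; the kernel-space copy of the sockaddr happens either at prep time, when async state already exists, or on the issue path into an on-stack scratch copy. A hedged sketch of the bounds-checked copy; note the kernel uses move_addr_to_kernel() rather than a bare memcpy():

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Hypothetical helper: reject bad lengths, then snapshot the address
 * into per-request storage so retries never touch user memory again. */
static int stash_addr(struct sockaddr_storage *dst,
		      const struct sockaddr *src, int len)
{
	if (len < 0 || (size_t)len > sizeof(*dst))
		return -1;	/* -EINVAL in kernel terms */
	memcpy(dst, src, (size_t)len);	/* move_addr_to_kernel() in-kernel */
	return 0;
}

int main(void)
{
	struct sockaddr_in in = { .sin_family = AF_INET };
	struct sockaddr_storage st;

	return stash_addr(&st, (struct sockaddr *)&in, sizeof(in));
}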
 
@@ -2419,30 +2437,25 @@ static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt,
                      bool force_nonblock)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_async_ctx __io, *io;
        unsigned file_flags;
-       int addr_len, ret;
-
-       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
-               return -EINVAL;
-
-       addr_len = READ_ONCE(sqe->addr2);
-       file_flags = force_nonblock ? O_NONBLOCK : 0;
+       int ret;
 
        if (req->io) {
                io = req->io;
        } else {
-               ret = io_connect_prep(req, &__io);
+               ret = move_addr_to_kernel(req->connect.addr,
+                                               req->connect.addr_len,
+                                               &__io.connect.address);
                if (ret)
                        goto out;
                io = &__io;
        }
 
-       ret = __sys_connect_file(req->file, &io->connect.address, addr_len,
-                                       file_flags);
+       file_flags = force_nonblock ? O_NONBLOCK : 0;
+
+       ret = __sys_connect_file(req->file, &io->connect.address,
+                                       req->connect.addr_len, file_flags);
        if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
                if (req->io)
                        return -EAGAIN;
@@ -2513,12 +2526,9 @@ static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
        return -ENOENT;
 }
 
-static int io_poll_remove_prep(struct io_kiocb *req)
+static int io_poll_remove_prep(struct io_kiocb *req,
+                              const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
-
-       if (req->flags & REQ_F_PREPPED)
-               return 0;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
@@ -2526,7 +2536,6 @@ static int io_poll_remove_prep(struct io_kiocb *req)
                return -EINVAL;
 
        req->poll.addr = READ_ONCE(sqe->addr);
-       req->flags |= REQ_F_PREPPED;
        return 0;
 }
 
@@ -2540,10 +2549,6 @@ static int io_poll_remove(struct io_kiocb *req)
        u64 addr;
        int ret;
 
-       ret = io_poll_remove_prep(req);
-       if (ret)
-               return ret;
-
        addr = req->poll.addr;
        spin_lock_irq(&ctx->completion_lock);
        ret = io_poll_cancel(ctx, addr);
@@ -2681,14 +2686,11 @@ static void io_poll_req_insert(struct io_kiocb *req)
        hlist_add_head(&req->hash_node, list);
 }
 
-static int io_poll_add_prep(struct io_kiocb *req)
+static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_poll_iocb *poll = &req->poll;
        u16 events;
 
-       if (req->flags & REQ_F_PREPPED)
-               return 0;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
@@ -2696,7 +2698,6 @@ static int io_poll_add_prep(struct io_kiocb *req)
        if (!poll->file)
                return -EBADF;
 
-       req->flags |= REQ_F_PREPPED;
        events = READ_ONCE(sqe->poll_events);
        poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
        return 0;
@@ -2709,11 +2710,6 @@ static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt)
        struct io_poll_table ipt;
        bool cancel = false;
        __poll_t mask;
-       int ret;
-
-       ret = io_poll_add_prep(req);
-       if (ret)
-               return ret;
 
        INIT_IO_WORK(&req->work, io_poll_complete_work);
        INIT_HLIST_NODE(&req->hash_node);
@@ -2832,12 +2828,9 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
        return 0;
 }
 
-static int io_timeout_remove_prep(struct io_kiocb *req)
+static int io_timeout_remove_prep(struct io_kiocb *req,
+                                 const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
-
-       if (req->flags & REQ_F_PREPPED)
-               return 0;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
@@ -2848,7 +2841,6 @@ static int io_timeout_remove_prep(struct io_kiocb *req)
        if (req->timeout.flags)
                return -EINVAL;
 
-       req->flags |= REQ_F_PREPPED;
        return 0;
 }
 
@@ -2860,10 +2852,6 @@ static int io_timeout_remove(struct io_kiocb *req)
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
-       ret = io_timeout_remove_prep(req);
-       if (ret)
-               return ret;
-
        spin_lock_irq(&ctx->completion_lock);
        ret = io_timeout_cancel(ctx, req->timeout.addr);
 
@@ -2877,10 +2865,9 @@ static int io_timeout_remove(struct io_kiocb *req)
        return 0;
 }
 
-static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
+static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                           bool is_timeout_link)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_timeout_data *data;
        unsigned flags;
 
@@ -2894,7 +2881,12 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
        if (flags & ~IORING_TIMEOUT_ABS)
                return -EINVAL;
 
-       data = &io->timeout;
+       req->timeout.count = READ_ONCE(sqe->off);
+
+       if (!req->io && io_alloc_async_ctx(req))
+               return -ENOMEM;
+
+       data = &req->io->timeout;
        data->req = req;
        req->flags |= REQ_F_TIMEOUT;
 
@@ -2912,21 +2904,12 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
 
 static int io_timeout(struct io_kiocb *req)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        unsigned count;
        struct io_ring_ctx *ctx = req->ctx;
        struct io_timeout_data *data;
        struct list_head *entry;
        unsigned span = 0;
-       int ret;
 
-       if (!req->io) {
-               if (io_alloc_async_ctx(req))
-                       return -ENOMEM;
-               ret = io_timeout_prep(req, req->io, false);
-               if (ret)
-                       return ret;
-       }
        data = &req->io->timeout;
 
        /*
@@ -2934,7 +2917,7 @@ static int io_timeout(struct io_kiocb *req)
         * timeout event to be satisfied. If it isn't set, then this is
         * a pure timeout request, sequence isn't used.
         */
-       count = READ_ONCE(sqe->off);
+       count = req->timeout.count;
        if (!count) {
                req->flags |= REQ_F_TIMEOUT_NOSEQ;
                spin_lock_irq(&ctx->completion_lock);
@@ -3052,19 +3035,15 @@ done:
        io_put_req_find_next(req, nxt);
 }
 
-static int io_async_cancel_prep(struct io_kiocb *req)
+static int io_async_cancel_prep(struct io_kiocb *req,
+                               const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
-
-       if (req->flags & REQ_F_PREPPED)
-               return 0;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
            sqe->cancel_flags)
                return -EINVAL;
 
-       req->flags |= REQ_F_PREPPED;
        req->cancel.addr = READ_ONCE(sqe->addr);
        return 0;
 }
@@ -3072,21 +3051,14 @@ static int io_async_cancel_prep(struct io_kiocb *req)
 static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       int ret;
-
-       ret = io_async_cancel_prep(req);
-       if (ret)
-               return ret;
 
        io_async_find_and_cancel(ctx, req, req->cancel.addr, nxt, 0);
        return 0;
 }
 
-static int io_req_defer_prep(struct io_kiocb *req)
+static int io_req_defer_prep(struct io_kiocb *req,
+                            const struct io_uring_sqe *sqe)
 {
-       struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
-       struct io_async_ctx *io = req->io;
-       struct iov_iter iter;
        ssize_t ret = 0;
 
        switch (req->opcode) {
@@ -3094,61 +3066,47 @@ static int io_req_defer_prep(struct io_kiocb *req)
                break;
        case IORING_OP_READV:
        case IORING_OP_READ_FIXED:
-               /* ensure prep does right import */
-               req->io = NULL;
-               ret = io_read_prep(req, &iovec, &iter, true);
-               req->io = io;
-               if (ret < 0)
-                       break;
-               io_req_map_rw(req, ret, iovec, inline_vecs, &iter);
-               ret = 0;
+               ret = io_read_prep(req, sqe, true);
                break;
        case IORING_OP_WRITEV:
        case IORING_OP_WRITE_FIXED:
-               /* ensure prep does right import */
-               req->io = NULL;
-               ret = io_write_prep(req, &iovec, &iter, true);
-               req->io = io;
-               if (ret < 0)
-                       break;
-               io_req_map_rw(req, ret, iovec, inline_vecs, &iter);
-               ret = 0;
+               ret = io_write_prep(req, sqe, true);
                break;
        case IORING_OP_POLL_ADD:
-               ret = io_poll_add_prep(req);
+               ret = io_poll_add_prep(req, sqe);
                break;
        case IORING_OP_POLL_REMOVE:
-               ret = io_poll_remove_prep(req);
+               ret = io_poll_remove_prep(req, sqe);
                break;
        case IORING_OP_FSYNC:
-               ret = io_prep_fsync(req);
+               ret = io_prep_fsync(req, sqe);
                break;
        case IORING_OP_SYNC_FILE_RANGE:
-               ret = io_prep_sfr(req);
+               ret = io_prep_sfr(req, sqe);
                break;
        case IORING_OP_SENDMSG:
-               ret = io_sendmsg_prep(req, io);
+               ret = io_sendmsg_prep(req, sqe);
                break;
        case IORING_OP_RECVMSG:
-               ret = io_recvmsg_prep(req, io);
+               ret = io_recvmsg_prep(req, sqe);
                break;
        case IORING_OP_CONNECT:
-               ret = io_connect_prep(req, io);
+               ret = io_connect_prep(req, sqe);
                break;
        case IORING_OP_TIMEOUT:
-               ret = io_timeout_prep(req, io, false);
+               ret = io_timeout_prep(req, sqe, false);
                break;
        case IORING_OP_TIMEOUT_REMOVE:
-               ret = io_timeout_remove_prep(req);
+               ret = io_timeout_remove_prep(req, sqe);
                break;
        case IORING_OP_ASYNC_CANCEL:
-               ret = io_async_cancel_prep(req);
+               ret = io_async_cancel_prep(req, sqe);
                break;
        case IORING_OP_LINK_TIMEOUT:
-               ret = io_timeout_prep(req, io, true);
+               ret = io_timeout_prep(req, sqe, true);
                break;
        case IORING_OP_ACCEPT:
-               ret = io_accept_prep(req);
+               ret = io_accept_prep(req, sqe);
                break;
        default:
                printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
@@ -3160,7 +3118,7 @@ static int io_req_defer_prep(struct io_kiocb *req)
        return ret;
 }
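
The switch above now routes every deferred request through a single per-opcode prep call. As an aside, the same dispatch shape can be written as a function-pointer table, which is roughly the direction later io_uring releases took; the sketch below is only an illustration with hypothetical table contents, not this patch's approach:

#include <stddef.h>

typedef int (*prep_fn)(void *req, const void *sqe);

static int nop_prep(void *req, const void *sqe)
{
	(void)req;
	(void)sqe;
	return 0;
}

/* Purely illustrative: the opcode value and table are hypothetical. */
static const prep_fn prep_table[] = {
	[0] = nop_prep,		/* e.g. IORING_OP_NOP */
};

static int defer_prep(void *req, const void *sqe, unsigned opcode)
{
	if (opcode >= sizeof(prep_table) / sizeof(prep_table[0]) ||
	    !prep_table[opcode])
		return -1;	/* the switch's -EINVAL default */
	return prep_table[opcode](req, sqe);
}

int main(void)
{
	return defer_prep(NULL, NULL, 0);
}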
 
-static int io_req_defer(struct io_kiocb *req)
+static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
@@ -3169,10 +3127,10 @@ static int io_req_defer(struct io_kiocb *req)
        if (!req_need_defer(req) && list_empty(&ctx->defer_list))
                return 0;
 
-       if (io_alloc_async_ctx(req))
+       if (!req->io && io_alloc_async_ctx(req))
                return -EAGAIN;
 
-       ret = io_req_defer_prep(req);
+       ret = io_req_defer_prep(req, sqe);
        if (ret < 0)
                return ret;
 
@@ -3188,9 +3146,8 @@ static int io_req_defer(struct io_kiocb *req)
        return -EIOCBQUEUED;
 }
 
-__attribute__((nonnull))
-static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
-                       bool force_nonblock)
+static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                       struct io_kiocb **nxt, bool force_nonblock)
 {
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
@@ -3200,52 +3157,109 @@ static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
                ret = io_nop(req);
                break;
        case IORING_OP_READV:
-               if (unlikely(req->sqe->buf_index))
-                       return -EINVAL;
-               ret = io_read(req, nxt, force_nonblock);
-               break;
-       case IORING_OP_WRITEV:
-               if (unlikely(req->sqe->buf_index))
-                       return -EINVAL;
-               ret = io_write(req, nxt, force_nonblock);
-               break;
        case IORING_OP_READ_FIXED:
+               if (sqe) {
+                       ret = io_read_prep(req, sqe, force_nonblock);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_read(req, nxt, force_nonblock);
                break;
+       case IORING_OP_WRITEV:
        case IORING_OP_WRITE_FIXED:
+               if (sqe) {
+                       ret = io_write_prep(req, sqe, force_nonblock);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_write(req, nxt, force_nonblock);
                break;
        case IORING_OP_FSYNC:
+               if (sqe) {
+                       ret = io_prep_fsync(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_fsync(req, nxt, force_nonblock);
                break;
        case IORING_OP_POLL_ADD:
+               if (sqe) {
+                       ret = io_poll_add_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_poll_add(req, nxt);
                break;
        case IORING_OP_POLL_REMOVE:
+               if (sqe) {
+                       ret = io_poll_remove_prep(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_poll_remove(req);
                break;
        case IORING_OP_SYNC_FILE_RANGE:
+               if (sqe) {
+                       ret = io_prep_sfr(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_sync_file_range(req, nxt, force_nonblock);
                break;
        case IORING_OP_SENDMSG:
+               if (sqe) {
+                       ret = io_sendmsg_prep(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_sendmsg(req, nxt, force_nonblock);
                break;
        case IORING_OP_RECVMSG:
+               if (sqe) {
+                       ret = io_recvmsg_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_recvmsg(req, nxt, force_nonblock);
                break;
        case IORING_OP_TIMEOUT:
+               if (sqe) {
+                       ret = io_timeout_prep(req, sqe, false);
+                       if (ret)
+                               break;
+               }
                ret = io_timeout(req);
                break;
        case IORING_OP_TIMEOUT_REMOVE:
+               if (sqe) {
+                       ret = io_timeout_remove_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_timeout_remove(req);
                break;
        case IORING_OP_ACCEPT:
+               if (sqe) {
+                       ret = io_accept_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_accept(req, nxt, force_nonblock);
                break;
        case IORING_OP_CONNECT:
+               if (sqe) {
+                       ret = io_connect_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_connect(req, nxt, force_nonblock);
                break;
        case IORING_OP_ASYNC_CANCEL:
+               if (sqe) {
+                       ret = io_async_cancel_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_async_cancel(req, nxt);
                break;
        default:
@@ -3289,7 +3303,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
                req->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
                req->in_async = true;
                do {
-                       ret = io_issue_sqe(req, &nxt, false);
+                       ret = io_issue_sqe(req, NULL, &nxt, false);
                        /*
                         * We can get EAGAIN for polled IO even though we're
                         * forcing a sync submission from here, since we can't
@@ -3355,14 +3369,15 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
        return table->files[index & IORING_FILE_TABLE_MASK];
 }
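
The io_issue_sqe() and io_wq_submit_work() hunks above establish the patch's core contract: prep runs exactly once. The inline submission path passes the live SQE so its fields are snapshotted into the request immediately; async workers pass NULL because the ring slot may already have been reused by userspace. A minimal sketch of that contract, with hypothetical types:

#include <stddef.h>

struct toy_sqe { int opcode; };

struct toy_req {
	int prepped;	/* per-opcode state copied out of the SQE */
};

/* First pass snapshots the SQE; every later pass runs purely from
 * state stored in the request itself. */
static int issue(struct toy_req *req, const struct toy_sqe *sqe)
{
	if (sqe && !req->prepped) {
		/* ...copy the needed SQE fields into *req... */
		req->prepped = 1;
	}
	/* ...execute using only state stored in *req... */
	return 0;
}

int main(void)
{
	struct toy_sqe sqe = { .opcode = 0 };
	struct toy_req req = { .prepped = 0 };

	issue(&req, &sqe);		/* inline submission: prep + issue */
	return issue(&req, NULL);	/* worker retry: no SQE access */
}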
 
-static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
+static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
+                          const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
        unsigned flags;
        int fd, ret;
 
-       flags = READ_ONCE(req->sqe->flags);
-       fd = READ_ONCE(req->sqe->fd);
+       flags = READ_ONCE(sqe->flags);
+       fd = READ_ONCE(sqe->fd);
 
        if (flags & IOSQE_IO_DRAIN)
                req->flags |= REQ_F_IO_DRAIN;
@@ -3494,7 +3509,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
        return nxt;
 }
 
-static void __io_queue_sqe(struct io_kiocb *req)
+static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_kiocb *linked_timeout;
        struct io_kiocb *nxt = NULL;
@@ -3503,7 +3518,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
 again:
        linked_timeout = io_prep_linked_timeout(req);
 
-       ret = io_issue_sqe(req, &nxt, true);
+       ret = io_issue_sqe(req, sqe, &nxt, true);
 
        /*
         * We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -3550,7 +3565,7 @@ done_req:
        }
 }
 
-static void io_queue_sqe(struct io_kiocb *req)
+static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        int ret;
 
@@ -3560,7 +3575,7 @@ static void io_queue_sqe(struct io_kiocb *req)
        }
        req->ctx->drain_next = (req->flags & REQ_F_DRAIN_LINK);
 
-       ret = io_req_defer(req);
+       ret = io_req_defer(req, sqe);
        if (ret) {
                if (ret != -EIOCBQUEUED) {
                        io_cqring_add_event(req, ret);
@@ -3568,7 +3583,7 @@ static void io_queue_sqe(struct io_kiocb *req)
                        io_double_put_req(req);
                }
        } else
-               __io_queue_sqe(req);
+               __io_queue_sqe(req, sqe);
 }
 
 static inline void io_queue_link_head(struct io_kiocb *req)
@@ -3577,25 +3592,25 @@ static inline void io_queue_link_head(struct io_kiocb *req)
                io_cqring_add_event(req, -ECANCELED);
                io_double_put_req(req);
        } else
-               io_queue_sqe(req);
+               io_queue_sqe(req, NULL);
 }
 
 #define SQE_VALID_FLAGS        (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
                                IOSQE_IO_HARDLINK)
 
-static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
-                         struct io_kiocb **link)
+static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                         struct io_submit_state *state, struct io_kiocb **link)
 {
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
        /* enforce forwards compatibility on users */
-       if (unlikely(req->sqe->flags & ~SQE_VALID_FLAGS)) {
+       if (unlikely(sqe->flags & ~SQE_VALID_FLAGS)) {
                ret = -EINVAL;
                goto err_req;
        }
 
-       ret = io_req_set_file(state, req);
+       ret = io_req_set_file(state, req, sqe);
        if (unlikely(ret)) {
 err_req:
                io_cqring_add_event(req, ret);
@@ -3613,10 +3628,10 @@ err_req:
        if (*link) {
                struct io_kiocb *prev = *link;
 
-               if (req->sqe->flags & IOSQE_IO_DRAIN)
+               if (sqe->flags & IOSQE_IO_DRAIN)
                        (*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN;
 
-               if (req->sqe->flags & IOSQE_IO_HARDLINK)
+               if (sqe->flags & IOSQE_IO_HARDLINK)
                        req->flags |= REQ_F_HARDLINK;
 
                if (io_alloc_async_ctx(req)) {
@@ -3624,7 +3639,7 @@ err_req:
                        goto err_req;
                }
 
-               ret = io_req_defer_prep(req);
+               ret = io_req_defer_prep(req, sqe);
                if (ret) {
                        /* fail even hard links since we don't submit */
                        prev->flags |= REQ_F_FAIL_LINK;
@@ -3632,15 +3647,18 @@ err_req:
                }
                trace_io_uring_link(ctx, req, prev);
                list_add_tail(&req->link_list, &prev->link_list);
-       } else if (req->sqe->flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
+       } else if (sqe->flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
                req->flags |= REQ_F_LINK;
-               if (req->sqe->flags & IOSQE_IO_HARDLINK)
+               if (sqe->flags & IOSQE_IO_HARDLINK)
                        req->flags |= REQ_F_HARDLINK;
 
                INIT_LIST_HEAD(&req->link_list);
+               ret = io_req_defer_prep(req, sqe);
+               if (ret)
+                       req->flags |= REQ_F_FAIL_LINK;
                *link = req;
        } else {
-               io_queue_sqe(req);
+               io_queue_sqe(req, sqe);
        }
 
        return true;
@@ -3685,14 +3703,15 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
 }
 
 /*
- * Fetch an sqe, if one is available. Note that req->sqe will point to memory
+ * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
  * that is mapped by userspace. This means that care needs to be taken to
  * ensure that reads are stable, as we cannot rely on userspace always
  * being a good citizen. If members of the sqe are validated and then later
  * used, it's important that those reads are done through READ_ONCE() to
  * prevent a re-load down the line.
  */
-static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req)
+static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
+                         const struct io_uring_sqe **sqe_ptr)
 {
        struct io_rings *rings = ctx->rings;
        u32 *sq_array = ctx->sq_array;
@@ -3719,9 +3738,9 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req)
                 * link list.
                 */
                req->sequence = ctx->cached_sq_head;
-               req->sqe = &ctx->sq_sqes[head];
-               req->opcode = READ_ONCE(req->sqe->opcode);
-               req->user_data = READ_ONCE(req->sqe->user_data);
+               *sqe_ptr = &ctx->sq_sqes[head];
+               req->opcode = READ_ONCE((*sqe_ptr)->opcode);
+               req->user_data = READ_ONCE((*sqe_ptr)->user_data);
                ctx->cached_sq_head++;
                return true;
        }
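
The comment above states the key invariant: the SQE lives in memory userspace can rewrite at any time, so each field is sampled once with READ_ONCE() and only the local copy is validated and used. READ_ONCE() is roughly a volatile load; the relaxed C11 atomic loads below are a userspace approximation of the same snapshot-then-use discipline:

#include <stdatomic.h>
#include <stdint.h>

struct shared_sqe {
	_Atomic uint8_t  opcode;
	_Atomic uint64_t user_data;
};

/* Sample each shared field exactly once and work with the local copy,
 * so a concurrent writer cannot change a value between the check and
 * the use (the TOCTOU hazard the comment is guarding against). */
static void snapshot_sqe(const struct shared_sqe *sqe,
			 uint8_t *op, uint64_t *data)
{
	*op   = atomic_load_explicit(&sqe->opcode, memory_order_relaxed);
	*data = atomic_load_explicit(&sqe->user_data, memory_order_relaxed);
}

int main(void)
{
	struct shared_sqe sqe;
	uint8_t op;
	uint64_t data;

	atomic_store(&sqe.opcode, 1);
	atomic_store(&sqe.user_data, 42);
	snapshot_sqe(&sqe, &op, &data);
	return op != 1;
}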
@@ -3753,6 +3772,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
        }
 
        for (i = 0; i < nr; i++) {
+               const struct io_uring_sqe *sqe;
                struct io_kiocb *req;
                unsigned int sqe_flags;
 
@@ -3762,7 +3782,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
                                submitted = -EAGAIN;
                        break;
                }
-               if (!io_get_sqring(ctx, req)) {
+               if (!io_get_sqring(ctx, req, &sqe)) {
                        __io_free_req(req);
                        break;
                }
@@ -3776,7 +3796,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
                }
 
                submitted++;
-               sqe_flags = req->sqe->flags;
+               sqe_flags = sqe->flags;
 
                req->ring_file = ring_file;
                req->ring_fd = ring_fd;
@@ -3784,7 +3804,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
                req->in_async = async;
                req->needs_fixed_file = async;
                trace_io_uring_submit_sqe(ctx, req->user_data, true, async);
-               if (!io_submit_sqe(req, statep, &link))
+               if (!io_submit_sqe(req, sqe, statep, &link))
                        break;
                /*
                 * If previous wasn't linked and we have a linked command,
@@ -4702,7 +4722,7 @@ static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
                if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
                        return -EFAULT;
 
-               dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
+               dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
                dst->iov_len = ciov.iov_len;
                return 0;
        }
index 6782f0d..49e5383 100644 (file)
@@ -19,6 +19,8 @@ struct ahci_host_priv;
 struct platform_device;
 struct scsi_host_template;
 
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
 int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
 void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
 int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
index d3bbfdd..2dbde11 100644 (file)
@@ -1175,6 +1175,7 @@ extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
                                        struct ata_taskfile *tf, u16 *id);
 extern void ata_qc_complete(struct ata_queued_cmd *qc);
 extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
+extern u64 ata_qc_get_active(struct ata_port *ap);
 extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
 extern int ata_std_bios_param(struct scsi_device *sdev,
                              struct block_device *bdev,
index efe06d6..e59eb9e 100755 (executable)
@@ -31,15 +31,12 @@ class KunitStatus(Enum):
        TEST_FAILURE = auto()
 
 def create_default_kunitconfig():
-       if not os.path.exists(kunit_kernel.KUNITCONFIG_PATH):
+       if not os.path.exists(kunit_kernel.kunitconfig_path):
                shutil.copyfile('arch/um/configs/kunit_defconfig',
-                               kunit_kernel.KUNITCONFIG_PATH)
+                               kunit_kernel.kunitconfig_path)
 
 def run_tests(linux: kunit_kernel.LinuxSourceTree,
              request: KunitRequest) -> KunitResult:
-       if request.defconfig:
-               create_default_kunitconfig()
-
        config_start = time.time()
        success = linux.build_reconfig(request.build_dir)
        config_end = time.time()
@@ -108,15 +105,22 @@ def main(argv, linux=None):
        run_parser.add_argument('--build_dir',
                                help='As in the make command, it specifies the build '
                                'directory.',
-                               type=str, default=None, metavar='build_dir')
+                               type=str, default='', metavar='build_dir')
 
        run_parser.add_argument('--defconfig',
-                               help='Uses a default kunitconfig.',
+                               help='Uses a default .kunitconfig.',
                                action='store_true')
 
        cli_args = parser.parse_args(argv)
 
        if cli_args.subcommand == 'run':
+               if cli_args.build_dir:
+                       if not os.path.exists(cli_args.build_dir):
+                               os.mkdir(cli_args.build_dir)
+                       kunit_kernel.kunitconfig_path = os.path.join(
+                               cli_args.build_dir,
+                               kunit_kernel.kunitconfig_path)
+
                if cli_args.defconfig:
                        create_default_kunitconfig()
 
index bf38768..cc5d844 100644 (file)
@@ -14,7 +14,7 @@ import os
 import kunit_config
 
 KCONFIG_PATH = '.config'
-KUNITCONFIG_PATH = 'kunitconfig'
+kunitconfig_path = '.kunitconfig'
 
 class ConfigError(Exception):
        """Represents an error trying to configure the Linux kernel."""
@@ -82,7 +82,7 @@ class LinuxSourceTree(object):
 
        def __init__(self):
                self._kconfig = kunit_config.Kconfig()
-               self._kconfig.read_from_file(KUNITCONFIG_PATH)
+               self._kconfig.read_from_file(kunitconfig_path)
                self._ops = LinuxSourceTreeOperations()
 
        def clean(self):
@@ -111,7 +111,7 @@ class LinuxSourceTree(object):
                return True
 
        def build_reconfig(self, build_dir):
-               """Creates a new .config if it is not a subset of the kunitconfig."""
+               """Creates a new .config if it is not a subset of the .kunitconfig."""
                kconfig_path = get_kconfig_path(build_dir)
                if os.path.exists(kconfig_path):
                        existing_kconfig = kunit_config.Kconfig()
@@ -140,10 +140,10 @@ class LinuxSourceTree(object):
                        return False
                return True
 
-       def run_kernel(self, args=[], timeout=None, build_dir=None):
+       def run_kernel(self, args=[], timeout=None, build_dir=''):
                args.extend(['mem=256M'])
                process = self._ops.linux_bin(args, timeout, build_dir)
-               with open('test.log', 'w') as f:
+               with open(os.path.join(build_dir, 'test.log'), 'w') as f:
                        for line in process.stdout:
                                f.write(line.rstrip().decode('ascii') + '\n')
                                yield line.rstrip().decode('ascii')
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index a2a8ea6..cba9775 100755 (executable)
@@ -174,6 +174,7 @@ class KUnitMainTest(unittest.TestCase):
                kunit.main(['run'], self.linux_source_mock)
                assert self.linux_source_mock.build_reconfig.call_count == 1
                assert self.linux_source_mock.run_kernel.call_count == 1
+               self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=300)
                self.print_mock.assert_any_call(StrContains('Testing complete.'))
 
        def test_run_passes_args_fail(self):
@@ -199,7 +200,14 @@ class KUnitMainTest(unittest.TestCase):
                timeout = 3453
                kunit.main(['run', '--timeout', str(timeout)], self.linux_source_mock)
                assert self.linux_source_mock.build_reconfig.call_count == 1
-               self.linux_source_mock.run_kernel.assert_called_once_with(build_dir=None, timeout=timeout)
+               self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=timeout)
+               self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
+       def test_run_builddir(self):
+               build_dir = '.kunit'
+               kunit.main(['run', '--build_dir', build_dir], self.linux_source_mock)
+               assert self.linux_source_mock.build_reconfig.call_count == 1
+               self.linux_source_mock.run_kernel.assert_called_once_with(build_dir=build_dir, timeout=300)
                self.print_mock.assert_any_call(StrContains('Testing complete.'))
 
 if __name__ == '__main__':
diff --git a/tools/testing/selftests/filesystems/epoll/Makefile b/tools/testing/selftests/filesystems/epoll/Makefile
index e62f3d4..78ae4aa 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
 CFLAGS += -I../../../../../usr/include/
-LDFLAGS += -lpthread
+LDLIBS += -lpthread
 TEST_GEN_PROGS := epoll_wakeup_test
 
 include ../../lib.mk
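
The one-word change above is about link order: make's implicit link rule places $(LDFLAGS) before the object files and $(LDLIBS) after them, so -lpthread in LDFLAGS can be discarded by --as-needed linkers before any pthread symbol is requested, leaving the test binary with unresolved references. Moving it to LDLIBS puts the library where the linker expects it.
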
diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
index b879305..5b8c0fe 100755 (executable)
@@ -34,6 +34,12 @@ test_modprobe()
 
 check_mods()
 {
+       local uid=$(id -u)
+       if [ $uid -ne 0 ]; then
+               echo "skip all tests: must be run as root" >&2
+               exit $ksft_skip
+       fi
+
        trap "test_modprobe" EXIT
        if [ ! -d $DIR ]; then
                modprobe test_firmware
diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
index 31eb09e..a6e3d55 100644 (file)
@@ -7,6 +7,9 @@
 MAX_RETRIES=600
 RETRY_INTERVAL=".1"    # seconds
 
+# Kselftest framework requirement - SKIP code is 4
+ksft_skip=4
+
 # log(msg) - write message to kernel log
 #      msg - insightful words
 function log() {
@@ -18,7 +21,16 @@ function log() {
 function skip() {
        log "SKIP: $1"
        echo "SKIP: $1" >&2
-       exit 4
+       exit $ksft_skip
+}
+
+# root test
+function is_root() {
+       uid=$(id -u)
+       if [ $uid -ne 0 ]; then
+               echo "skip all tests: must be run as root" >&2
+               exit $ksft_skip
+       fi
 }
 
 # die(msg) - game over, man
@@ -62,6 +74,7 @@ function set_ftrace_enabled() {
 #               for verbose livepatching output and turn on
 #               the ftrace_enabled sysctl.
 function setup_config() {
+       is_root
        push_config
        set_dynamic_debug
        set_ftrace_enabled 1
diff --git a/tools/testing/selftests/livepatch/test-state.sh b/tools/testing/selftests/livepatch/test-state.sh
index dc2908c..a082127 100755 (executable)
@@ -8,8 +8,7 @@ MOD_LIVEPATCH=test_klp_state
 MOD_LIVEPATCH2=test_klp_state2
 MOD_LIVEPATCH3=test_klp_state3
 
-set_dynamic_debug
-
+setup_config
 
 # TEST: Loading and removing a module that modifies the system state
 
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index eec2663..e8a657a 100644 (file)
@@ -15,7 +15,7 @@
 #include <errno.h>
 #include <stddef.h>
 
-static inline pid_t gettid(void)
+static inline pid_t rseq_gettid(void)
 {
        return syscall(__NR_gettid);
 }
@@ -373,11 +373,12 @@ void *test_percpu_spinlock_thread(void *arg)
                rseq_percpu_unlock(&data->lock, cpu);
 #ifndef BENCHMARK
                if (i != 0 && !(i % (reps / 10)))
-                       printf_verbose("tid %d: count %lld\n", (int) gettid(), i);
+                       printf_verbose("tid %d: count %lld\n",
+                                      (int) rseq_gettid(), i);
 #endif
        }
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && thread_data->reg &&
            rseq_unregister_current_thread())
                abort();
@@ -454,11 +455,12 @@ void *test_percpu_inc_thread(void *arg)
                } while (rseq_unlikely(ret));
 #ifndef BENCHMARK
                if (i != 0 && !(i % (reps / 10)))
-                       printf_verbose("tid %d: count %lld\n", (int) gettid(), i);
+                       printf_verbose("tid %d: count %lld\n",
+                                      (int) rseq_gettid(), i);
 #endif
        }
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && thread_data->reg &&
            rseq_unregister_current_thread())
                abort();
@@ -605,7 +607,7 @@ void *test_percpu_list_thread(void *arg)
        }
 
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && rseq_unregister_current_thread())
                abort();
 
@@ -796,7 +798,7 @@ void *test_percpu_buffer_thread(void *arg)
        }
 
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && rseq_unregister_current_thread())
                abort();
 
@@ -1011,7 +1013,7 @@ void *test_percpu_memcpy_buffer_thread(void *arg)
        }
 
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && rseq_unregister_current_thread())
                abort();
 
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
index d40d60e..3f63eb3 100644 (file)
@@ -149,11 +149,13 @@ static inline void rseq_clear_rseq_cs(void)
 /*
  * rseq_prepare_unload() should be invoked by each thread executing a rseq
  * critical section at least once between their last critical section and
- * library unload of the library defining the rseq critical section
- * (struct rseq_cs). This also applies to use of rseq in code generated by
- * JIT: rseq_prepare_unload() should be invoked at least once by each
- * thread executing a rseq critical section before reclaim of the memory
- * holding the struct rseq_cs.
+ * library unload of the library defining the rseq critical section (struct
+ * rseq_cs) or the code referred to by the struct rseq_cs start_ip and
+ * post_commit_offset fields. This also applies to use of rseq in code
+ * generated by JIT: rseq_prepare_unload() should be invoked at least once by
+ * each thread executing a rseq critical section before reclaim of the memory
+ * holding the struct rseq_cs or reclaim of the code pointed to by struct
+ * rseq_cs start_ip and post_commit_offset fields.
  */
 static inline void rseq_prepare_unload(void)
 {
diff --git a/tools/testing/selftests/rseq/settings b/tools/testing/selftests/rseq/settings
new file mode 100644 (file)
index 0000000..e7b9417
--- /dev/null
+++ b/tools/testing/selftests/rseq/settings
@@ -0,0 +1 @@
+timeout=0
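
For context: the kselftest runner reads a per-directory settings file whose timeout key overrides the default 45-second kill timer, and a value of 0 disables the timeout entirely, which the long-running rseq parameterized tests can need on slower configurations.
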