Merge tag 'net-5.15-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 23 Sep 2021 17:30:31 +0000 (10:30 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 23 Sep 2021 17:30:31 +0000 (10:30 -0700)
Pull networking fixes from Jakub Kicinski:
 "Current release - regressions:

   - dsa: bcm_sf2: fix array overrun in bcm_sf2_num_active_ports()

  Previous releases - regressions:

   - introduce a shutdown method to mdio device drivers, and make DSA
     switch drivers compatible with masters disappearing on shutdown,
     preventing an infinite wait on device references
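
     A minimal sketch of the shape such a hook takes (a hypothetical
     "foo" driver, not from the series; the shutdown member of
     struct mdio_driver is the one this series introduces):

       #include <linux/mdio.h>

       static int foo_probe(struct mdio_device *mdiodev)
       {
               return 0;       /* hypothetical setup elided */
       }

       static void foo_remove(struct mdio_device *mdiodev)
       {
       }

       static void foo_shutdown(struct mdio_device *mdiodev)
       {
               /* quiesce the hardware so nothing keeps referencing
                * a master device that is about to go away */
       }

       static struct mdio_driver foo_driver = {
               .probe    = foo_probe,
               .remove   = foo_remove,
               .shutdown = foo_shutdown,
               .mdiodrv.driver = {
                       .name = "foo-mdio",
               },
       };
       mdio_module_driver(foo_driver);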

   - fix issues in mdiobus users related to ->shutdown vs ->remove

   - virtio-net: fix pages leaking when building skb in big mode

   - xen-netback: correct success/error reporting for the
     SKB-with-fraglist case

   - dsa: tear down devlink port regions when tearing down the devlink
     port on error

   - nexthop: fix division by zero while replacing a resilient group
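
     Illustrative only (names are hypothetical, not the actual nexthop
     code): the general pattern is to reject a user-controllable
     divisor of zero before dividing:

       #include <linux/errno.h>

       /* guard a divisor that userspace can drive to zero */
       static int buckets_per_weight(unsigned int num_buckets,
                                     unsigned int total_weight)
       {
               if (!total_weight)
                       return -EINVAL; /* avoid division by zero */
               return num_buckets / total_weight;
       }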

   - hns3: check that queue, vf, and vlan ids are in range before
     using them
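
     A hedged sketch of that kind of validation (limits and names are
     hypothetical, not the hns3 code):

       #include <linux/errno.h>
       #include <linux/if_vlan.h>      /* VLAN_N_VID == 4096 */

       static int check_ids(u16 queue_id, u16 vf_id, u16 vlan_id,
                            u16 num_queues, u16 num_vfs)
       {
               if (queue_id >= num_queues || vf_id >= num_vfs ||
                   vlan_id >= VLAN_N_VID)
                       return -EINVAL;
               return 0;
       }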

  Previous releases - always broken:

   - napi: fix a race against netpoll that caused NAPI to get stuck

   - mlx4_en: ensure link operstate is updated even if link comes up
     before netdev registration

   - bnxt_en: fix TX timeout when TX ring size is set to the smallest

   - enetc: fix illegal access when reading affinity_hint; prevent oops
     on sysfs access

   - mtk_eth_soc: avoid creating duplicate offload entries
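
     A generic lookup-before-insert sketch (not the mtk_eth_soc code;
     the flow_entry structure here is made up for illustration):

       #include <linux/errno.h>
       #include <linux/list.h>

       struct flow_entry {
               struct list_head list;
               unsigned long cookie;
       };

       static struct flow_entry *flow_lookup(struct list_head *flows,
                                             unsigned long cookie)
       {
               struct flow_entry *e;

               list_for_each_entry(e, flows, list)
                       if (e->cookie == cookie)
                               return e;
               return NULL;
       }

       static int flow_add(struct list_head *flows,
                           struct flow_entry *new)
       {
               if (flow_lookup(flows, new->cookie))
                       return -EEXIST; /* already offloaded */
               list_add_tail(&new->list, flows);
               return 0;
       }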

  Misc:

   - core: correct the sock::sk_lock.owned lockdep annotations"
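
For context, a sketch of how sk_lock is normally taken (these are real
core APIs, but the snippet is illustrative rather than the patch
itself): lock_sock() sets sk_lock.owned under the spinlock and records
the acquisition with lockdep, which is the annotation the fix corrects.

  #include <net/sock.h>

  static void frob_sock(struct sock *sk)
  {
          lock_sock(sk);          /* sets sk_lock.owned + lockdep acquire */
          sock_owned_by_me(sk);   /* lockdep assert: we are the owner */
          /* ... touch socket state safely ... */
          release_sock(sk);       /* clears owned, processes backlog */
  }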

* tag 'net-5.15-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (51 commits)
  atlantic: Fix issue in the pm resume flow.
  net/mlx4_en: Don't allow aRFS for encapsulated packets
  net: mscc: ocelot: fix forwarding from BLOCKING ports remaining enabled
  net: ethernet: mtk_eth_soc: avoid creating duplicate offload entries
  nfc: st-nci: Add SPI ID matching DT compatible
  MAINTAINERS: remove Guvenc Gulce as net/smc maintainer
  nexthop: Fix memory leaks in nexthop notification chain listeners
  mptcp: ensure tx skbs always have the MPTCP ext
  qed: rdma - don't wait for resources under hw error recovery flow
  s390/qeth: fix deadlock during failing recovery
  s390/qeth: Fix deadlock in remove_discipline
  s390/qeth: fix NULL deref in qeth_clear_working_pool_list()
  net: dsa: realtek: register the MDIO bus under devres
  net: dsa: don't allocate the slave_mii_bus using devres
  Doc: networking: Fix a typo in ice.rst
  net: dsa: fix dsa_tree_setup error path
  net/smc: fix 'workqueue leaked lock' in smc_conn_abort_work
  net/smc: add missing error check in smc_clc_prfx_set()
  net: hns3: fix a return value error in hclge_get_reset_status()
  net: hns3: check vlan id before using it
  ...

228 files changed:
Documentation/devicetree/bindings/arm/tegra.yaml
Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml [new file with mode: 0644]
MAINTAINERS
Makefile
arch/alpha/Kconfig
arch/alpha/include/asm/asm-prototypes.h
arch/alpha/include/asm/jensen.h
arch/alpha/kernel/sys_jensen.c
arch/alpha/lib/Makefile
arch/alpha/lib/udiv-qrnnd.S [moved from arch/alpha/math-emu/qrnnd.S with 98% similarity]
arch/alpha/math-emu/Makefile
arch/alpha/math-emu/math.c
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/process.c
arch/parisc/lib/iomap.c
arch/powerpc/kernel/interrupt.c
arch/powerpc/kernel/interrupt_64.S
arch/powerpc/kernel/mce.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/sysdev/xics/xics-common.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/net/bpf_jit_comp.c
arch/s390/pci/pci_mmio.c
arch/sh/boot/Makefile
arch/sparc/kernel/ioport.c
arch/sparc/lib/iomap.c
arch/x86/Kconfig
arch/x86/Makefile_32.cpu
arch/x86/kernel/cpu/mce/core.c
arch/x86/mm/init_64.c
arch/x86/mm/pat/memtype.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/mmu_pv.c
block/blk-cgroup.c
block/blk-integrity.c
block/blk-mq-tag.c
drivers/base/power/trace.c
drivers/cpufreq/cpufreq_governor_attr_set.c
drivers/cpufreq/intel_pstate.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h
drivers/gpu/drm/amd/pm/inc/smu_types.h
drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/etnaviv/etnaviv_iommu.c
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
drivers/gpu/drm/i915/gt/intel_rps.c
drivers/gpu/drm/i915/gt/uc/intel_uc.c
drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/net/hamradio/dmascc.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/configfs.c
drivers/of/device.c
drivers/of/property.c
drivers/pci/pci-acpi.c
drivers/pci/quirks.c
drivers/pci/vpd.c
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/dell/Kconfig
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/intel/hid.c
drivers/platform/x86/intel/punit_ipc.c
drivers/platform/x86/lg-laptop.c
drivers/platform/x86/touchscreen_dmi.c
drivers/regulator/max14577-regulator.c
drivers/regulator/qcom-rpmh-regulator.c
drivers/s390/char/sclp_early.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_queue.c
drivers/spi/spi-rockchip.c
drivers/spi/spi-tegra20-slink.c
drivers/spi/spi.c
drivers/video/fbdev/Kconfig
drivers/xen/Kconfig
drivers/xen/balloon.c
drivers/xen/swiotlb-xen.c
fs/afs/callback.c
fs/afs/cell.c
fs/afs/dir.c
fs/afs/dir_edit.c
fs/afs/file.c
fs/afs/fs_probe.c
fs/afs/fsclient.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/protocol_afs.h [new file with mode: 0644]
fs/afs/protocol_yfs.h
fs/afs/rotate.c
fs/afs/server.c
fs/afs/super.c
fs/afs/write.c
fs/cifs/cache.c
fs/cifs/cifs_debug.c
fs/cifs/cifs_fs_sb.h
fs/cifs/cifs_ioctl.h
fs/cifs/cifs_spnego.c
fs/cifs/cifs_spnego.h
fs/cifs/cifs_unicode.c
fs/cifs/cifsacl.c
fs/cifs/cifsacl.h
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/dns_resolve.c
fs/cifs/dns_resolve.h
fs/cifs/export.c
fs/cifs/file.c
fs/cifs/fscache.c
fs/cifs/fscache.h
fs/cifs/inode.c
fs/cifs/ioctl.c
fs/cifs/link.c
fs/cifs/misc.c
fs/cifs/netmisc.c
fs/cifs/ntlmssp.h
fs/cifs/readdir.c
fs/cifs/rfc1002pdu.h
fs/cifs/sess.c
fs/cifs/smb2file.c
fs/cifs/smb2glob.h
fs/cifs/smb2inode.c
fs/cifs/smb2misc.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/cifs/smb2status.h
fs/cifs/smb2transport.c
fs/cifs/smberr.h
fs/cifs/transport.c
fs/cifs/winucase.c
fs/cifs/xattr.c
fs/io-wq.c
fs/io_uring.c
fs/ksmbd/misc.c
fs/ksmbd/misc.h
fs/ksmbd/smb2pdu.c
fs/ksmbd/transport_rdma.c
fs/lockd/svcxdr.h
fs/nfsd/nfs4state.c
fs/qnx4/dir.c
fs/smbfs_common/smbfsctl.h
include/asm-generic/io.h
include/asm-generic/iomap.h
include/asm-generic/pci_iomap.h
include/linux/sched.h
include/linux/uio.h
include/trace/events/afs.h
include/uapi/linux/cifs/cifs_mount.h
include/uapi/linux/io_uring.h
init/main.c
kernel/dma/debug.c
kernel/dma/mapping.c
kernel/events/core.c
kernel/locking/rwbase_rt.c
lib/iov_iter.c
lib/pci_iomap.c
mm/memcontrol.c
mm/memory.c
mm/workingset.c
scripts/Makefile.clang
scripts/Makefile.modpost
scripts/checkkconfigsymbols.py
scripts/clang-tools/gen_compile_commands.py
tools/lib/perf/evsel.c
tools/perf/builtin-script.c
tools/perf/ui/browser.c
tools/perf/ui/browser.h
tools/perf/ui/browsers/annotate.c
tools/perf/util/bpf-event.c
tools/perf/util/machine.c
tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
tools/testing/selftests/powerpc/tm/tm-syscall.c

index b962fa6..d79d36a 100644 (file)
@@ -54,7 +54,7 @@ properties:
           - const: toradex,apalis_t30
           - const: nvidia,tegra30
       - items:
-          - const: toradex,apalis_t30-eval-v1.1
+          - const: toradex,apalis_t30-v1.1-eval
           - const: toradex,apalis_t30-eval
           - const: toradex,apalis_t30-v1.1
           - const: toradex,apalis_t30
index fbb59c9..78044c3 100644 (file)
@@ -9,7 +9,7 @@ function block.
 
 All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node.
 For a description of the MMSYS_CONFIG binding, see
-Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt.
+Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
 
 DISP function blocks
 ====================
diff --git a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
new file mode 100644 (file)
index 0000000..b9ca8ef
--- /dev/null
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/samsung,exynos-ufs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC series UFS host controller Device Tree Bindings
+
+maintainers:
+  - Alim Akhtar <alim.akhtar@samsung.com>
+
+description: |
+  Each Samsung UFS host controller instance should have its own node.
+  This binding defines Samsung-specific bindings other than what is used
+  in the common ufshcd bindings
+  [1] Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+
+properties:
+
+  compatible:
+    enum:
+      - samsung,exynos7-ufs
+
+  reg:
+    items:
+      - description: HCI register
+      - description: vendor specific register
+      - description: unipro register
+      - description: UFS protector register
+
+  reg-names:
+    items:
+      - const: hci
+      - const: vs_hci
+      - const: unipro
+      - const: ufsp
+
+  clocks:
+    items:
+      - description: ufs link core clock
+      - description: unipro main clock
+
+  clock-names:
+    items:
+      - const: core_clk
+      - const: sclk_unipro_main
+
+  interrupts:
+    maxItems: 1
+
+  phys:
+    maxItems: 1
+
+  phy-names:
+    const: ufs-phy
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - phys
+  - phy-names
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/exynos7-clk.h>
+
+    ufs: ufs@15570000 {
+       compatible = "samsung,exynos7-ufs";
+       reg = <0x15570000 0x100>,
+             <0x15570100 0x100>,
+             <0x15571000 0x200>,
+             <0x15572000 0x300>;
+       reg-names = "hci", "vs_hci", "unipro", "ufsp";
+       interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+       clocks = <&clock_fsys1 ACLK_UFS20_LINK>,
+                <&clock_fsys1 SCLK_UFSUNIPRO20_USER>;
+       clock-names = "core_clk", "sclk_unipro_main";
+       pinctrl-names = "default";
+       pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
+       phys = <&ufs_phy>;
+       phy-names = "ufs-phy";
+    };
+...
index 3c81497..7f46153 100644 (file)
@@ -2804,9 +2804,8 @@ F:        arch/arm/mach-pxa/include/mach/vpac270.h
 F:     arch/arm/mach-pxa/vpac270.c
 
 ARM/VT8500 ARM ARCHITECTURE
-M:     Tony Prisk <linux@prisktech.co.nz>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Maintained
+S:     Orphan
 F:     Documentation/devicetree/bindings/i2c/i2c-wmt.txt
 F:     arch/arm/mach-vt8500/
 F:     drivers/clocksource/timer-vt8500.c
@@ -13255,9 +13254,9 @@ F:      Documentation/scsi/NinjaSCSI.rst
 F:     drivers/scsi/nsp32*
 
 NIOS2 ARCHITECTURE
-M:     Ley Foon Tan <ley.foon.tan@intel.com>
+M:     Dinh Nguyen <dinguyen@kernel.org>
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 F:     arch/nios2/
 
 NITRO ENCLAVES (NE)
@@ -14342,7 +14341,8 @@ F:      Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
 F:     drivers/pci/controller/pci-ixp4xx.c
 
 PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
-M:     Jonathan Derrick <jonathan.derrick@intel.com>
+M:     Nirmal Patel <nirmal.patel@linux.intel.com>
+R:     Jonathan Derrick <jonathan.derrick@linux.dev>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 F:     drivers/pci/controller/vmd.c
@@ -17967,10 +17967,11 @@ F:    Documentation/admin-guide/svga.rst
 F:     arch/x86/boot/video*
 
 SWIOTLB SUBSYSTEM
-M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:     Christoph Hellwig <hch@infradead.org>
 L:     iommu@lists.linux-foundation.org
 S:     Supported
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
+W:     http://git.infradead.org/users/hch/dma-mapping.git
+T:     git git://git.infradead.org/users/hch/dma-mapping.git
 F:     arch/*/kernel/pci-swiotlb.c
 F:     include/linux/swiotlb.h
 F:     kernel/dma/swiotlb.c
@@ -20473,7 +20474,6 @@ F:      samples/bpf/xdpsock*
 F:     tools/lib/bpf/xsk*
 
 XEN BLOCK SUBSYSTEM
-M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 M:     Roger Pau Monné <roger.pau@citrix.com>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 S:     Supported
@@ -20521,7 +20521,7 @@ S:      Supported
 F:     drivers/net/xen-netback/*
 
 XEN PCI SUBSYSTEM
-M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:     Juergen Gross <jgross@suse.com>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 S:     Supported
 F:     arch/x86/pci/*xen*
@@ -20544,7 +20544,8 @@ S:      Supported
 F:     sound/xen/*
 
 XEN SWIOTLB SUBSYSTEM
-M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:     Juergen Gross <jgross@suse.com>
+M:     Stefano Stabellini <sstabellini@kernel.org>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:     iommu@lists.linux-foundation.org
 S:     Supported
index 34a0afc..5e7c1d8 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
index 02e5b67..4e87783 100644 (file)
@@ -20,7 +20,7 @@ config ALPHA
        select NEED_SG_DMA_LENGTH
        select VIRT_TO_BUS
        select GENERIC_IRQ_PROBE
-       select GENERIC_PCI_IOMAP if PCI
+       select GENERIC_PCI_IOMAP
        select AUTO_IRQ_AFFINITY if SMP
        select GENERIC_IRQ_SHOW
        select ARCH_WANT_IPC_PARSE_VERSION
@@ -199,7 +199,6 @@ config ALPHA_EIGER
 
 config ALPHA_JENSEN
        bool "Jensen"
-       depends on BROKEN
        select HAVE_EISA
        help
          DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
index b34cc1f..c8ae46f 100644 (file)
@@ -16,3 +16,4 @@ extern void __divlu(void);
 extern void __remlu(void);
 extern void __divqu(void);
 extern void __remqu(void);
+extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, unsigned long , unsigned long);
index 9168951..1c41314 100644 (file)
@@ -111,18 +111,18 @@ __EXTERN_INLINE void jensen_set_hae(unsigned long addr)
  * convinced that I need one of the newer machines.
  */
 
-static inline unsigned int jensen_local_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr)
 {
        return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
 }
 
-static inline void jensen_local_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr)
 {
        *(vuip)((addr << 9) + EISA_VL82C106) = b;
        mb();
 }
 
-static inline unsigned int jensen_bus_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr)
 {
        long result;
 
@@ -131,7 +131,7 @@ static inline unsigned int jensen_bus_inb(unsigned long addr)
        return __kernel_extbl(result, addr & 3);
 }
 
-static inline void jensen_bus_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr)
 {
        jensen_set_hae(0);
        *(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
index e5d870f..5c9c884 100644 (file)
@@ -7,6 +7,11 @@
  *
  * Code supporting the Jensen.
  */
+#define __EXTERN_INLINE
+#include <asm/io.h>
+#include <asm/jensen.h>
+#undef  __EXTERN_INLINE
+
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 
 #include <asm/ptrace.h>
 
-#define __EXTERN_INLINE inline
-#include <asm/io.h>
-#include <asm/jensen.h>
-#undef  __EXTERN_INLINE
-
 #include <asm/dma.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
index 854d5e7..1cc74f7 100644 (file)
@@ -14,6 +14,7 @@ ev6-$(CONFIG_ALPHA_EV6) := ev6-
 ev67-$(CONFIG_ALPHA_EV67) := ev67-
 
 lib-y =        __divqu.o __remqu.o __divlu.o __remlu.o \
+       udiv-qrnnd.o \
        udelay.o \
        $(ev6-y)memset.o \
        $(ev6-y)memcpy.o \
similarity index 98%
rename from arch/alpha/math-emu/qrnnd.S
rename to arch/alpha/lib/udiv-qrnnd.S
index d6373ec..b887aa5 100644 (file)
@@ -25,6 +25,7 @@
  # along with GCC; see the file COPYING.  If not, write to the 
  # Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  # MA 02111-1307, USA.
+#include <asm/export.h>
 
         .set noreorder
         .set noat
@@ -161,3 +162,4 @@ $Odd:
        ret     $31,($26),1
 
        .end    __udiv_qrnnd
+EXPORT_SYMBOL(__udiv_qrnnd)
index 6eda097..3206402 100644 (file)
@@ -7,4 +7,4 @@ ccflags-y := -w
 
 obj-$(CONFIG_MATHEMU) += math-emu.o
 
-math-emu-objs := math.o qrnnd.o
+math-emu-objs := math.o
index f7cef66..4212258 100644 (file)
@@ -403,5 +403,3 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
 egress:
        return si_code;
 }
-
-EXPORT_SYMBOL(__udiv_qrnnd);
index 5a294f2..ff49627 100644 (file)
@@ -513,7 +513,7 @@ size_t sve_state_size(struct task_struct const *task)
 void sve_alloc(struct task_struct *task)
 {
        if (task->thread.sve_state) {
-               memset(task->thread.sve_state, 0, sve_state_size(current));
+               memset(task->thread.sve_state, 0, sve_state_size(task));
                return;
        }
 
index 19100fe..40adb8c 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/nospec.h>
-#include <linux/sched.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
 #include <linux/unistd.h>
@@ -58,7 +57,7 @@
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
+unsigned long __stack_chk_guard __ro_after_init;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif
 
index f03adb1..367f639 100644 (file)
@@ -513,12 +513,15 @@ void ioport_unmap(void __iomem *addr)
        }
 }
 
+#ifdef CONFIG_PCI
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
        if (!INDIRECT_ADDR(addr)) {
                iounmap(addr);
        }
 }
+EXPORT_SYMBOL(pci_iounmap);
+#endif
 
 EXPORT_SYMBOL(ioread8);
 EXPORT_SYMBOL(ioread16);
@@ -544,4 +547,3 @@ EXPORT_SYMBOL(iowrite16_rep);
 EXPORT_SYMBOL(iowrite32_rep);
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
-EXPORT_SYMBOL(pci_iounmap);
index a73f3f7..de10a26 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/switch_to.h>
 #include <asm/syscall.h>
 #include <asm/time.h>
+#include <asm/tm.h>
 #include <asm/unistd.h>
 
 #if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
@@ -136,6 +137,48 @@ notrace long system_call_exception(long r3, long r4, long r5,
         */
        irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
 
+       /*
+        * If system call is called with TM active, set _TIF_RESTOREALL to
+        * prevent RFSCV being used to return to userspace, because POWER9
+        * TM implementation has problems with this instruction returning to
+        * transactional state. Final register values are not relevant because
+        * the transaction will be aborted upon return anyway. Or in the case
+        * of unsupported_scv SIGILL fault, the return state does not much
+        * matter because it's an edge case.
+        */
+       if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+                       unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
+               current_thread_info()->flags |= _TIF_RESTOREALL;
+
+       /*
+        * If the system call was made with a transaction active, doom it and
+        * return without performing the system call. Unless it was an
+        * unsupported scv vector, in which case it's treated like an illegal
+        * instruction.
+        */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
+           !trap_is_unsupported_scv(regs)) {
+               /* Enable TM in the kernel, and disable EE (for scv) */
+               hard_irq_disable();
+               mtmsr(mfmsr() | MSR_TM);
+
+               /* tabort, this dooms the transaction, nothing else */
+               asm volatile(".long 0x7c00071d | ((%0) << 16)"
+                               :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
+
+               /*
+                * Userspace will never see the return value. Execution will
+                * resume after the tbegin. of the aborted transaction with the
+                * checkpointed register state. A context switch could occur
+                * or signal delivered to the process before resuming the
+                * doomed transaction context, but that should all be handled
+                * as expected.
+                */
+               return -ENOSYS;
+       }
+#endif // CONFIG_PPC_TRANSACTIONAL_MEM
+
        local_irq_enable();
 
        if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
index d4212d2..ec950b0 100644 (file)
@@ -12,7 +12,6 @@
 #include <asm/mmu.h>
 #include <asm/ppc_asm.h>
 #include <asm/ptrace.h>
-#include <asm/tm.h>
 
        .section        ".toc","aw"
 SYS_CALL_TABLE:
@@ -55,12 +54,6 @@ COMPAT_SYS_CALL_TABLE:
        .globl system_call_vectored_\name
 system_call_vectored_\name:
 _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
-       extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
-       bne     tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
        SCV_INTERRUPT_TO_KERNEL
        mr      r10,r1
        ld      r1,PACAKSAVE(r13)
@@ -247,12 +240,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_common_real)
        .globl system_call_common
 system_call_common:
 _ASM_NOKPROBE_SYMBOL(system_call_common)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
-       extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
-       bne     tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
        mr      r10,r1
        ld      r1,PACAKSAVE(r13)
        std     r10,0(r1)
@@ -425,34 +412,6 @@ SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
 RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
 #endif
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-tabort_syscall:
-_ASM_NOKPROBE_SYMBOL(tabort_syscall)
-       /* Firstly we need to enable TM in the kernel */
-       mfmsr   r10
-       li      r9, 1
-       rldimi  r10, r9, MSR_TM_LG, 63-MSR_TM_LG
-       mtmsrd  r10, 0
-
-       /* tabort, this dooms the transaction, nothing else */
-       li      r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
-       TABORT(R9)
-
-       /*
-        * Return directly to userspace. We have corrupted user register state,
-        * but userspace will never see that register state. Execution will
-        * resume after the tbegin of the aborted transaction with the
-        * checkpointed register state.
-        */
-       li      r9, MSR_RI
-       andc    r10, r10, r9
-       mtmsrd  r10, 1
-       mtspr   SPRN_SRR0, r11
-       mtspr   SPRN_SRR1, r12
-       RFI_TO_USER
-       b       .       /* prevent speculative execution */
-#endif
-
        /*
         * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
         * touched, no exit work created, then this can be used.
index 47a683c..fd829f7 100644 (file)
@@ -249,6 +249,7 @@ void machine_check_queue_event(void)
 {
        int index;
        struct machine_check_event evt;
+       unsigned long msr;
 
        if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
                return;
@@ -262,8 +263,20 @@ void machine_check_queue_event(void)
        memcpy(&local_paca->mce_info->mce_event_queue[index],
               &evt, sizeof(evt));
 
-       /* Queue irq work to process this event later. */
-       irq_work_queue(&mce_event_process_work);
+       /*
+        * Queue irq work to process this event later. Before
+        * queuing the work enable translation for non radix LPAR,
+        * as irq_work_queue may try to access memory outside RMO
+        * region.
+        */
+       if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
+               msr = mfmsr();
+               mtmsr(msr | MSR_IR | MSR_DR);
+               irq_work_queue(&mce_event_process_work);
+               mtmsr(msr);
+       } else {
+               irq_work_queue(&mce_event_process_work);
+       }
 }
 
 void mce_common_process_ue(struct pt_regs *regs,
index 7507939..9048442 100644 (file)
@@ -2536,7 +2536,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
        /* The following code handles the fake_suspend = 1 case */
        mflr    r0
        std     r0, PPC_LR_STKOFF(r1)
-       stdu    r1, -PPC_MIN_STKFRM(r1)
+       stdu    r1, -TM_FRAME_SIZE(r1)
 
        /* Turn on TM. */
        mfmsr   r8
@@ -2551,10 +2551,42 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
        nop
 
+       /*
+        * It's possible that treclaim. may modify registers, if we have lost
+        * track of fake-suspend state in the guest due to it using rfscv.
+        * Save and restore registers in case this occurs.
+        */
+       mfspr   r3, SPRN_DSCR
+       mfspr   r4, SPRN_XER
+       mfspr   r5, SPRN_AMR
+       /* SPRN_TAR would need to be saved here if the kernel ever used it */
+       mfcr    r12
+       SAVE_NVGPRS(r1)
+       SAVE_GPR(2, r1)
+       SAVE_GPR(3, r1)
+       SAVE_GPR(4, r1)
+       SAVE_GPR(5, r1)
+       stw     r12, 8(r1)
+       std     r1, HSTATE_HOST_R1(r13)
+
        /* We have to treclaim here because that's the only way to do S->N */
        li      r3, TM_CAUSE_KVM_RESCHED
        TRECLAIM(R3)
 
+       GET_PACA(r13)
+       ld      r1, HSTATE_HOST_R1(r13)
+       REST_GPR(2, r1)
+       REST_GPR(3, r1)
+       REST_GPR(4, r1)
+       REST_GPR(5, r1)
+       lwz     r12, 8(r1)
+       REST_NVGPRS(r1)
+       mtspr   SPRN_DSCR, r3
+       mtspr   SPRN_XER, r4
+       mtspr   SPRN_AMR, r5
+       mtcr    r12
+       HMT_MEDIUM
+
        /*
         * We were in fake suspend, so we are not going to save the
         * register state as the guest checkpointed state (since
@@ -2582,7 +2614,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
        std     r5, VCPU_TFHAR(r9)
        std     r6, VCPU_TFIAR(r9)
 
-       addi    r1, r1, PPC_MIN_STKFRM
+       addi    r1, r1, TM_FRAME_SIZE
        ld      r0, PPC_LR_STKOFF(r1)
        mtlr    r0
        blr
index 5c1a157..244a727 100644 (file)
@@ -348,9 +348,9 @@ static int xics_host_map(struct irq_domain *domain, unsigned int virq,
        if (xics_ics->check(xics_ics, hwirq))
                return -EINVAL;
 
-       /* No chip data for the XICS domain */
+       /* Let the ICS be the chip data for the XICS domain. For ICS native */
        irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
-                           NULL, handle_fasteoi_irq, NULL, NULL);
+                           xics_ics, handle_fasteoi_irq, NULL, NULL);
 
        return 0;
 }
index 2bd90c5..b86de61 100644 (file)
@@ -685,16 +685,6 @@ config STACK_GUARD
          The minimum size for the stack guard should be 256 for 31 bit and
          512 for 64 bit.
 
-config WARN_DYNAMIC_STACK
-       def_bool n
-       prompt "Emit compiler warnings for function with dynamic stack usage"
-       help
-         This option enables the compiler option -mwarn-dynamicstack. If the
-         compiler supports this options generates warnings for functions
-         that dynamically allocate stack space using alloca.
-
-         Say N if you are unsure.
-
 endmenu
 
 menu "I/O subsystem"
index a3cf33a..450b351 100644 (file)
@@ -85,13 +85,6 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
 endif
 endif
 
-ifdef CONFIG_WARN_DYNAMIC_STACK
-  ifneq ($(call cc-option,-mwarn-dynamicstack),)
-    KBUILD_CFLAGS += -mwarn-dynamicstack
-    KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
-  endif
-endif
-
 ifdef CONFIG_EXPOLINE
   ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),)
     CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
index 37b6115..6aad18e 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
 CONFIG_PREEMPT=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -503,6 +504,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -661,7 +663,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
@@ -720,6 +721,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
 CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
@@ -774,7 +777,6 @@ CONFIG_RANDOM32_SELFTEST=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_DMA_API_DEBUG=y
-CONFIG_STRING_SELFTEST=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
@@ -853,12 +855,12 @@ CONFIG_FAIL_FUNCTION=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_MIN_HEAP=y
-CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_STRING_SELFTEST=y
 CONFIG_TEST_BITOPS=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_LIVEPATCH=m
index 56a1cc8..f08b161 100644 (file)
@@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y
 CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -494,6 +495,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -648,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
@@ -708,6 +709,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
 CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
index 8841926..840d859 100644 (file)
@@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 
 #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)            \
 ({                                                             \
-       /* Branch instruction needs 6 bytes */                  \
-       int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
+       int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2;      \
        _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
        REG_SET_SEEN(b1);                                       \
        REG_SET_SEEN(b2);                                       \
@@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT4(0xb9080000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
-               if (!imm)
-                       break;
-               /* alfi %dst,imm */
-               EMIT6_IMM(0xc20b0000, dst_reg, imm);
+               if (imm != 0) {
+                       /* alfi %dst,imm */
+                       EMIT6_IMM(0xc20b0000, dst_reg, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
@@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT4(0xb9090000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
-               if (!imm)
-                       break;
-               /* alfi %dst,-imm */
-               EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+               if (imm != 0) {
+                       /* alfi %dst,-imm */
+                       EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
                if (!imm)
                        break;
-               /* agfi %dst,-imm */
-               EMIT6_IMM(0xc2080000, dst_reg, -imm);
+               if (imm == -0x80000000) {
+                       /* algfi %dst,0x80000000 */
+                       EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
+               } else {
+                       /* agfi %dst,-imm */
+                       EMIT6_IMM(0xc2080000, dst_reg, -imm);
+               }
                break;
        /*
         * BPF_MUL
@@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT4(0xb90c0000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
-               if (imm == 1)
-                       break;
-               /* msfi %r5,imm */
-               EMIT6_IMM(0xc2010000, dst_reg, imm);
+               if (imm != 1) {
+                       /* msfi %r5,imm */
+                       EMIT6_IMM(0xc2010000, dst_reg, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
@@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                        if (BPF_OP(insn->code) == BPF_MOD)
                                /* lhgi %dst,0 */
                                EMIT4_IMM(0xa7090000, dst_reg, 0);
+                       else
+                               EMIT_ZERO(dst_reg);
                        break;
                }
                /* lhi %w0,0 */
@@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT4(0xb9820000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
-               if (!imm)
-                       break;
-               /* xilf %dst,imm */
-               EMIT6_IMM(0xc0070000, dst_reg, imm);
+               if (imm != 0) {
+                       /* xilf %dst,imm */
+                       EMIT6_IMM(0xc0070000, dst_reg, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
@@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
                break;
        case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
-               if (imm == 0)
-                       break;
-               /* sll %dst,imm(%r0) */
-               EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+               if (imm != 0) {
+                       /* sll %dst,imm(%r0) */
+                       EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
@@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
                break;
        case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
-               if (imm == 0)
-                       break;
-               /* srl %dst,imm(%r0) */
-               EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+               if (imm != 0) {
+                       /* srl %dst,imm(%r0) */
+                       EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
@@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
                break;
        case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
-               if (imm == 0)
-                       break;
-               /* sra %dst,imm(%r0) */
-               EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+               if (imm != 0) {
+                       /* sra %dst,imm(%r0) */
+                       EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
index ae683aa..c5b35ea 100644 (file)
@@ -159,7 +159,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
 
        mmap_read_lock(current->mm);
        ret = -EINVAL;
-       vma = find_vma(current->mm, mmio_addr);
+       vma = vma_lookup(current->mm, mmio_addr);
        if (!vma)
                goto out_unlock_mmap;
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
@@ -298,7 +298,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
 
        mmap_read_lock(current->mm);
        ret = -EINVAL;
-       vma = find_vma(current->mm, mmio_addr);
+       vma = vma_lookup(current->mm, mmio_addr);
        if (!vma)
                goto out_unlock_mmap;
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
index 58592df..c081e7e 100644 (file)
@@ -80,30 +80,30 @@ $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
 $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
        $(call if_changed,lzo)
 
-$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
        $(call if_changed,uimage,bzip2)
 
-$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
        $(call if_changed,uimage,gzip)
 
-$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
        $(call if_changed,uimage,lzma)
 
-$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
+$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE
        $(call if_changed,uimage,xz)
 
-$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
        $(call if_changed,uimage,lzo)
 
-$(obj)/uImage.bin: $(obj)/vmlinux.bin
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
        $(call if_changed,uimage,none)
 
 OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
-$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
+$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux FORCE
        $(call if_changed,objcopy)
 
 OBJCOPYFLAGS_uImage.srec := -I binary -O srec
-$(obj)/uImage.srec: $(obj)/uImage
+$(obj)/uImage.srec: $(obj)/uImage FORCE
        $(call if_changed,objcopy)
 
 $(obj)/uImage: $(obj)/uImage.$(suffix-y)
index 8e1d72a..7ceae24 100644 (file)
@@ -356,7 +356,9 @@ err_nomem:
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
 {
-       if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
+       size = PAGE_ALIGN(size);
+
+       if (!sparc_dma_free_resource(cpu_addr, size))
                return;
 
        dma_make_coherent(dma_addr, size);
index c9da9f1..f3a8cd4 100644 (file)
@@ -19,8 +19,10 @@ void ioport_unmap(void __iomem *addr)
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
 
+#ifdef CONFIG_PCI
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
        /* nothing to do */
 }
 EXPORT_SYMBOL(pci_iounmap);
+#endif
index 4e001bb..dad7f85 100644 (file)
@@ -339,6 +339,11 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
 config ARCH_HIBERNATION_POSSIBLE
        def_bool y
 
+config ARCH_NR_GPIO
+       int
+       default 1024 if X86_64
+       default 512
+
 config ARCH_SUSPEND_POSSIBLE
        def_bool y
 
index e7355f8..94834c4 100644 (file)
@@ -4,6 +4,12 @@
 
 tune           = $(call cc-option,-mtune=$(1),$(2))
 
+ifdef CONFIG_CC_IS_CLANG
+align          := -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0)
+else
+align          := -falign-functions=0 -falign-jumps=0 -falign-loops=0
+endif
+
 cflags-$(CONFIG_M486SX)                += -march=i486
 cflags-$(CONFIG_M486)          += -march=i486
 cflags-$(CONFIG_M586)          += -march=i586
@@ -19,11 +25,11 @@ cflags-$(CONFIG_MK6)                += -march=k6
 # They make zero difference whatsosever to performance at this time.
 cflags-$(CONFIG_MK7)           += -march=athlon
 cflags-$(CONFIG_MK8)           += $(call cc-option,-march=k8,-march=athlon)
-cflags-$(CONFIG_MCRUSOE)       += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
-cflags-$(CONFIG_MEFFICEON)     += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCRUSOE)       += -march=i686 $(align)
+cflags-$(CONFIG_MEFFICEON)     += -march=i686 $(call tune,pentium3) $(align)
 cflags-$(CONFIG_MWINCHIPC6)    += $(call cc-option,-march=winchip-c6,-march=i586)
 cflags-$(CONFIG_MWINCHIP3D)    += $(call cc-option,-march=winchip2,-march=i586)
-cflags-$(CONFIG_MCYRIXIII)     += $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCYRIXIII)     += $(call cc-option,-march=c3,-march=i486) $(align)
 cflags-$(CONFIG_MVIAC3_2)      += $(call cc-option,-march=c3-2,-march=i686)
 cflags-$(CONFIG_MVIAC7)                += -march=i686
 cflags-$(CONFIG_MCORE2)                += -march=i686 $(call tune,core2)
index 8cb7816..193204a 100644 (file)
@@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
 
 static void kill_me_now(struct callback_head *ch)
 {
+       struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
+
+       p->mce_count = 0;
        force_sig(SIGBUS);
 }
 
@@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
        int flags = MF_ACTION_REQUIRED;
        int ret;
 
+       p->mce_count = 0;
        pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
 
        if (!p->mce_ripv)
@@ -1290,17 +1294,34 @@ static void kill_me_maybe(struct callback_head *cb)
        }
 }
 
-static void queue_task_work(struct mce *m, int kill_current_task)
+static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
 {
-       current->mce_addr = m->addr;
-       current->mce_kflags = m->kflags;
-       current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
-       current->mce_whole_page = whole_page(m);
+       int count = ++current->mce_count;
 
-       if (kill_current_task)
-               current->mce_kill_me.func = kill_me_now;
-       else
-               current->mce_kill_me.func = kill_me_maybe;
+       /* First call, save all the details */
+       if (count == 1) {
+               current->mce_addr = m->addr;
+               current->mce_kflags = m->kflags;
+               current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+               current->mce_whole_page = whole_page(m);
+
+               if (kill_current_task)
+                       current->mce_kill_me.func = kill_me_now;
+               else
+                       current->mce_kill_me.func = kill_me_maybe;
+       }
+
+       /* Ten is likely overkill. Don't expect more than two faults before task_work() */
+       if (count > 10)
+               mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
+
+       /* Second or later call, make sure page address matches the one from first call */
+       if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
+               mce_panic("Consecutive machine checks to different user pages", m, msg);
+
+       /* Do not call task_work_add() more than once */
+       if (count > 1)
+               return;
 
        task_work_add(current, &current->mce_kill_me, TWA_RESUME);
 }
@@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
                /* If this triggers there is no way to recover. Die hard. */
                BUG_ON(!on_thread_stack() || !user_mode(regs));
 
-               queue_task_work(&m, kill_current_task);
+               queue_task_work(&m, msg, kill_current_task);
 
        } else {
                /*
@@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
                }
 
                if (m.kflags & MCE_IN_KERNEL_COPYIN)
-                       queue_task_work(&m, kill_current_task);
+                       queue_task_work(&m, msg, kill_current_task);
        }
 out:
        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
index a6e1176..3609822 100644 (file)
@@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr)
                return 0;
 
        p4d = p4d_offset(pgd, addr);
-       if (p4d_none(*p4d))
+       if (!p4d_present(*p4d))
                return 0;
 
        pud = pud_offset(p4d, addr);
-       if (pud_none(*pud))
+       if (!pud_present(*pud))
                return 0;
 
        if (pud_large(*pud))
                return pfn_valid(pud_pfn(*pud));
 
        pmd = pmd_offset(pud, addr);
-       if (pmd_none(*pmd))
+       if (!pmd_present(*pmd))
                return 0;
 
        if (pmd_large(*pmd))
index 3112ca7..4ba2a3e 100644 (file)
@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
        int err = 0;
 
        start = sanitize_phys(start);
-       end = sanitize_phys(end);
+
+       /*
+        * The end address passed into this function is exclusive, but
+        * sanitize_phys() expects an inclusive address.
+        */
+       end = sanitize_phys(end - 1) + 1;
        if (start >= end) {
                WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
                                start, end - 1, cattr_name(req_type));
index 753f637..349f780 100644 (file)
@@ -1214,6 +1214,11 @@ static void __init xen_dom0_set_legacy_features(void)
        x86_platform.legacy.rtc = 1;
 }
 
+static void __init xen_domu_set_legacy_features(void)
+{
+       x86_platform.legacy.rtc = 0;
+}
+
 /* First C function to be called on Xen boot */
 asmlinkage __visible void __init xen_start_kernel(void)
 {
@@ -1359,6 +1364,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
                add_preferred_console("xenboot", 0, NULL);
                if (pci_xen)
                        x86_init.pci.arch_init = pci_xen_init;
+               x86_platform.set_legacy_features =
+                               xen_domu_set_legacy_features;
        } else {
                const struct dom0_vga_console_info *info =
                        (void *)((char *)xen_start_info +
index 1df5f01..8d75193 100644 (file)
@@ -1518,14 +1518,17 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
        if (pinned) {
                struct page *page = pfn_to_page(pfn);
 
-               if (static_branch_likely(&xen_struct_pages_ready))
+               pinned = false;
+               if (static_branch_likely(&xen_struct_pages_ready)) {
+                       pinned = PagePinned(page);
                        SetPagePinned(page);
+               }
 
                xen_mc_batch();
 
                __set_pfn_prot(pfn, PAGE_KERNEL_RO);
 
-               if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+               if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
                        __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
                xen_mc_issue(PARAVIRT_LAZY_MMU);
index 3c88a79..38b9f76 100644 (file)
@@ -1182,10 +1182,6 @@ int blkcg_init_queue(struct request_queue *q)
        if (preloaded)
                radix_tree_preload_end();
 
-       ret = blk_iolatency_init(q);
-       if (ret)
-               goto err_destroy_all;
-
        ret = blk_ioprio_init(q);
        if (ret)
                goto err_destroy_all;
@@ -1194,6 +1190,12 @@ int blkcg_init_queue(struct request_queue *q)
        if (ret)
                goto err_destroy_all;
 
+       ret = blk_iolatency_init(q);
+       if (ret) {
+               blk_throtl_exit(q);
+               goto err_destroy_all;
+       }
+
        return 0;
 
 err_destroy_all:
@@ -1364,10 +1366,14 @@ enomem:
        /* alloc failed, nothing's initialized yet, free everything */
        spin_lock_irq(&q->queue_lock);
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
+               struct blkcg *blkcg = blkg->blkcg;
+
+               spin_lock(&blkcg->lock);
                if (blkg->pd[pol->plid]) {
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
+               spin_unlock(&blkcg->lock);
        }
        spin_unlock_irq(&q->queue_lock);
        ret = -ENOMEM;
@@ -1399,12 +1405,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
        __clear_bit(pol->plid, q->blkcg_pols);
 
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
+               struct blkcg *blkcg = blkg->blkcg;
+
+               spin_lock(&blkcg->lock);
                if (blkg->pd[pol->plid]) {
                        if (pol->pd_offline_fn)
                                pol->pd_offline_fn(blkg->pd[pol->plid]);
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
+               spin_unlock(&blkcg->lock);
        }
 
        spin_unlock_irq(&q->queue_lock);
index 69a1217..16d5d53 100644 (file)
@@ -426,8 +426,15 @@ EXPORT_SYMBOL(blk_integrity_register);
  */
 void blk_integrity_unregister(struct gendisk *disk)
 {
+       struct blk_integrity *bi = &disk->queue->integrity;
+
+       if (!bi->profile)
+               return;
+
+       /* ensure all bios are off the integrity workqueue */
+       blk_flush_integrity();
        blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
-       memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+       memset(bi, 0, sizeof(*bi));
 }
 EXPORT_SYMBOL(blk_integrity_unregister);
 
index 86f8734..ff5caeb 100644 (file)
@@ -208,7 +208,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
 
        spin_lock_irqsave(&tags->lock, flags);
        rq = tags->rqs[bitnr];
-       if (!rq || !refcount_inc_not_zero(&rq->ref))
+       if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
                rq = NULL;
        spin_unlock_irqrestore(&tags->lock, flags);
        return rq;
index a97f33d..9466503 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/rtc.h>
 #include <linux/suspend.h>
+#include <linux/init.h>
 
 #include <linux/mc146818rtc.h>
 
@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
        const char *file = *(const char **)(tracedata + 2);
        unsigned int user_hash_value, file_hash_value;
 
+       if (!x86_platform.legacy.rtc)
+               return;
+
        user_hash_value = user % USERHASH;
        file_hash_value = hash_string(lineno, file, FILEHASH);
        set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
 
 static int __init early_resume_init(void)
 {
+       if (!x86_platform.legacy.rtc)
+               return 0;
+
        hash_value_early_read = read_magic_time();
        register_pm_notifier(&pm_trace_nb);
        return 0;
@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
        unsigned int val = hash_value_early_read;
        unsigned int user, file, dev;
 
+       if (!x86_platform.legacy.rtc)
+               return 0;
+
        user = val % USERHASH;
        val = val / USERHASH;
        file = val % FILEHASH;
index 66b05a3..a6f365b 100644 (file)
@@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l
        if (count)
                return count;
 
-       kobject_put(&attr_set->kobj);
        mutex_destroy(&attr_set->update_lock);
+       kobject_put(&attr_set->kobj);
        return 0;
 }
 EXPORT_SYMBOL_GPL(gov_attr_set_put);
index 1097f82..8c176b7 100644 (file)
@@ -3205,11 +3205,15 @@ static int __init intel_pstate_init(void)
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return -ENODEV;
 
-       if (no_load)
-               return -ENODEV;
-
        id = x86_match_cpu(hwp_support_ids);
        if (id) {
+               bool hwp_forced = intel_pstate_hwp_is_enabled();
+
+               if (hwp_forced)
+                       pr_info("HWP enabled by BIOS\n");
+               else if (no_load)
+                       return -ENODEV;
+
                copy_cpu_funcs(&core_funcs);
                /*
                 * Avoid enabling HWP for processors without EPP support,
@@ -3219,8 +3223,7 @@ static int __init intel_pstate_init(void)
                 * If HWP is enabled already, though, there is no choice but to
                 * deal with it.
                 */
-               if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
-                   intel_pstate_hwp_is_enabled()) {
+               if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
                        hwp_active++;
                        hwp_mode_bdw = id->driver_data;
                        intel_pstate.attr = hwp_cpufreq_attrs;
@@ -3235,7 +3238,11 @@ static int __init intel_pstate_init(void)
 
                        goto hwp_cpu_matched;
                }
+               pr_info("HWP not enabled\n");
        } else {
+               if (no_load)
+                       return -ENODEV;
+
                id = x86_match_cpu(intel_pstate_cpu_ids);
                if (!id) {
                        pr_info("CPU model not supported\n");
@@ -3314,10 +3321,9 @@ static int __init intel_pstate_setup(char *str)
        else if (!strcmp(str, "passive"))
                default_driver = &intel_cpufreq;
 
-       if (!strcmp(str, "no_hwp")) {
-               pr_info("HWP disabled\n");
+       if (!strcmp(str, "no_hwp"))
                no_hwp = 1;
-       }
+
        if (!strcmp(str, "force"))
                force_load = 1;
        if (!strcmp(str, "hwp_only"))
index dc3c6b3..d356e32 100644 (file)
@@ -758,7 +758,7 @@ enum amd_hw_ip_block_type {
        MAX_HWIP
 };
 
-#define HWIP_MAX_INSTANCE      8
+#define HWIP_MAX_INSTANCE      10
 
 struct amd_powerplay {
        void *pp_handle;
index 3003ee1..1d41c2c 100644 (file)
@@ -192,6 +192,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
                kgd2kfd_suspend(adev->kfd.dev, run_pm);
 }
 
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
+{
+       int r = 0;
+
+       if (adev->kfd.dev)
+               r = kgd2kfd_resume_iommu(adev->kfd.dev);
+
+       return r;
+}
+
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
 {
        int r = 0;
index ec028cf..3bc52b2 100644 (file)
@@ -137,6 +137,7 @@ int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
 
 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
                        const void *ih_ring_entry);
@@ -327,6 +328,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
                         const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
@@ -365,6 +367,11 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
 }
 
+static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+{
+       return 0;
+}
+
 static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
        return 0;
index 2771288..463b9c0 100644 (file)
@@ -1544,20 +1544,18 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
        struct dentry *ent;
        int r, i;
 
-
-
        ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
                                  &fops_ib_preempt);
-       if (!ent) {
+       if (IS_ERR(ent)) {
                DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
-               return -EIO;
+               return PTR_ERR(ent);
        }
 
        ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
                                  &fops_sclk_set);
-       if (!ent) {
+       if (IS_ERR(ent)) {
                DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
-               return -EIO;
+               return PTR_ERR(ent);
        }
 
        /* Register debugfs entries for amdgpu_ttm */
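
Both hunks above fix the same pitfall: debugfs_create_file() signals failure
with an ERR_PTR-encoded pointer, never NULL, so the old "!ent" checks could
not trigger. The encoding convention can be shown self-contained; the helpers
below are a userspace re-implementation for illustration only, not the
kernel's:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Small negative errnos are folded into the top of the pointer range. */
static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* A create() that fails the way debugfs does: ERR_PTR, never NULL. */
static void *create_file(int should_fail)
{
        return should_fail ? ERR_PTR(-ENODEV) : (void *)0x1000;
}

int main(void)
{
        void *ent = create_file(1);

        if (!ent)
                puts("never reached: failure is not NULL");
        if (IS_ERR(ent))
                printf("failed: %ld\n", PTR_ERR(ent));  /* -19 (ENODEV) */
        return 0;
}
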
index 41c6b3a..ab3794c 100644 (file)
@@ -2394,6 +2394,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (r)
                goto init_failed;
 
+       r = amdgpu_amdkfd_resume_iommu(adev);
+       if (r)
+               goto init_failed;
+
        r = amdgpu_device_ip_hw_init_phase1(adev);
        if (r)
                goto init_failed;
@@ -3148,6 +3152,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
 {
        int r;
 
+       r = amdgpu_amdkfd_resume_iommu(adev);
+       if (r)
+               return r;
+
        r = amdgpu_device_ip_resume_phase1(adev);
        if (r)
                return r;
@@ -4601,6 +4609,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
                                dev_warn(tmp_adev->dev, "asic atom init failed!");
                        } else {
                                dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+                               r = amdgpu_amdkfd_resume_iommu(tmp_adev);
+                               if (r)
+                                       goto out;
+
                                r = amdgpu_device_ip_resume_phase1(tmp_adev);
                                if (r)
                                        goto out;
index c7797ea..9ff600a 100644 (file)
@@ -598,7 +598,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
                break;
        default:
                adev->gmc.tmz_enabled = false;
-               dev_warn(adev->dev,
+               dev_info(adev->dev,
                         "Trusted Memory Zone (TMZ) feature not supported\n");
                break;
        }
index dc44c94..9873251 100644 (file)
@@ -757,7 +757,7 @@ Out:
        return res;
 }
 
-inline uint32_t amdgpu_ras_eeprom_max_record_count(void)
+uint32_t amdgpu_ras_eeprom_max_record_count(void)
 {
        return RAS_MAX_RECORD_COUNT;
 }
index f95fc61..6bb0057 100644 (file)
@@ -120,7 +120,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
 int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
                             struct eeprom_table_record *records, const u32 num);
 
-inline uint32_t amdgpu_ras_eeprom_max_record_count(void);
+uint32_t amdgpu_ras_eeprom_max_record_count(void);
 
 void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
 
index 7b634a1..0554576 100644 (file)
@@ -428,8 +428,8 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
        ent = debugfs_create_file(name,
                                  S_IFREG | S_IRUGO, root,
                                  ring, &amdgpu_debugfs_ring_fops);
-       if (!ent)
-               return -ENOMEM;
+       if (IS_ERR(ent))
+               return PTR_ERR(ent);
 
        i_size_write(ent->d_inode, ring->ring_size + 12);
        ring->ent = ent;
index 38dade4..94126dc 100644 (file)
@@ -515,6 +515,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                goto out;
        }
 
+       if (bo->type == ttm_bo_type_device &&
+           new_mem->mem_type == TTM_PL_VRAM &&
+           old_mem->mem_type != TTM_PL_VRAM) {
+               /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+                * accesses the BO after it's moved.
+                */
+               abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+       }
+
        if (adev->mman.buffer_funcs_enabled) {
                if (((old_mem->mem_type == TTM_PL_SYSTEM &&
                      new_mem->mem_type == TTM_PL_VRAM) ||
@@ -545,15 +554,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                        return r;
        }
 
-       if (bo->type == ttm_bo_type_device &&
-           new_mem->mem_type == TTM_PL_VRAM &&
-           old_mem->mem_type != TTM_PL_VRAM) {
-               /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
-                * accesses the BO after it's moved.
-                */
-               abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-       }
-
 out:
        /* update statistics */
        atomic64_add(bo->base.size, &adev->num_bytes_moved);
index 16a57b7..98d1b3a 100644 (file)
@@ -468,6 +468,7 @@ static const struct kfd_device_info navi10_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 145,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -487,6 +488,7 @@ static const struct kfd_device_info navi12_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 145,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -506,6 +508,7 @@ static const struct kfd_device_info navi14_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 145,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -525,6 +528,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 4,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -544,6 +548,7 @@ static const struct kfd_device_info navy_flounder_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -562,7 +567,8 @@ static const struct kfd_device_info vangogh_device_info = {
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .needs_iommu_device = false,
        .supports_cwsr = true,
-       .needs_pci_atomics = false,
+       .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 1,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 2,
@@ -582,6 +588,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -601,6 +608,7 @@ static const struct kfd_device_info beige_goby_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 1,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -619,7 +627,8 @@ static const struct kfd_device_info yellow_carp_device_info = {
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .needs_iommu_device = false,
        .supports_cwsr = true,
-       .needs_pci_atomics = false,
+       .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 1,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 2,
@@ -708,20 +717,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
        if (!kfd)
                return NULL;
 
-       /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
-        * 32 and 64-bit requests are possible and must be
-        * supported.
-        */
-       kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
-       if (device_info->needs_pci_atomics &&
-           !kfd->pci_atomic_requested) {
-               dev_info(kfd_device,
-                        "skipped device %x:%x, PCI rejects atomics\n",
-                        pdev->vendor, pdev->device);
-               kfree(kfd);
-               return NULL;
-       }
-
        kfd->kgd = kgd;
        kfd->device_info = device_info;
        kfd->pdev = pdev;
@@ -821,6 +816,23 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
                        - kfd->vm_info.first_vmid_kfd + 1;
 
+       /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+        * 32 and 64-bit requests are possible and must be
+        * supported.
+        */
+       kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd);
+       if (!kfd->pci_atomic_requested &&
+           kfd->device_info->needs_pci_atomics &&
+           (!kfd->device_info->no_atomic_fw_version ||
+            kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) {
+               dev_info(kfd_device,
+                        "skipped device %x:%x, PCI rejects atomics %d<%d\n",
+                        kfd->pdev->vendor, kfd->pdev->device,
+                        kfd->mec_fw_version,
+                        kfd->device_info->no_atomic_fw_version);
+               return false;
+       }
+
        /* Verify module parameters regarding mapped process number */
        if ((hws_max_conc_proc < 0)
                        || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
@@ -1057,17 +1069,21 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
        return ret;
 }
 
-static int kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
 {
        int err = 0;
 
        err = kfd_iommu_resume(kfd);
-       if (err) {
+       if (err)
                dev_err(kfd_device,
                        "Failed to resume IOMMU for device %x:%x\n",
                        kfd->pdev->vendor, kfd->pdev->device);
-               return err;
-       }
+       return err;
+}
+
+static int kfd_resume(struct kfd_dev *kfd)
+{
+       int err = 0;
 
        err = kfd->dqm->ops.start(kfd->dqm);
        if (err) {
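
The relocated atomics check above also gains a firmware escape hatch: a device
that needs PCI atomics is rejected only when no_atomic_fw_version is unset or
the MEC firmware predates it. The condition in isolation, with the same field
semantics as the hunk, plus a few sample evaluations:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same predicate as the kgd2kfd_device_init() hunk above. */
static bool skip_device(bool pci_atomic_requested, bool needs_pci_atomics,
                        uint32_t mec_fw_version, uint32_t no_atomic_fw_version)
{
        return !pci_atomic_requested &&
               needs_pci_atomics &&
               (!no_atomic_fw_version ||
                mec_fw_version < no_atomic_fw_version);
}

int main(void)
{
        /* fw 92 can cope without PCIe atomics (Sienna Cichlid-class). */
        printf("%d\n", skip_device(false, true, 92, 92)); /* 0: keep */
        printf("%d\n", skip_device(false, true, 91, 92)); /* 1: skip */
        /* No known atomic-free fw version: old behavior, skip. */
        printf("%d\n", skip_device(false, true, 91, 0));  /* 1: skip */
        return 0;
}
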
index ab83b0d..6d8f9bb 100644 (file)
@@ -207,6 +207,7 @@ struct kfd_device_info {
        bool supports_cwsr;
        bool needs_iommu_device;
        bool needs_pci_atomics;
+       uint32_t no_atomic_fw_version;
        unsigned int num_sdma_engines;
        unsigned int num_xgmi_sdma_engines;
        unsigned int num_sdma_queues_per_engine;
index 9b1fc54..66c799f 100644 (file)
@@ -998,6 +998,8 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
 
+       memset(pa_config, 0, sizeof(*pa_config));
+
        logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
 
@@ -6024,21 +6026,23 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
                return 0;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-       work = kzalloc(sizeof(*work), GFP_ATOMIC);
-       if (!work)
-               return -ENOMEM;
+       if (dm->vblank_control_workqueue) {
+               work = kzalloc(sizeof(*work), GFP_ATOMIC);
+               if (!work)
+                       return -ENOMEM;
 
-       INIT_WORK(&work->work, vblank_control_worker);
-       work->dm = dm;
-       work->acrtc = acrtc;
-       work->enable = enable;
+               INIT_WORK(&work->work, vblank_control_worker);
+               work->dm = dm;
+               work->acrtc = acrtc;
+               work->enable = enable;
 
-       if (acrtc_state->stream) {
-               dc_stream_retain(acrtc_state->stream);
-               work->stream = acrtc_state->stream;
-       }
+               if (acrtc_state->stream) {
+                       dc_stream_retain(acrtc_state->stream);
+                       work->stream = acrtc_state->stream;
+               }
 
-       queue_work(dm->vblank_control_workqueue, &work->work);
+               queue_work(dm->vblank_control_workqueue, &work->work);
+       }
 #endif
 
        return 0;
@@ -6792,14 +6796,15 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
-                                           struct dc_state *dc_state)
+                                           struct dc_state *dc_state,
+                                           struct dsc_mst_fairness_vars *vars)
 {
        struct dc_stream_state *stream = NULL;
        struct drm_connector *connector;
        struct drm_connector_state *new_con_state;
        struct amdgpu_dm_connector *aconnector;
        struct dm_connector_state *dm_conn_state;
-       int i, j, clock, bpp;
+       int i, j, clock;
        int vcpi, pbn_div, pbn = 0;
 
        for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -6838,9 +6843,15 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
                }
 
                pbn_div = dm_mst_get_pbn_divider(stream->link);
-               bpp = stream->timing.dsc_cfg.bits_per_pixel;
                clock = stream->timing.pix_clk_100hz / 10;
-               pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
+               /* pbn is calculated by compute_mst_dsc_configs_for_state */
+               for (j = 0; j < dc_state->stream_count; j++) {
+                       if (vars[j].aconnector == aconnector) {
+                               pbn = vars[j].pbn;
+                               break;
+                       }
+               }
+
                vcpi = drm_dp_mst_atomic_enable_dsc(state,
                                                    aconnector->port,
                                                    pbn, pbn_div,
@@ -7519,6 +7530,32 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
        }
 }
 
+static void amdgpu_set_panel_orientation(struct drm_connector *connector)
+{
+       struct drm_encoder *encoder;
+       struct amdgpu_encoder *amdgpu_encoder;
+       const struct drm_display_mode *native_mode;
+
+       if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+           connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+               return;
+
+       encoder = amdgpu_dm_connector_to_encoder(connector);
+       if (!encoder)
+               return;
+
+       amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+       native_mode = &amdgpu_encoder->native_mode;
+       if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
+               return;
+
+       drm_connector_set_panel_orientation_with_quirk(connector,
+                                                      DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+                                                      native_mode->hdisplay,
+                                                      native_mode->vdisplay);
+}
+
 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
                                              struct edid *edid)
 {
@@ -7547,6 +7584,8 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
                 * restored here.
                 */
                amdgpu_dm_update_freesync_caps(connector, edid);
+
+               amdgpu_set_panel_orientation(connector);
        } else {
                amdgpu_dm_connector->num_modes = 0;
        }
@@ -8058,8 +8097,26 @@ static bool is_content_protection_different(struct drm_connector_state *state,
            state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
                state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 
-       /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
-        * hot-plug, headless s3, dpms
+       /* Stream removed and re-enabled
+        *
+        * Can sometimes overlap with the HPD case,
+        * thus set update_hdcp to false to avoid
+        * setting HDCP multiple times.
+        *
+        * Handles:     DESIRED -> DESIRED (Special case)
+        */
+       if (!(old_state->crtc && old_state->crtc->enabled) &&
+               state->crtc && state->crtc->enabled &&
+               connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+               dm_con_state->update_hdcp = false;
+               return true;
+       }
+
+       /* Hot-plug, headless s3, dpms
+        *
+        * Only start HDCP if the display is connected/enabled.
+        * update_hdcp flag will be set to false until the next
+        * HPD comes in.
         *
         * Handles:     DESIRED -> DESIRED (Special case)
         */
@@ -8648,7 +8705,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                 * If PSR or idle optimizations are enabled then flush out
                 * any pending work before hardware programming.
                 */
-               flush_workqueue(dm->vblank_control_workqueue);
+               if (dm->vblank_control_workqueue)
+                       flush_workqueue(dm->vblank_control_workqueue);
 #endif
 
                bundle->stream_update.stream = acrtc_state->stream;
@@ -8983,7 +9041,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                /* if there mode set or reset, disable eDP PSR */
                if (mode_set_reset_required) {
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-                       flush_workqueue(dm->vblank_control_workqueue);
+                       if (dm->vblank_control_workqueue)
+                               flush_workqueue(dm->vblank_control_workqueue);
 #endif
                        amdgpu_dm_psr_disable_all(dm);
                }
@@ -10243,6 +10302,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
        int ret, i;
        bool lock_and_validation_needed = false;
        struct dm_crtc_state *dm_old_crtc_state;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+       struct dsc_mst_fairness_vars vars[MAX_PIPES];
+#endif
 
        trace_amdgpu_dm_atomic_check_begin(state);
 
@@ -10473,10 +10535,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        goto fail;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-               if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
+               if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
                        goto fail;
 
-               ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
+               ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
                if (ret)
                        goto fail;
 #endif
@@ -10492,7 +10554,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        goto fail;
                status = dc_validate_global_state(dc, dm_state->context, false);
                if (status != DC_OK) {
-                       DC_LOG_WARNING("DC global validation failure: %s (%d)",
+                       drm_dbg_atomic(dev,
+                                      "DC global validation failure: %s (%d)",
                                       dc_status_to_str(status), status);
                        ret = -EINVAL;
                        goto fail;
index 1bcba69..7af0d58 100644 (file)
@@ -518,12 +518,7 @@ struct dsc_mst_fairness_params {
        uint32_t num_slices_h;
        uint32_t num_slices_v;
        uint32_t bpp_overwrite;
-};
-
-struct dsc_mst_fairness_vars {
-       int pbn;
-       bool dsc_enabled;
-       int bpp_x16;
+       struct amdgpu_dm_connector *aconnector;
 };
 
 static int kbps_to_peak_pbn(int kbps)
@@ -750,12 +745,12 @@ static void try_disable_dsc(struct drm_atomic_state *state,
 
 static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                                             struct dc_state *dc_state,
-                                            struct dc_link *dc_link)
+                                            struct dc_link *dc_link,
+                                            struct dsc_mst_fairness_vars *vars)
 {
        int i;
        struct dc_stream_state *stream;
        struct dsc_mst_fairness_params params[MAX_PIPES];
-       struct dsc_mst_fairness_vars vars[MAX_PIPES];
        struct amdgpu_dm_connector *aconnector;
        int count = 0;
        bool debugfs_overwrite = false;
@@ -776,6 +771,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                params[count].timing = &stream->timing;
                params[count].sink = stream->sink;
                aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+               params[count].aconnector = aconnector;
                params[count].port = aconnector->port;
                params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
                if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
@@ -798,6 +794,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
        }
        /* Try no compression */
        for (i = 0; i < count; i++) {
+               vars[i].aconnector = params[i].aconnector;
                vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
                vars[i].dsc_enabled = false;
                vars[i].bpp_x16 = 0;
@@ -851,7 +848,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 }
 
 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-                                      struct dc_state *dc_state)
+                                      struct dc_state *dc_state,
+                                      struct dsc_mst_fairness_vars *vars)
 {
        int i, j;
        struct dc_stream_state *stream;
@@ -882,7 +880,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
                        return false;
 
                mutex_lock(&aconnector->mst_mgr.lock);
-               if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
+               if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) {
                        mutex_unlock(&aconnector->mst_mgr.lock);
                        return false;
                }
index b38bd68..900d3f7 100644 (file)
@@ -39,8 +39,17 @@ void
 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+
+struct dsc_mst_fairness_vars {
+       int pbn;
+       bool dsc_enabled;
+       int bpp_x16;
+       struct amdgpu_dm_connector *aconnector;
+};
+
 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-                                      struct dc_state *dc_state);
+                                      struct dc_state *dc_state,
+                                      struct dsc_mst_fairness_vars *vars);
 #endif
 
 #endif
index c9f47d1..b1bf80d 100644 (file)
@@ -62,7 +62,7 @@ inline void dc_assert_fp_enabled(void)
        depth = *pcpu;
        put_cpu_ptr(&fpu_recursion_depth);
 
-       ASSERT(depth > 1);
+       ASSERT(depth >= 1);
 }
 
 /**
index 8bd7f42..1e44b13 100644 (file)
@@ -2586,13 +2586,21 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
 
 int dc_link_get_backlight_level(const struct dc_link *link)
 {
-
        struct abm *abm = get_abm_from_stream_res(link);
+       struct panel_cntl *panel_cntl = link->panel_cntl;
+       struct dc  *dc = link->ctx->dc;
+       struct dmcu *dmcu = dc->res_pool->dmcu;
+       bool fw_set_brightness = true;
 
-       if (abm == NULL || abm->funcs->get_current_backlight == NULL)
-               return DC_ERROR_UNEXPECTED;
+       if (dmcu)
+               fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
 
-       return (int) abm->funcs->get_current_backlight(abm);
+       if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
+               return panel_cntl->funcs->get_current_backlight(panel_cntl);
+       else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
+               return (int) abm->funcs->get_current_backlight(abm);
+       else
+               return DC_ERROR_UNEXPECTED;
 }
 
 int dc_link_get_target_backlight_pwm(const struct dc_link *link)
index 330edd6..f6dbc5a 100644 (file)
@@ -1,4 +1,26 @@
-/* Copyright 2015 Advanced Micro Devices, Inc. */
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
 #include "dm_services.h"
 #include "dc.h"
 #include "dc_link_dp.h"
@@ -1840,9 +1862,13 @@ bool perform_link_training_with_retries(
                dp_disable_link_phy(link, signal);
 
                /* Abort link training if failure due to sink being unplugged. */
-               if (status == LINK_TRAINING_ABORT)
-                       break;
-               else if (do_fallback) {
+               if (status == LINK_TRAINING_ABORT) {
+                       enum dc_connection_type type = dc_connection_none;
+
+                       dc_link_detect_sink(link, &type);
+                       if (type == dc_connection_none)
+                               break;
+               } else if (do_fallback) {
                        decide_fallback_link_setting(*link_setting, &current_setting, status);
                        /* Fail link training if reduced link bandwidth no longer meets
                         * stream requirements.
index e923392..e857006 100644 (file)
@@ -49,7 +49,6 @@
 static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
 {
        uint64_t current_backlight;
-       uint32_t round_result;
        uint32_t bl_period, bl_int_count;
        uint32_t bl_pwm, fractional_duty_cycle_en;
        uint32_t bl_period_mask, bl_pwm_mask;
@@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c
        current_backlight = div_u64(current_backlight, bl_period);
        current_backlight = (current_backlight + 1) >> 1;
 
-       current_backlight = (uint64_t)(current_backlight) * bl_period;
-
-       round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
-
-       round_result = (round_result >> (bl_int_count-1)) & 1;
-
-       current_backlight >>= bl_int_count;
-       current_backlight += round_result;
-
        return (uint32_t)(current_backlight);
 }
 
index 8a08ecc..4884a4e 100644 (file)
 #define TABLE_PMSTATUSLOG        3 // Called by Tools for Agm logging
 #define TABLE_DPMCLOCKS          4 // Called by Driver; defined here, but not used, for backward compatibility
 #define TABLE_MOMENTARY_PM       5 // Called by Tools; defined here, but not used, for backward compatibility
-#define TABLE_COUNT              6
+#define TABLE_SMU_METRICS        6 // Called by Driver
+#define TABLE_COUNT              7
 
-#define NUM_DSPCLK_LEVELS              8
-#define NUM_SOCCLK_DPM_LEVELS  8
-#define NUM_DCEFCLK_DPM_LEVELS 4
-#define NUM_FCLK_DPM_LEVELS            4
-#define NUM_MEMCLK_DPM_LEVELS  4
+typedef struct SmuMetricsTable_t {
+       //CPU status
+       uint16_t CoreFrequency[6];              //[MHz]
+       uint32_t CorePower[6];                  //[mW]
+       uint16_t CoreTemperature[6];            //[centi-Celsius]
+       uint16_t L3Frequency[2];                //[MHz]
+       uint16_t L3Temperature[2];              //[centi-Celsius]
+       uint16_t C0Residency[6];                //Percentage
 
-#define NUMBER_OF_PSTATES              8
-#define NUMBER_OF_CORES                        8
+       // GFX status
+       uint16_t GfxclkFrequency;               //[MHz]
+       uint16_t GfxTemperature;                //[centi-Celsius]
 
-typedef enum {
-       S3_TYPE_ENTRY,
-       S5_TYPE_ENTRY,
-} Sleep_Type_e;
+       // SOC IP info
+       uint16_t SocclkFrequency;               //[MHz]
+       uint16_t VclkFrequency;                 //[MHz]
+       uint16_t DclkFrequency;                 //[MHz]
+       uint16_t MemclkFrequency;               //[MHz]
 
-typedef enum {
-       GFX_OFF = 0,
-       GFX_ON  = 1,
-} GFX_Mode_e;
+       // power, VF info for CPU/GFX telemetry rails, and then socket power total
+       uint32_t Voltage[2];                    //[mV] indices: VDDCR_VDD, VDDCR_GFX
+       uint32_t Current[2];                    //[mA] indices: VDDCR_VDD, VDDCR_GFX
+       uint32_t Power[2];                      //[mW] indices: VDDCR_VDD, VDDCR_GFX
+       uint32_t CurrentSocketPower;            //[mW]
 
-typedef enum {
-       CPU_P0 = 0,
-       CPU_P1,
-       CPU_P2,
-       CPU_P3,
-       CPU_P4,
-       CPU_P5,
-       CPU_P6,
-       CPU_P7
-} CPU_PState_e;
+       uint16_t SocTemperature;                //[centi-Celsius]
+       uint16_t EdgeTemperature;
+       uint16_t ThrottlerStatus;
+       uint16_t Spare;
 
-typedef enum {
-       CPU_CORE0 = 0,
-       CPU_CORE1,
-       CPU_CORE2,
-       CPU_CORE3,
-       CPU_CORE4,
-       CPU_CORE5,
-       CPU_CORE6,
-       CPU_CORE7
-} CORE_ID_e;
+} SmuMetricsTable_t;
 
-typedef enum {
-       DF_DPM0 = 0,
-       DF_DPM1,
-       DF_DPM2,
-       DF_DPM3,
-       DF_PState_Count
-} DF_PState_e;
-
-typedef enum {
-       GFX_DPM0 = 0,
-       GFX_DPM1,
-       GFX_DPM2,
-       GFX_DPM3,
-       GFX_PState_Count
-} GFX_PState_e;
+typedef struct SmuMetrics_t {
+       SmuMetricsTable_t Current;
+       SmuMetricsTable_t Average;
+       uint32_t SampleStartTime;
+       uint32_t SampleStopTime;
+       uint32_t Accnt;
+} SmuMetrics_t;
 
 #endif
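
The unit comments in the new table ([MHz], [mW], [centi-Celsius]) are what the
cyan_skillfish consumers further below convert from: temperatures are scaled
up to millidegrees and socket power is reported in 8.8 fixed-point watts. A
tiny worked conversion under those unit annotations; the value of
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES is an assumption here:

#include <stdint.h>
#include <stdio.h>

#define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000  /* assumed value */

int main(void)
{
        uint16_t soc_temp = 4523;         /* [centi-Celsius] = 45.23 C */
        uint32_t socket_power = 15000;    /* [mW] = 15 W */

        /* centi-Celsius -> millidegrees, truncating as the sensor path does */
        uint32_t millideg = soc_temp / 100 *
                            SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
        /* mW -> watts in 24.8 fixed point, as METRICS_AVERAGE_SOCKETPOWER */
        uint32_t watts_q8 = (socket_power << 8) / 1000;

        printf("%u mC, %u.%02u W\n", millideg,
               watts_q8 >> 8, (watts_q8 & 0xff) * 100 / 256);
        return 0;
}
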
index 6f1b1b5..18b862a 100644 (file)
        __SMU_DUMMY_MAP(SetUclkDpmMode),                \
        __SMU_DUMMY_MAP(LightSBR),                      \
        __SMU_DUMMY_MAP(GfxDriverResetRecovery),        \
-       __SMU_DUMMY_MAP(BoardPowerCalibration),
+       __SMU_DUMMY_MAP(BoardPowerCalibration),   \
+       __SMU_DUMMY_MAP(RequestGfxclk),           \
+       __SMU_DUMMY_MAP(ForceGfxVid),             \
+       __SMU_DUMMY_MAP(UnforceGfxVid),
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)  SMU_MSG_##type
index 6e60887..909a86a 100644 (file)
 #define PPSMC_MSG_SetDriverTableVMID                    0x34
 #define PPSMC_MSG_SetSoftMinCclk                        0x35
 #define PPSMC_MSG_SetSoftMaxCclk                        0x36
-#define PPSMC_Message_Count                             0x37
+#define PPSMC_MSG_GetGfxFrequency                       0x37
+#define PPSMC_MSG_GetGfxVid                             0x38
+#define PPSMC_MSG_ForceGfxFreq                          0x39
+#define PPSMC_MSG_UnForceGfxFreq                        0x3A
+#define PPSMC_MSG_ForceGfxVid                           0x3B
+#define PPSMC_MSG_UnforceGfxVid                         0x3C
+#define PPSMC_MSG_GetEnabledSmuFeatures                 0x3D
+#define PPSMC_Message_Count                             0x3E
 
 #endif
index 3ab1ce4..04863a7 100644 (file)
@@ -1404,7 +1404,7 @@ static int smu_disable_dpms(struct smu_context *smu)
         */
        if (smu->uploading_custom_pp_table &&
            (adev->asic_type >= CHIP_NAVI10) &&
-           (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
+           (adev->asic_type <= CHIP_BEIGE_GOBY))
                return smu_disable_all_features_with_exception(smu,
                                                               true,
                                                               SMU_FEATURE_COUNT);
index e343cc2..082f018 100644 (file)
@@ -771,8 +771,12 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
        struct smu_11_0_dpm_context *dpm_context = NULL;
        uint32_t gen_speed, lane_width;
 
-       if (amdgpu_ras_intr_triggered())
-               return sysfs_emit(buf, "unavailable\n");
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
+       if (amdgpu_ras_intr_triggered()) {
+               size += sysfs_emit_at(buf, size, "unavailable\n");
+               return size;
+       }
 
        dpm_context = smu_dpm->dpm_context;
 
index b05f954..3d4c65b 100644 (file)
 #undef pr_info
 #undef pr_debug
 
+/* unit: MHz */
+#define CYAN_SKILLFISH_SCLK_MIN                        1000
+#define CYAN_SKILLFISH_SCLK_MAX                        2000
+#define CYAN_SKILLFISH_SCLK_DEFAULT                    1800
+
+/* unit: mV */
+#define CYAN_SKILLFISH_VDDC_MIN                        700
+#define CYAN_SKILLFISH_VDDC_MAX                        1129
+#define CYAN_SKILLFISH_VDDC_MAGIC                      5118 // 0x13fe
+
+static struct gfx_user_settings {
+       uint32_t sclk;
+       uint32_t vddc;
+} cyan_skillfish_user_settings;
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE ( \
+       FEATURE_MASK(FEATURE_FCLK_DPM_BIT)      |       \
+       FEATURE_MASK(FEATURE_SOC_DPM_BIT)       |       \
+       FEATURE_MASK(FEATURE_GFX_DPM_BIT))
+
 static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,                  0),
        MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,                0),
@@ -52,14 +73,473 @@ static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT]
        MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverTableDramAddrLow,    0),
        MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,        0),
        MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,        0),
+       MSG_MAP(GetEnabledSmuFeatures,          PPSMC_MSG_GetEnabledSmuFeatures,        0),
+       MSG_MAP(RequestGfxclk,                  PPSMC_MSG_RequestGfxclk,                0),
+       MSG_MAP(ForceGfxVid,                    PPSMC_MSG_ForceGfxVid,                  0),
+       MSG_MAP(UnforceGfxVid,                  PPSMC_MSG_UnforceGfxVid,                0),
+};
+
+static struct cmn2asic_mapping cyan_skillfish_table_map[SMU_TABLE_COUNT] = {
+       TAB_MAP_VALID(SMU_METRICS),
 };
 
+static int cyan_skillfish_tables_init(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *tables = smu_table->tables;
+
+       SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+                               sizeof(SmuMetrics_t),
+                               PAGE_SIZE,
+                               AMDGPU_GEM_DOMAIN_VRAM);
+
+       smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+       if (!smu_table->metrics_table)
+               goto err0_out;
+
+       smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+       smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+       if (!smu_table->gpu_metrics_table)
+               goto err1_out;
+
+       smu_table->metrics_time = 0;
+
+       return 0;
+
+err1_out:
+       smu_table->gpu_metrics_table_size = 0;
+       kfree(smu_table->metrics_table);
+err0_out:
+       return -ENOMEM;
+}
+
+static int cyan_skillfish_init_smc_tables(struct smu_context *smu)
+{
+       int ret = 0;
+
+       ret = cyan_skillfish_tables_init(smu);
+       if (ret)
+               return ret;
+
+       return smu_v11_0_init_smc_tables(smu);
+}
+
+static int cyan_skillfish_finit_smc_tables(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+
+       kfree(smu_table->metrics_table);
+       smu_table->metrics_table = NULL;
+
+       kfree(smu_table->gpu_metrics_table);
+       smu_table->gpu_metrics_table = NULL;
+       smu_table->gpu_metrics_table_size = 0;
+
+       smu_table->metrics_time = 0;
+
+       return 0;
+}
+
+static int
+cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
+                                       MetricsMember_t member,
+                                       uint32_t *value)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+       int ret = 0;
+
+       mutex_lock(&smu->metrics_lock);
+
+       ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
+       if (ret) {
+               mutex_unlock(&smu->metrics_lock);
+               return ret;
+       }
+
+       switch (member) {
+       case METRICS_CURR_GFXCLK:
+               *value = metrics->Current.GfxclkFrequency;
+               break;
+       case METRICS_CURR_SOCCLK:
+               *value = metrics->Current.SocclkFrequency;
+               break;
+       case METRICS_CURR_VCLK:
+               *value = metrics->Current.VclkFrequency;
+               break;
+       case METRICS_CURR_DCLK:
+               *value = metrics->Current.DclkFrequency;
+               break;
+       case METRICS_CURR_UCLK:
+               *value = metrics->Current.MemclkFrequency;
+               break;
+       case METRICS_AVERAGE_SOCKETPOWER:
+               *value = (metrics->Current.CurrentSocketPower << 8) /
+                               1000;
+               break;
+       case METRICS_TEMPERATURE_EDGE:
+               *value = metrics->Current.GfxTemperature / 100 *
+                               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+               break;
+       case METRICS_TEMPERATURE_HOTSPOT:
+               *value = metrics->Current.SocTemperature / 100 *
+                               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+               break;
+       case METRICS_VOLTAGE_VDDSOC:
+               *value = metrics->Current.Voltage[0];
+               break;
+       case METRICS_VOLTAGE_VDDGFX:
+               *value = metrics->Current.Voltage[1];
+               break;
+       case METRICS_THROTTLER_STATUS:
+               *value = metrics->Current.ThrottlerStatus;
+               break;
+       default:
+               *value = UINT_MAX;
+               break;
+       }
+
+       mutex_unlock(&smu->metrics_lock);
+
+       return ret;
+}
+
+static int cyan_skillfish_read_sensor(struct smu_context *smu,
+                                       enum amd_pp_sensors sensor,
+                                       void *data,
+                                       uint32_t *size)
+{
+       int ret = 0;
+
+       if (!data || !size)
+               return -EINVAL;
+
+       mutex_lock(&smu->sensor_lock);
+
+       switch (sensor) {
+       case AMDGPU_PP_SENSOR_GFX_SCLK:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_CURR_GFXCLK,
+                                                  (uint32_t *)data);
+               *(uint32_t *)data *= 100;
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_GFX_MCLK:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_CURR_UCLK,
+                                                  (uint32_t *)data);
+               *(uint32_t *)data *= 100;
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_GPU_POWER:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_AVERAGE_SOCKETPOWER,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_TEMPERATURE_HOTSPOT,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_EDGE_TEMP:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_TEMPERATURE_EDGE,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_VDDNB:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_VOLTAGE_VDDSOC,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_VDDGFX:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_VOLTAGE_VDDGFX,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       mutex_unlock(&smu->sensor_lock);
+
+       return ret;
+}
+
+static int cyan_skillfish_get_current_clk_freq(struct smu_context *smu,
+                                               enum smu_clk_type clk_type,
+                                               uint32_t *value)
+{
+       MetricsMember_t member_type;
+
+       switch (clk_type) {
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+               member_type = METRICS_CURR_GFXCLK;
+               break;
+       case SMU_FCLK:
+       case SMU_MCLK:
+               member_type = METRICS_CURR_UCLK;
+               break;
+       case SMU_SOCCLK:
+               member_type = METRICS_CURR_SOCCLK;
+               break;
+       case SMU_VCLK:
+               member_type = METRICS_CURR_VCLK;
+               break;
+       case SMU_DCLK:
+               member_type = METRICS_CURR_DCLK;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return cyan_skillfish_get_smu_metrics_data(smu, member_type, value);
+}
+
+static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
+                                       enum smu_clk_type clk_type,
+                                       char *buf)
+{
+       int ret = 0, size = 0;
+       uint32_t cur_value = 0;
+
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
+       switch (clk_type) {
+       case SMU_OD_SCLK:
+               ret  = cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &cur_value);
+               if (ret)
+                       return ret;
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+               size += sysfs_emit_at(buf, size, "0: %uMHz *\n", cur_value);
+               break;
+       case SMU_OD_VDDC_CURVE:
+               ret  = cyan_skillfish_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, &cur_value);
+               if (ret)
+                       return ret;
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC");
+               size += sysfs_emit_at(buf, size, "0: %umV *\n", cur_value);
+               break;
+       case SMU_OD_RANGE:
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
+                                               CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+               size += sysfs_emit_at(buf, size, "VDDC: %7umV  %10umV\n",
+                                               CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+               break;
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+       case SMU_FCLK:
+       case SMU_MCLK:
+       case SMU_SOCCLK:
+       case SMU_VCLK:
+       case SMU_DCLK:
+               ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value);
+               if (ret)
+                       return ret;
+               size += sysfs_emit_at(buf, size, "0: %uMHz *\n", cur_value);
+               break;
+       default:
+               dev_warn(smu->adev->dev, "Unsupported clock type\n");
+               return ret;
+       }
+
+       return size;
+}
+
+static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+       int ret = 0;
+       uint32_t feature_mask[2];
+       uint64_t feature_enabled;
+
+       /* we need to re-init after suspend so return false */
+       if (adev->in_suspend)
+               return false;
+
+       ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+
+       if (ret)
+               return false;
+
+       feature_enabled = (uint64_t)feature_mask[0] |
+                               ((uint64_t)feature_mask[1] << 32);
+
+       return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu,
+                                               void **table)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct gpu_metrics_v2_2 *gpu_metrics =
+               (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
+       SmuMetrics_t metrics;
+       int i, ret = 0;
+
+       ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+       if (ret)
+               return ret;
+
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
+
+       gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+       gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+
+       gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+       gpu_metrics->average_soc_power = metrics.Current.Power[0];
+       gpu_metrics->average_gfx_power = metrics.Current.Power[1];
+
+       gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+       gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+       gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+       gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+       gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+       gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+       gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+       gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+       gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+       gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+       gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+       gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+       for (i = 0; i < 6; i++) {
+               gpu_metrics->temperature_core[i] = metrics.Current.CoreTemperature[i];
+               gpu_metrics->average_core_power[i] = metrics.Average.CorePower[i];
+               gpu_metrics->current_coreclk[i] = metrics.Current.CoreFrequency[i];
+       }
+
+       for (i = 0; i < 2; i++) {
+               gpu_metrics->temperature_l3[i] = metrics.Current.L3Temperature[i];
+               gpu_metrics->current_l3clk[i] = metrics.Current.L3Frequency[i];
+       }
+
+       gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+       gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+       *table = (void *)gpu_metrics;
+
+       return sizeof(struct gpu_metrics_v2_2);
+}
+
+static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
+                                       enum PP_OD_DPM_TABLE_COMMAND type,
+                                       long input[], uint32_t size)
+{
+       int ret = 0;
+       uint32_t vid;
+
+       switch (type) {
+       case PP_OD_EDIT_VDDC_CURVE:
+               if (size != 3 || input[0] != 0) {
+                       dev_err(smu->adev->dev, "Invalid parameter!\n");
+                       return -EINVAL;
+               }
+
+               if (input[1] <= CYAN_SKILLFISH_SCLK_MIN ||
+                       input[1] > CYAN_SKILLFISH_SCLK_MAX) {
+                       dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMHz\n",
+                                       CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+                       return -EINVAL;
+               }
+
+               if (input[2] <= CYAN_SKILLFISH_VDDC_MIN ||
+                       input[2] > CYAN_SKILLFISH_VDDC_MAX) {
+                       dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
+                                       CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+                       return -EINVAL;
+               }
+
+               cyan_skillfish_user_settings.sclk = input[1];
+               cyan_skillfish_user_settings.vddc = input[2];
+
+               break;
+       case PP_OD_RESTORE_DEFAULT_TABLE:
+               if (size != 0) {
+                       dev_err(smu->adev->dev, "Invalid parameter!\n");
+                       return -EINVAL;
+               }
+
+               cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT;
+               cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC;
+
+               break;
+       case PP_OD_COMMIT_DPM_TABLE:
+               if (size != 0) {
+                       dev_err(smu->adev->dev, "Invalid parameter!\n");
+                       return -EINVAL;
+               }
+
+               if (cyan_skillfish_user_settings.sclk < CYAN_SKILLFISH_SCLK_MIN ||
+                   cyan_skillfish_user_settings.sclk > CYAN_SKILLFISH_SCLK_MAX) {
+                       dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMHz\n",
+                                       CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+                       return -EINVAL;
+               }
+
+               if ((cyan_skillfish_user_settings.vddc != CYAN_SKILLFISH_VDDC_MAGIC) &&
+                       (cyan_skillfish_user_settings.vddc < CYAN_SKILLFISH_VDDC_MIN ||
+                       cyan_skillfish_user_settings.vddc > CYAN_SKILLFISH_VDDC_MAX)) {
+                       dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
+                                       CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+                       return -EINVAL;
+               }
+
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestGfxclk,
+                                       cyan_skillfish_user_settings.sclk, NULL);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Set sclk failed!\n");
+                       return ret;
+               }
+
+               if (cyan_skillfish_user_settings.vddc == CYAN_SKILLFISH_VDDC_MAGIC) {
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_UnforceGfxVid, NULL);
+                       if (ret) {
+                               dev_err(smu->adev->dev, "Unforce vddc failed!\n");
+                               return ret;
+                       }
+               } else {
+                       /*
+                        * PMFW accepts SVI2 VID code, convert voltage to VID:
+                        * vid = (uint32_t)((1.55 - voltage) * 160.0 + 0.00001)
+                        */
+                       vid = (1550 - cyan_skillfish_user_settings.vddc) * 160 / 1000;
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ForceGfxVid, vid, NULL);
+                       if (ret) {
+                               dev_err(smu->adev->dev, "Force vddc failed!\n");
+                               return ret;
+                       }
+               }
+
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return ret;
+}
+
 static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
 
        .check_fw_status = smu_v11_0_check_fw_status,
        .check_fw_version = smu_v11_0_check_fw_version,
        .init_power = smu_v11_0_init_power,
        .fini_power = smu_v11_0_fini_power,
+       .init_smc_tables = cyan_skillfish_init_smc_tables,
+       .fini_smc_tables = cyan_skillfish_finit_smc_tables,
+       .read_sensor = cyan_skillfish_read_sensor,
+       .print_clk_levels = cyan_skillfish_print_clk_levels,
+       .is_dpm_running = cyan_skillfish_is_dpm_running,
+       .get_gpu_metrics = cyan_skillfish_get_gpu_metrics,
+       .od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table,
        .register_irq_handler = smu_v11_0_register_irq_handler,
        .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
        .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
@@ -72,5 +552,6 @@ void cyan_skillfish_set_ppt_funcs(struct smu_context *smu)
 {
        smu->ppt_funcs = &cyan_skillfish_ppt_funcs;
        smu->message_map = cyan_skillfish_message_map;
+       smu->table_map = cyan_skillfish_table_map;
        smu->is_apu = true;
 }
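
The commit path above converts a millivolt setting to an SVI2 VID using the
formula in the comment, vid = (1.55 V - vddc) * 160, carried out in integer
millivolts. A worked check of that arithmetic against the VDDC bounds defined
earlier in this file (values illustrative):

#include <stdint.h>
#include <stdio.h>

/* vddc in mV; VID = (1550 mV - vddc) * 160 / 1000, as in the hunk. */
static uint32_t mv_to_svi2_vid(uint32_t vddc_mv)
{
        return (1550 - vddc_mv) * 160 / 1000;
}

int main(void)
{
        printf("%u\n", mv_to_svi2_vid(700));  /* (850 * 160) / 1000 = 136 */
        printf("%u\n", mv_to_svi2_vid(1129)); /* (421 * 160) / 1000 = 67 */
        printf("%u\n", mv_to_svi2_vid(1000)); /* (550 * 160) / 1000 = 88 */
        return 0;
}
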
index a5fc5d7..b1ad451 100644 (file)
@@ -1279,6 +1279,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
        struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
        uint32_t min_value, max_value;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_GFXCLK:
        case SMU_SCLK:
@@ -1392,7 +1394,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
        case SMU_OD_RANGE:
                if (!smu->od_enabled || !od_table || !od_settings)
                        break;
-               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 
                if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
                        navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
@@ -2272,7 +2274,27 @@ static int navi10_baco_enter(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm)
+       /*
+        * This targets the case below:
+        *   amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded
+        *
+        * For NAVI10 and later ASICs, we rely on PMFW to handle runpm. To make
+        * that possible, PMFW needs to acknowledge the dstate transition for
+        * both the gfx (function 0) and audio (function 1) functions of the
+        * ASIC.
+        *
+        * The PCI device's initial runpm status is RUNPM_SUSPENDED, and so is
+        * that of the device representing the audio function of the ASIC. That
+        * means a runpm suspend can be kicked on the ASIC even before the sound
+        * driver (snd_hda_intel) has been loaded. However, without the dstate
+        * transition notification from the audio function, PMFW cannot handle
+        * BACO entry/exit correctly, and that causes a driver hang on runpm
+        * resume.
+        *
+        * To address this, we revert to the legacy message path (the driver
+        * masters the timing for BACO entry/exit) when the sound driver is missing.
+        */
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
        else
                return smu_v11_0_baco_enter(smu);
@@ -2282,7 +2304,7 @@ static int navi10_baco_exit(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm) {
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
                /* Wait for PMFW handling for the Dstate change */
                msleep(10);
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
index 5e292c3..ca57221 100644 (file)
@@ -1058,6 +1058,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
        uint32_t min_value, max_value;
        uint32_t smu_version;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_GFXCLK:
        case SMU_SCLK:
@@ -1180,7 +1182,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
                if (!smu->od_enabled || !od_table || !od_settings)
                        break;
 
-               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 
                if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
                        sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
@@ -2187,7 +2189,7 @@ static int sienna_cichlid_baco_enter(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm)
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
        else
                return smu_v11_0_baco_enter(smu);
@@ -2197,7 +2199,7 @@ static int sienna_cichlid_baco_exit(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm) {
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
                /* Wait for PMFW handling for the Dstate change */
                msleep(10);
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
index 3a34214..f6ef0ce 100644 (file)
@@ -589,10 +589,12 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
        if (ret)
                return ret;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_SCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -601,7 +603,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_CCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
+                       size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -610,7 +612,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
                        size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                        size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
@@ -688,10 +690,12 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
        if (ret)
                return ret;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_SCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -700,7 +704,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_CCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
+                       size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -709,7 +713,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
                        size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                        size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
index 5aa175e..145f13b 100644 (file)
@@ -497,6 +497,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
        if (ret)
                return ret;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
index ab65202..5019903 100644 (file)
@@ -733,15 +733,19 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
        uint32_t freq_values[3] = {0};
        uint32_t min_clk, max_clk;
 
-       if (amdgpu_ras_intr_triggered())
-               return sysfs_emit(buf, "unavailable\n");
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
+       if (amdgpu_ras_intr_triggered()) {
+               size += sysfs_emit_at(buf, size, "unavailable\n");
+               return size;
+       }
 
        dpm_context = smu_dpm->dpm_context;
 
        switch (type) {
 
        case SMU_OD_SCLK:
-               size = sysfs_emit(buf, "%s:\n", "GFXCLK");
+               size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
                fallthrough;
        case SMU_SCLK:
                ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
@@ -795,7 +799,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
                break;
 
        case SMU_OD_MCLK:
-               size = sysfs_emit(buf, "%s:\n", "MCLK");
+               size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
                fallthrough;
        case SMU_MCLK:
                ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
index 627ba2e..a403657 100644 (file)
@@ -1052,16 +1052,18 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
        int i, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_SCLK:
-               size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
                size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
                size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
                break;
        case SMU_OD_RANGE:
-               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
                size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                break;
index 66711ab..843d2cb 100644 (file)
@@ -1053,3 +1053,24 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
 
        return ret;
 }
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
+{
+       struct pci_dev *p = NULL;
+       bool snd_driver_loaded;
+
+       /*
+        * If the ASIC comes with no audio function, we always assume
+        * it is "enabled".
+        */
+       p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+                       adev->pdev->bus->number, 1);
+       if (!p)
+               return true;
+
+       snd_driver_loaded = pci_is_enabled(p);
+
+       pci_dev_put(p);
+
+       return snd_driver_loaded;
+}
index 16993da..beea038 100644 (file)
@@ -110,5 +110,20 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
 int smu_cmn_set_mp1_state(struct smu_context *smu,
                          enum pp_mp1_state mp1_state);
 
+/*
+ * Helper to satisfy sysfs_emit_at(), which requires a page-aligned
+ * buffer: rewind buf to the start of its page and record how far
+ * into the page the output already extends.
+ */
+static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset)
+{
+       if (!*buf || !offset)
+               return;
+
+       *offset = offset_in_page(*buf);
+       *buf -= *offset;
+}
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
+
 #endif
 #endif
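
The repeated `size = sysfs_emit(...)` to `size += sysfs_emit_at(buf, size, ...)` hunks above all serve the same purpose: sysfs_emit_at() refuses a buffer that is not the page-aligned pointer sysfs handed to the show() callback, yet amdgpu chains several print routines that pass buf + size down. The helper recovers the page base and running offset, so a chained printer looks roughly like this (hypothetical function name; the two emit calls are illustrative):

static int example_print_levels(char *buf)
{
	int size = 0;

	/* buf may point mid-page after earlier output; rewind it to the
	 * page boundary and carry the bytes already written in `size`. */
	smu_cmn_get_sysfs_buf(&buf, &size);

	size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
	size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 500, 2000);

	return size;	/* total bytes now in the page */
}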
index 76d3856..cf741c5 100644 (file)
@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
                if (switch_mmu_context) {
                        struct etnaviv_iommu_context *old_context = gpu->mmu_context;
 
-                       etnaviv_iommu_context_get(mmu_context);
-                       gpu->mmu_context = mmu_context;
+                       gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
                        etnaviv_iommu_context_put(old_context);
                }
 
index 8f1b5af..f0b2540 100644 (file)
@@ -294,8 +294,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
                list_del(&mapping->obj_node);
        }
 
-       etnaviv_iommu_context_get(mmu_context);
-       mapping->context = mmu_context;
+       mapping->context = etnaviv_iommu_context_get(mmu_context);
        mapping->use = 1;
 
        ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
index 4dd7d9d..486259e 100644 (file)
@@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
                goto err_submit_objects;
 
        submit->ctx = file->driver_priv;
-       etnaviv_iommu_context_get(submit->ctx->mmu);
-       submit->mmu_context = submit->ctx->mmu;
+       submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
        submit->exec_state = args->exec_state;
        submit->flags = args->flags;
 
index c297fff..cc5b07f 100644 (file)
@@ -569,6 +569,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
        /* We rely on the GPU running, so program the clock */
        etnaviv_gpu_update_clock(gpu);
 
+       gpu->fe_running = false;
+       gpu->exec_state = -1;
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = NULL;
+
        return 0;
 }
 
@@ -637,19 +643,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
                          VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
                          VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
        }
+
+       gpu->fe_running = true;
 }
 
-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+                                         struct etnaviv_iommu_context *context)
 {
-       u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
-                               &gpu->mmu_context->cmdbuf_mapping);
        u16 prefetch;
+       u32 address;
 
        /* setup the MMU */
-       etnaviv_iommu_restore(gpu, gpu->mmu_context);
+       etnaviv_iommu_restore(gpu, context);
 
        /* Start command processor */
        prefetch = etnaviv_buffer_init(gpu);
+       address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+                                       &gpu->mmu_context->cmdbuf_mapping);
 
        etnaviv_gpu_start_fe(gpu, address, prefetch);
 }
@@ -832,7 +842,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
        /* Now program the hardware */
        mutex_lock(&gpu->lock);
        etnaviv_gpu_hw_init(gpu);
-       gpu->exec_state = -1;
        mutex_unlock(&gpu->lock);
 
        pm_runtime_mark_last_busy(gpu->dev);
@@ -1057,8 +1066,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
        spin_unlock(&gpu->event_spinlock);
 
        etnaviv_gpu_hw_init(gpu);
-       gpu->exec_state = -1;
-       gpu->mmu_context = NULL;
 
        mutex_unlock(&gpu->lock);
        pm_runtime_mark_last_busy(gpu->dev);
@@ -1370,14 +1377,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
                goto out_unlock;
        }
 
-       if (!gpu->mmu_context) {
-               etnaviv_iommu_context_get(submit->mmu_context);
-               gpu->mmu_context = submit->mmu_context;
-               etnaviv_gpu_start_fe_idleloop(gpu);
-       } else {
-               etnaviv_iommu_context_get(gpu->mmu_context);
-               submit->prev_mmu_context = gpu->mmu_context;
-       }
+       if (!gpu->fe_running)
+               etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+       if (submit->prev_mmu_context)
+               etnaviv_iommu_context_put(submit->prev_mmu_context);
+       submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
 
        if (submit->nr_pmrs) {
                gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
@@ -1579,7 +1584,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
 
 static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 {
-       if (gpu->initialized && gpu->mmu_context) {
+       if (gpu->initialized && gpu->fe_running) {
                /* Replace the last WAIT with END */
                mutex_lock(&gpu->lock);
                etnaviv_buffer_end(gpu);
@@ -1592,8 +1597,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
                 */
                etnaviv_gpu_wait_idle(gpu, 100);
 
-               etnaviv_iommu_context_put(gpu->mmu_context);
-               gpu->mmu_context = NULL;
+               gpu->fe_running = false;
        }
 
        gpu->exec_state = -1;
@@ -1741,6 +1745,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
        etnaviv_gpu_hw_suspend(gpu);
 #endif
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+
        if (gpu->initialized) {
                etnaviv_cmdbuf_free(&gpu->buffer);
                etnaviv_iommu_global_fini(gpu);
index 8ea4869..1c75c8e 100644 (file)
@@ -101,6 +101,7 @@ struct etnaviv_gpu {
        struct workqueue_struct *wq;
        struct drm_gpu_scheduler sched;
        bool initialized;
+       bool fe_running;
 
        /* 'ring'-buffer: */
        struct etnaviv_cmdbuf buffer;
index 1a7c89a..afe5dd6 100644 (file)
@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
        u32 pgtable;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        /* set base addresses */
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
index f8bf488..d664ae2 100644 (file)
@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
        if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
                return;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        prefetch = etnaviv_buffer_config_mmuv2(gpu,
                                (u32)v2_context->mtlb_dma,
                                (u32)context->global->bad_page_dma);
@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
        if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
                return;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
                  lower_32_bits(context->global->v2.pta_dma));
        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
index dab1b58..9fb1a2a 100644 (file)
@@ -199,6 +199,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_remove_mapping(context, m);
+                       etnaviv_iommu_context_put(m->context);
                        m->context = NULL;
                        list_del_init(&m->mmu_node);
                        list_del_init(&m->scan_node);
index d1d6902..e4a0b7d 100644 (file)
@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
 struct etnaviv_iommu_context *
 etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
                           struct etnaviv_cmdbuf_suballoc *suballoc);
-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+static inline struct etnaviv_iommu_context *
+etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
 {
        kref_get(&ctx->refcount);
+       return ctx;
 }
 void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
 void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
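
Returning the pointer from the get helper is what lets every etnaviv call site above collapse `get(ctx); x = ctx;` into `x = get(ctx);`, making it harder to take a reference without recording its owner. The idiom in isolation (hypothetical `foo` type, same shape as the change):

struct foo {
	struct kref refcount;
};

/* Get-and-return: acquisition and assignment become one expression,
 * e.g.  mapping->context = foo_get(ctx);  */
static inline struct foo *foo_get(struct foo *f)
{
	kref_get(&f->refcount);
	return f;
}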
index 642a5b5..335ba9f 100644 (file)
@@ -19,7 +19,6 @@ subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
 subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
 # clang warnings
 subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
-subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
 subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
 subdir-ccflags-y += $(call cc-disable-warning, frame-address)
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
index 04175f3..abe3d61 100644 (file)
@@ -2445,11 +2445,14 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
         */
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
                             intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
-                            sizeof(intel_dp->edp_dpcd))
+                            sizeof(intel_dp->edp_dpcd)) {
                drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
                            (int)sizeof(intel_dp->edp_dpcd),
                            intel_dp->edp_dpcd);
 
+               intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
+       }
+
        /*
         * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
         * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
index 053a3c2..508a514 100644 (file)
@@ -848,7 +848,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
        }
 
        if (ret)
-               intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
+               ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
 
        if (intel_dp->set_idle_link_train)
                intel_dp->set_idle_link_train(intel_dp, crtc_state);
index cff7267..9ccf4b2 100644 (file)
@@ -986,6 +986,9 @@ void i915_gem_context_release(struct kref *ref)
        trace_i915_context_free(ctx);
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
+       if (ctx->syncobj)
+               drm_syncobj_put(ctx->syncobj);
+
        mutex_destroy(&ctx->engines_mutex);
        mutex_destroy(&ctx->lut_mutex);
 
@@ -1205,9 +1208,6 @@ static void context_close(struct i915_gem_context *ctx)
        if (vm)
                i915_vm_close(vm);
 
-       if (ctx->syncobj)
-               drm_syncobj_put(ctx->syncobj);
-
        ctx->file_priv = ERR_PTR(-EBADF);
 
        /*
index ffae7df..4a6bb64 100644 (file)
@@ -59,13 +59,13 @@ static int igt_dmabuf_import_self(void *arg)
                err = PTR_ERR(import);
                goto out_dmabuf;
        }
+       import_obj = to_intel_bo(import);
 
        if (import != &obj->base) {
                pr_err("i915_gem_prime_import created a new object!\n");
                err = -EINVAL;
                goto out_import;
        }
-       import_obj = to_intel_bo(import);
 
        i915_gem_object_lock(import_obj, NULL);
        err = __i915_gem_object_get_pages(import_obj);
@@ -128,6 +128,8 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg)
                pr_err("i915_gem_prime_import failed with the wrong err=%ld\n",
                       PTR_ERR(import));
                err = PTR_ERR(import);
+       } else {
+               err = 0;
        }
 
        dma_buf_put(dmabuf);
@@ -176,6 +178,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
                err = PTR_ERR(import);
                goto out_dmabuf;
        }
+       import_obj = to_intel_bo(import);
 
        if (import == &obj->base) {
                pr_err("i915_gem_prime_import reused gem object!\n");
@@ -183,8 +186,6 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
                goto out_import;
        }
 
-       import_obj = to_intel_bo(import);
-
        i915_gem_object_lock(import_obj, NULL);
        err = __i915_gem_object_get_pages(import_obj);
        if (err) {
index b20f562..a2c34e5 100644 (file)
@@ -581,6 +581,20 @@ static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
        return I915_MMAP_TYPE_GTT;
 }
 
+static struct drm_i915_gem_object *
+create_sys_or_internal(struct drm_i915_private *i915,
+                      unsigned long size)
+{
+       if (HAS_LMEM(i915)) {
+               struct intel_memory_region *sys_region =
+                       i915->mm.regions[INTEL_REGION_SMEM];
+
+               return __i915_gem_object_create_user(i915, size, &sys_region, 1);
+       }
+
+       return i915_gem_object_create_internal(i915, size);
+}
+
 static bool assert_mmap_offset(struct drm_i915_private *i915,
                               unsigned long size,
                               int expected)
@@ -589,7 +603,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
        u64 offset;
        int ret;
 
-       obj = i915_gem_object_create_internal(i915, size);
+       obj = create_sys_or_internal(i915, size);
        if (IS_ERR(obj))
                return expected && expected == PTR_ERR(obj);
 
@@ -633,6 +647,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
        struct drm_mm_node *hole, *next;
        int loop, err = 0;
        u64 offset;
+       int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
 
        /* Disable background reaper */
        disable_retire_worker(i915);
@@ -683,14 +698,14 @@ static int igt_mmap_offset_exhaustion(void *arg)
        }
 
        /* Too large */
-       if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
+       if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
                pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
                err = -EINVAL;
                goto out;
        }
 
        /* Fill the hole, further allocation attempts should then fail */
-       obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+       obj = create_sys_or_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                pr_err("Unable to create object for reclaimed hole\n");
@@ -703,7 +718,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
                goto err_obj;
        }
 
-       if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
+       if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
                pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
                err = -EINVAL;
                goto err_obj;
@@ -839,10 +854,9 @@ static int wc_check(struct drm_i915_gem_object *obj)
 
 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
 {
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        bool no_map;
 
-       if (HAS_LMEM(i915))
+       if (obj->ops->mmap_offset)
                return type == I915_MMAP_TYPE_FIXED;
        else if (type == I915_MMAP_TYPE_FIXED)
                return false;
index d812b27..591a522 100644 (file)
@@ -1973,8 +1973,14 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
 u32 intel_rps_read_punit_req(struct intel_rps *rps)
 {
        struct intel_uncore *uncore = rps_to_uncore(rps);
+       struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
+       intel_wakeref_t wakeref;
+       u32 freq = 0;
 
-       return intel_uncore_read(uncore, GEN6_RPNSWREQ);
+       with_intel_runtime_pm_if_in_use(rpm, wakeref)
+               freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+
+       return freq;
 }
 
 static u32 intel_rps_get_req(u32 pureq)
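
The shape of this fix is worth calling out: read the register only if the device already holds a runtime-PM reference, and return a safe default instead of waking suspended hardware from a read path. A generic sketch under assumed names (`my_dev_*` is not a real API):

static u32 read_reg_if_awake(struct my_dev *dev, u32 reg)
{
	u32 val = 0;

	/* Only touch hardware while it is already powered; otherwise
	 * report 0 rather than triggering a resume. */
	if (my_dev_runtime_get_if_in_use(dev)) {
		val = my_dev_read(dev, reg);
		my_dev_runtime_put(dev);
	}

	return val;
}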
index b104fb7..86c3185 100644 (file)
@@ -172,11 +172,6 @@ void intel_uc_driver_remove(struct intel_uc *uc)
        __uc_free_load_err_log(uc);
 }
 
-static inline bool guc_communication_enabled(struct intel_guc *guc)
-{
-       return intel_guc_ct_enabled(&guc->ct);
-}
-
 /*
  * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
  * register using the same bits used in the CT message payload. Since our
@@ -210,7 +205,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
 static void guc_handle_mmio_msg(struct intel_guc *guc)
 {
        /* we need communication to be enabled to reply to GuC */
-       GEM_BUG_ON(!guc_communication_enabled(guc));
+       GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));
 
        spin_lock_irq(&guc->irq_lock);
        if (guc->mmio_msg) {
@@ -226,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc)
        struct drm_i915_private *i915 = gt->i915;
        int ret;
 
-       GEM_BUG_ON(guc_communication_enabled(guc));
+       GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));
 
        ret = i915_inject_probe_error(i915, -ENXIO);
        if (ret)
@@ -662,7 +657,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
                return 0;
 
        /* Make sure we enable communication if and only if it's disabled */
-       GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));
+       GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));
 
        if (enable_communication)
                guc_enable_communication(guc);
index b0ece71..ce77457 100644 (file)
@@ -57,7 +57,7 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
                args->v0.count = 0;
                args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
                args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
-               args->v0.pwrsrc = -ENOSYS;
+               args->v0.pwrsrc = -ENODEV;
                args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
        }
 
index 0473583..482fb0a 100644 (file)
@@ -119,7 +119,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 #endif
 
        if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
-               rdev->agp = radeon_agp_head_init(rdev->ddev);
+               rdev->agp = radeon_agp_head_init(dev);
        if (rdev->agp) {
                rdev->agp->agp_mtrr = arch_phys_wc_add(
                        rdev->agp->agp_info.aper_base,
index 4a11150..b4b4653 100644 (file)
@@ -167,8 +167,6 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
        struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
        bool connected = false;
 
-       WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
-
        if (vc4_hdmi->hpd_gpio &&
            gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
                connected = true;
@@ -189,12 +187,10 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
                        }
                }
 
-               pm_runtime_put(&vc4_hdmi->pdev->dev);
                return connector_status_connected;
        }
 
        cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
-       pm_runtime_put(&vc4_hdmi->pdev->dev);
        return connector_status_disconnected;
 }
 
@@ -436,7 +432,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
        struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
        struct drm_connector *connector = &vc4_hdmi->connector;
        struct drm_connector_state *cstate = connector->state;
-       struct drm_crtc *crtc = cstate->crtc;
+       struct drm_crtc *crtc = encoder->crtc;
        const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
        union hdmi_infoframe frame;
        int ret;
@@ -541,11 +537,8 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
 
 static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
 {
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
-       struct drm_connector *connector = &vc4_hdmi->connector;
-       struct drm_connector_state *cstate = connector->state;
-       struct drm_crtc *crtc = cstate->crtc;
-       struct drm_display_mode *mode = &crtc->state->adjusted_mode;
 
        if (!vc4_hdmi_supports_scrambling(encoder, mode))
                return;
@@ -566,18 +559,17 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
 static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
 {
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
-       struct drm_connector *connector = &vc4_hdmi->connector;
-       struct drm_connector_state *cstate = connector->state;
+       struct drm_crtc *crtc = encoder->crtc;
 
        /*
-        * At boot, connector->state will be NULL. Since we don't know the
+        * At boot, encoder->crtc will be NULL. Since we don't know the
         * state of the scrambler and in order to avoid any
         * inconsistency, let's disable it all the time.
         */
-       if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode))
+       if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode))
                return;
 
-       if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode))
+       if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode))
                return;
 
        if (delayed_work_pending(&vc4_hdmi->scrambling_work))
@@ -635,6 +627,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
                vc4_hdmi->variant->phy_disable(vc4_hdmi);
 
        clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
+       clk_disable_unprepare(vc4_hdmi->hsm_clock);
        clk_disable_unprepare(vc4_hdmi->pixel_clock);
 
        ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
@@ -898,9 +891,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
                vc4_hdmi_encoder_get_connector_state(encoder, state);
        struct vc4_hdmi_connector_state *vc4_conn_state =
                conn_state_to_vc4_hdmi_conn_state(conn_state);
-       struct drm_crtc_state *crtc_state =
-               drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
        unsigned long bvb_rate, pixel_rate, hsm_rate;
        int ret;
@@ -947,6 +938,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
                return;
        }
 
+       ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+       if (ret) {
+               DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
+               clk_disable_unprepare(vc4_hdmi->pixel_clock);
+               return;
+       }
+
        vc4_hdmi_cec_update_clk_div(vc4_hdmi);
 
        if (pixel_rate > 297000000)
@@ -959,6 +957,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
        ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
        if (ret) {
                DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
+               clk_disable_unprepare(vc4_hdmi->hsm_clock);
                clk_disable_unprepare(vc4_hdmi->pixel_clock);
                return;
        }
@@ -966,6 +965,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
        ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
        if (ret) {
                DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
+               clk_disable_unprepare(vc4_hdmi->hsm_clock);
                clk_disable_unprepare(vc4_hdmi->pixel_clock);
                return;
        }
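
Each later failure in this configure path must now also unprepare the HSM clock, and the diff does it by repeating clk_disable_unprepare() in every error branch. The usual kernel alternative, goto-based unwinding in reverse order, is sketched below (hypothetical function, not the vc4 code):

static int example_enable_clocks(struct clk *pixel, struct clk *hsm,
				 struct clk *bvb)
{
	int ret;

	ret = clk_prepare_enable(pixel);
	if (ret)
		return ret;

	ret = clk_prepare_enable(hsm);
	if (ret)
		goto err_pixel;

	ret = clk_prepare_enable(bvb);
	if (ret)
		goto err_hsm;

	return 0;

err_hsm:
	clk_disable_unprepare(hsm);	/* unwind in reverse order */
err_pixel:
	clk_disable_unprepare(pixel);
	return ret;
}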
@@ -985,11 +985,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
 static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
                                             struct drm_atomic_state *state)
 {
-       struct drm_connector_state *conn_state =
-               vc4_hdmi_encoder_get_connector_state(encoder, state);
-       struct drm_crtc_state *crtc_state =
-               drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 
@@ -1012,11 +1008,7 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
 static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
                                              struct drm_atomic_state *state)
 {
-       struct drm_connector_state *conn_state =
-               vc4_hdmi_encoder_get_connector_state(encoder, state);
-       struct drm_crtc_state *crtc_state =
-               drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
        struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
        bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -1204,8 +1196,8 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
 
 static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
 {
-       struct drm_connector *connector = &vc4_hdmi->connector;
-       struct drm_crtc *crtc = connector->state->crtc;
+       struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
+       struct drm_crtc *crtc = encoder->crtc;
        const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
        u32 n, cts;
        u64 tmp;
@@ -1238,13 +1230,13 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai)
 static int vc4_hdmi_audio_startup(struct device *dev, void *data)
 {
        struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-       struct drm_connector *connector = &vc4_hdmi->connector;
+       struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
 
        /*
         * If the HDMI encoder hasn't probed, or the encoder is
         * currently in DVI mode, treat the codec dai as missing.
         */
-       if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
+       if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
                                VC4_HDMI_RAM_PACKET_ENABLE))
                return -ENODEV;
 
@@ -2114,29 +2106,6 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int vc4_hdmi_runtime_suspend(struct device *dev)
-{
-       struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-
-       clk_disable_unprepare(vc4_hdmi->hsm_clock);
-
-       return 0;
-}
-
-static int vc4_hdmi_runtime_resume(struct device *dev)
-{
-       struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-       int ret;
-
-       ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-#endif
-
 static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2391,18 +2360,11 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
        {}
 };
 
-static const struct dev_pm_ops vc4_hdmi_pm_ops = {
-       SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
-                          vc4_hdmi_runtime_resume,
-                          NULL)
-};
-
 struct platform_driver vc4_hdmi_driver = {
        .probe = vc4_hdmi_dev_probe,
        .remove = vc4_hdmi_dev_remove,
        .driver = {
                .name = "vc4_hdmi",
                .of_match_table = vc4_hdmi_dt_match,
-               .pm = &vc4_hdmi_pm_ops,
        },
 };
index b50b7fa..f4c3efc 100644 (file)
@@ -973,7 +973,7 @@ static inline void tx_on(struct scc_priv *priv)
                flags = claim_dma_lock();
                set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
                set_dma_addr(priv->param.dma,
-                            (int) priv->tx_buf[priv->tx_tail] + n);
+                            virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
                set_dma_count(priv->param.dma,
                              priv->tx_len[priv->tx_tail] - n);
                release_dma_lock(flags);
@@ -1020,7 +1020,7 @@ static inline void rx_on(struct scc_priv *priv)
                flags = claim_dma_lock();
                set_dma_mode(priv->param.dma, DMA_MODE_READ);
                set_dma_addr(priv->param.dma,
-                            (int) priv->rx_buf[priv->rx_head]);
+                            virt_to_bus(priv->rx_buf[priv->rx_head]));
                set_dma_count(priv->param.dma, BUF_SIZE);
                release_dma_lock(flags);
                enable_dma(priv->param.dma);
@@ -1233,7 +1233,7 @@ static void special_condition(struct scc_priv *priv, int rc)
                if (priv->param.dma >= 0) {
                        flags = claim_dma_lock();
                        set_dma_addr(priv->param.dma,
-                                    (int) priv->rx_buf[priv->rx_head]);
+                                    virt_to_bus(priv->rx_buf[priv->rx_head]));
                        set_dma_count(priv->param.dma, BUF_SIZE);
                        release_dma_lock(flags);
                } else {
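
The fix above replaces `(int)buf` with `virt_to_bus(buf)`: the cast both truncates the virtual address on 64-bit and wrongly assumes virtual and bus addresses coincide. The truncation half can be shown in plain userspace C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static char buf[16];

	/* Casting a pointer to int keeps only the low 32 bits; on a
	 * 64-bit kernel the DMA controller would be programmed with a
	 * bogus address. virt_to_bus() returns the bus address the ISA
	 * DMA engine actually sees. */
	printf("full=%#lx truncated=%#x\n",
	       (unsigned long)(uintptr_t)buf,
	       (unsigned int)(uintptr_t)buf);
	return 0;
}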
index 7efb31b..6600e13 100644 (file)
@@ -3524,7 +3524,9 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
        lockdep_assert_held(&subsys->lock);
 
        list_for_each_entry(h, &subsys->nsheads, entry) {
-               if (h->ns_id == nsid && nvme_tryget_ns_head(h))
+               if (h->ns_id != nsid)
+                       continue;
+               if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
                        return h;
        }
 
@@ -3843,6 +3845,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
        mutex_lock(&ns->ctrl->subsys->lock);
        list_del_rcu(&ns->siblings);
+       if (list_empty(&ns->head->list)) {
+               list_del_init(&ns->head->entry);
+               last_path = true;
+       }
        mutex_unlock(&ns->ctrl->subsys->lock);
 
        /* guarantee not available in head->list */
@@ -3856,20 +3862,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                nvme_cdev_del(&ns->cdev, &ns->cdev_device);
        del_gendisk(ns->disk);
        blk_cleanup_queue(ns->queue);
-       if (blk_get_integrity(ns->disk))
-               blk_integrity_unregister(ns->disk);
 
        down_write(&ns->ctrl->namespaces_rwsem);
        list_del_init(&ns->list);
        up_write(&ns->ctrl->namespaces_rwsem);
 
-       /* Synchronize with nvme_init_ns_head() */
-       mutex_lock(&ns->head->subsys->lock);
-       if (list_empty(&ns->head->list)) {
-               list_del_init(&ns->head->entry);
-               last_path = true;
-       }
-       mutex_unlock(&ns->head->subsys->lock);
        if (last_path)
                nvme_mpath_shutdown_disk(ns->head);
        nvme_put_ns(ns);
index 5d7bc58..e8ccdd3 100644 (file)
@@ -600,14 +600,17 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
 
        down_read(&ctrl->namespaces_rwsem);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
-               unsigned nsid = le32_to_cpu(desc->nsids[n]);
-
+               unsigned nsid;
+again:
+               nsid = le32_to_cpu(desc->nsids[n]);
                if (ns->head->ns_id < nsid)
                        continue;
                if (ns->head->ns_id == nsid)
                        nvme_update_ns_ana_state(desc, ns);
                if (++n == nr_nsids)
                        break;
+               if (ns->head->ns_id > nsid)
+                       goto again;
        }
        up_read(&ctrl->namespaces_rwsem);
        return 0;
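
Both lists here are sorted by namespace ID, so the fix turns the scan into a proper merge: advance the namespace list while it lags, apply the state on a match, and re-test the same namespace against the next descriptor ID once it has overtaken the current one (the `goto again`). The same merge over two sorted integer arrays, as a standalone illustration:

#include <stdio.h>

/* Merge-style scan: for every id in `ids` (sorted), visit the matching
 * element of `ns` (also sorted), mirroring nvme_update_ana_state(). */
static void scan(const int *ns, int n_ns, const int *ids, int n_ids)
{
	int i = 0, j = 0;

	while (i < n_ns && j < n_ids) {
		if (ns[i] < ids[j]) {
			i++;		/* namespace lags: advance it */
		} else if (ns[i] == ids[j]) {
			printf("update ns %d\n", ns[i]);
			i++;
			j++;
		} else {
			j++;		/* ns overtook: try the next id */
		}
	}
}

int main(void)
{
	const int ns[] = { 1, 2, 5, 7 };
	const int ids[] = { 2, 3, 7 };

	scan(ns, 4, ids, 3);	/* updates ns 2 and ns 7 */
	return 0;
}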
index a68704e..042c594 100644 (file)
@@ -656,8 +656,8 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
        if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
                return;
 
-       nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
+       nvme_rdma_destroy_queue_ib(queue);
        mutex_destroy(&queue->queue_lock);
 }
 
@@ -1815,14 +1815,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
        for (i = 0; i < queue->queue_size; i++) {
                ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
                if (ret)
-                       goto out_destroy_queue_ib;
+                       return ret;
        }
 
        return 0;
-
-out_destroy_queue_ib:
-       nvme_rdma_destroy_queue_ib(queue);
-       return ret;
 }
 
 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
@@ -1916,14 +1912,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
        if (ret) {
                dev_err(ctrl->ctrl.device,
                        "rdma_connect_locked failed (%d).\n", ret);
-               goto out_destroy_queue_ib;
+               return ret;
        }
 
        return 0;
-
-out_destroy_queue_ib:
-       nvme_rdma_destroy_queue_ib(queue);
-       return ret;
 }
 
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
@@ -1954,8 +1946,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
        case RDMA_CM_EVENT_ROUTE_ERROR:
        case RDMA_CM_EVENT_CONNECT_ERROR:
        case RDMA_CM_EVENT_UNREACHABLE:
-               nvme_rdma_destroy_queue_ib(queue);
-               fallthrough;
        case RDMA_CM_EVENT_ADDR_ERROR:
                dev_dbg(queue->ctrl->ctrl.device,
                        "CM error event %d\n", ev->event);
index e2ab12f..e4249b7 100644 (file)
@@ -274,6 +274,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
        } while (ret > 0);
 }
 
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+       return !list_empty(&queue->send_list) ||
+               !llist_empty(&queue->req_list) || queue->more_requests;
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
                bool sync, bool last)
 {
@@ -294,9 +300,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
                nvme_tcp_send_all(queue);
                queue->more_requests = false;
                mutex_unlock(&queue->send_mutex);
-       } else if (last) {
-               queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
        }
+
+       if (last && nvme_tcp_queue_more(queue))
+               queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 }
 
 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
@@ -906,12 +913,6 @@ done:
        read_unlock_bh(&sk->sk_callback_lock);
 }
 
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
-{
-       return !list_empty(&queue->send_list) ||
-               !llist_empty(&queue->req_list) || queue->more_requests;
-}
-
 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
 {
        queue->request = NULL;
@@ -1145,8 +1146,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
                                pending = true;
                        else if (unlikely(result < 0))
                                break;
-               } else
-                       pending = !llist_empty(&queue->req_list);
+               }
 
                result = nvme_tcp_try_recv(queue);
                if (result > 0)
index d784f3c..be5d824 100644 (file)
@@ -1067,7 +1067,7 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
 {
        struct nvmet_subsys *subsys = to_subsys(item);
 
-       return snprintf(page, PAGE_SIZE, "%*s\n",
+       return snprintf(page, PAGE_SIZE, "%.*s\n",
                        NVMET_SN_MAX_SIZE, subsys->serial);
 }
 
index 5b043ee..b0800c2 100644 (file)
@@ -85,7 +85,11 @@ of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
                        break;
        }
 
-       if (i != count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
+       /*
+        * Attempt to initialize a restricted-dma-pool region if one was found.
+        * Note that count can hold a negative error code.
+        */
+       if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
                dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
 }
 
index 3fd74bb..a348348 100644 (file)
@@ -1291,7 +1291,6 @@ DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
 DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
 DEFINE_SIMPLE_PROP(leds, "leds", NULL)
 DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
-DEFINE_SIMPLE_PROP(phy_handle, "phy-handle", NULL)
 DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
 DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
 
@@ -1380,7 +1379,6 @@ static const struct supplier_bindings of_supplier_bindings[] = {
        { .parse_prop = parse_resets, },
        { .parse_prop = parse_leds, },
        { .parse_prop = parse_backlight, },
-       { .parse_prop = parse_phy_handle, },
        { .parse_prop = parse_gpio_compat, },
        { .parse_prop = parse_interrupts, },
        { .parse_prop = parse_regulators, },
index a1b1e2a..0f40943 100644 (file)
@@ -937,7 +937,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev);
 
 void pci_set_acpi_fwnode(struct pci_dev *dev)
 {
-       if (!ACPI_COMPANION(&dev->dev) && !pci_dev_is_added(dev))
+       if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
                ACPI_COMPANION_SET(&dev->dev,
                                   acpi_pci_find_companion(&dev->dev));
 }
index e5089af..4537d1e 100644 (file)
@@ -5435,7 +5435,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                              PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
 
 /*
- * Create device link for NVIDIA GPU with integrated USB xHCI Host
+ * Create device link for GPUs with integrated USB xHCI Host
  * controller to VGA.
  */
 static void quirk_gpu_usb(struct pci_dev *usb)
@@ -5444,9 +5444,11 @@ static void quirk_gpu_usb(struct pci_dev *usb)
 }
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                              PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+                             PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
 
 /*
- * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
+ * Create device link for GPUs with integrated Type-C UCSI controller
  * to VGA. Currently there is no class code defined for UCSI device over PCI
  * so using UNKNOWN class for now and it will be updated when UCSI
  * over PCI gets a class code.
@@ -5459,6 +5461,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                              PCI_CLASS_SERIAL_UNKNOWN, 8,
                              quirk_gpu_usb_typec_ucsi);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+                             PCI_CLASS_SERIAL_UNKNOWN, 8,
+                             quirk_gpu_usb_typec_ucsi);
 
 /*
  * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
index 25557b2..4be2489 100644 (file)
@@ -99,6 +99,24 @@ error:
        return off ?: PCI_VPD_SZ_INVALID;
 }
 
+static bool pci_vpd_available(struct pci_dev *dev)
+{
+       struct pci_vpd *vpd = &dev->vpd;
+
+       if (!vpd->cap)
+               return false;
+
+       if (vpd->len == 0) {
+               vpd->len = pci_vpd_size(dev);
+               if (vpd->len == PCI_VPD_SZ_INVALID) {
+                       vpd->cap = 0;
+                       return false;
+               }
+       }
+
+       return true;
+}
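
pci_vpd_available() turns VPD sizing into lazy initialization: the potentially slow or faulting size probe now runs on first access, and a failed probe is cached by clearing `cap` so later accesses fail fast. The same pattern in miniature (hypothetical names):

struct lazy_res {
	unsigned int cap;	/* 0 once the resource is known unusable */
	unsigned int len;	/* 0 until sized on first use */
};

static bool lazy_res_available(struct lazy_res *r,
			       unsigned int (*probe_size)(void))
{
	if (!r->cap)
		return false;

	if (r->len == 0) {
		r->len = probe_size();
		if (r->len == 0) {
			/* Cache the failure: never probe again. */
			r->cap = 0;
			return false;
		}
	}

	return true;
}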
+
 /*
  * Wait for last operation to complete.
  * This code has to spin since there is no other notification from the PCI
@@ -145,7 +163,7 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
        loff_t end = pos + count;
        u8 *buf = arg;
 
-       if (!vpd->cap)
+       if (!pci_vpd_available(dev))
                return -ENODEV;
 
        if (pos < 0)
@@ -206,7 +224,7 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
        loff_t end = pos + count;
        int ret = 0;
 
-       if (!vpd->cap)
+       if (!pci_vpd_available(dev))
                return -ENODEV;
 
        if (pos < 0 || (pos & 3) || (count & 3))
@@ -242,14 +260,11 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
 
 void pci_vpd_init(struct pci_dev *dev)
 {
+       if (dev->vpd.len == PCI_VPD_SZ_INVALID)
+               return;
+
        dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
        mutex_init(&dev->vpd.lock);
-
-       if (!dev->vpd.len)
-               dev->vpd.len = pci_vpd_size(dev);
-
-       if (dev->vpd.len == PCI_VPD_SZ_INVALID)
-               dev->vpd.cap = 0;
 }
 
 static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
@@ -294,13 +309,14 @@ const struct attribute_group pci_dev_vpd_attr_group = {
 
 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size)
 {
-       unsigned int len = dev->vpd.len;
+       unsigned int len;
        void *buf;
        int cnt;
 
-       if (!dev->vpd.cap)
+       if (!pci_vpd_available(dev))
                return ERR_PTR(-ENODEV);
 
+       len = dev->vpd.len;
        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);
index 3481479..d6a7c89 100644 (file)
@@ -71,7 +71,7 @@
 #define AMD_CPU_ID_YC                  0x14B5
 
 #define PMC_MSG_DELAY_MIN_US           100
-#define RESPONSE_REGISTER_LOOP_MAX     200
+#define RESPONSE_REGISTER_LOOP_MAX     20000
 
 #define SOC_SUBSYSTEM_IP_MAX   12
 #define DELAY_MIN_US           2000
index 821aba3..42513ea 100644 (file)
@@ -166,8 +166,7 @@ config DELL_WMI
 
 config DELL_WMI_PRIVACY
        bool "Dell WMI Hardware Privacy Support"
-       depends on DELL_WMI
-       depends on LEDS_TRIGGER_AUDIO
+       depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
        help
          This option adds integration with the "Dell Hardware Privacy"
          feature of Dell laptops to the dell-wmi driver.
index 7f3a03f..d53634c 100644 (file)
@@ -144,6 +144,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
index a33a582..0859894 100644 (file)
@@ -118,12 +118,30 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = {
        { }
 };
 
+/*
+ * Some devices, even non-convertible ones, can send incorrect SW_TABLET_MODE
+ * reports. Accept such reports only from devices in this list.
+ */
+static const struct dmi_system_id dmi_auto_add_switch[] = {
+       {
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
+               },
+       },
+       {
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
+               },
+       },
+       {} /* Array terminator */
+};
+
 struct intel_hid_priv {
        struct input_dev *input_dev;
        struct input_dev *array;
        struct input_dev *switches;
        bool wakeup_mode;
-       bool dual_accel;
+       bool auto_add_switch;
 };
 
 #define HID_EVENT_FILTER_UUID  "eeec56b3-4442-408f-a792-4edd4d758054"
@@ -452,10 +470,8 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
         * Some convertible have unreliable VGBS return which could cause incorrect
         * SW_TABLET_MODE report, in these cases we enable support when receiving
         * the first event instead of during driver setup.
-        *
-        * See dual_accel_detect.h for more info on the dual_accel check.
         */
-       if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
+       if (!priv->switches && priv->auto_add_switch && (event == 0xcc || event == 0xcd)) {
                dev_info(&device->dev, "switch event received, enable switches supports\n");
                err = intel_hid_switches_setup(device);
                if (err)
@@ -596,7 +612,8 @@ static int intel_hid_probe(struct platform_device *device)
                return -ENOMEM;
        dev_set_drvdata(&device->dev, priv);
 
-       priv->dual_accel = dual_accel_detect();
+       /* See dual_accel_detect.h for more info on the dual_accel check. */
+       priv->auto_add_switch = dmi_check_system(dmi_auto_add_switch) && !dual_accel_detect();
 
        err = intel_hid_input_setup(device);
        if (err) {
index f58b854..66bb39f 100644 (file)
@@ -8,7 +8,6 @@
  * which provide mailbox interface for power management usage.
  */
 
-#include <linux/acpi.h>
 #include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -319,7 +318,7 @@ static struct platform_driver intel_punit_ipc_driver = {
        .remove = intel_punit_ipc_remove,
        .driver = {
                .name = "intel_punit_ipc",
-               .acpi_match_table = ACPI_PTR(punit_ipc_acpi_ids),
+               .acpi_match_table = punit_ipc_acpi_ids,
        },
 };
 
index 3e520d5..88b551c 100644 (file)
@@ -655,7 +655,7 @@ static int acpi_add(struct acpi_device *device)
                goto out_platform_registered;
        }
        product = dmi_get_system_info(DMI_PRODUCT_NAME);
-       if (strlen(product) > 4)
+       if (product && strlen(product) > 4)
                switch (product[4]) {
                case '5':
                case '6':
index 0e1451b..033f797 100644 (file)
@@ -100,10 +100,10 @@ static const struct ts_dmi_data chuwi_hi10_air_data = {
 };
 
 static const struct property_entry chuwi_hi10_plus_props[] = {
-       PROPERTY_ENTRY_U32("touchscreen-min-x", 0),
-       PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
-       PROPERTY_ENTRY_U32("touchscreen-size-x", 1914),
-       PROPERTY_ENTRY_U32("touchscreen-size-y", 1283),
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 12),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1908),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
@@ -111,6 +111,15 @@ static const struct property_entry chuwi_hi10_plus_props[] = {
 };
 
 static const struct ts_dmi_data chuwi_hi10_plus_data = {
+       .embedded_fw = {
+               .name   = "silead/gsl1680-chuwi-hi10plus.fw",
+               .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+               .length = 34056,
+               .sha256 = { 0xfd, 0x0a, 0x08, 0x08, 0x3c, 0xa6, 0x34, 0x4e,
+                           0x2c, 0x49, 0x9c, 0xcd, 0x7d, 0x44, 0x9d, 0x38,
+                           0x10, 0x68, 0xb5, 0xbd, 0xb7, 0x2a, 0x63, 0xb5,
+                           0x67, 0x0b, 0x96, 0xbd, 0x89, 0x67, 0x85, 0x09 },
+       },
        .acpi_name      = "MSSL0017:00",
        .properties     = chuwi_hi10_plus_props,
 };
@@ -141,6 +150,33 @@ static const struct ts_dmi_data chuwi_hi10_pro_data = {
        .properties     = chuwi_hi10_pro_props,
 };
 
+static const struct property_entry chuwi_hibook_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 30),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 4),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1892),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1276),
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+       PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"),
+       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
+       { }
+};
+
+static const struct ts_dmi_data chuwi_hibook_data = {
+       .embedded_fw = {
+               .name   = "silead/gsl1680-chuwi-hibook.fw",
+               .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+               .length = 40392,
+               .sha256 = { 0xf7, 0xc0, 0xe8, 0x5a, 0x6c, 0xf2, 0xeb, 0x8d,
+                           0x12, 0xc4, 0x45, 0xbf, 0x55, 0x13, 0x4c, 0x1a,
+                           0x13, 0x04, 0x31, 0x08, 0x65, 0x73, 0xf7, 0xa8,
+                           0x1b, 0x7d, 0x59, 0xc9, 0xe6, 0x97, 0xf7, 0x38 },
+       },
+       .acpi_name      = "MSSL0017:00",
+       .properties     = chuwi_hibook_props,
+};
+
 static const struct property_entry chuwi_vi8_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
        PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
@@ -980,6 +1016,16 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                },
        },
        {
+               /* Chuwi HiBook (CWI514) */
+               .driver_data = (void *)&chuwi_hibook_data,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+                       DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+                       /* Above matches are too generic, add bios-date match */
+                       DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
+               },
+       },
+       {
                /* Chuwi Vi8 (CWI506) */
                .driver_data = (void *)&chuwi_vi8_data,
                .matches = {
index 1d78b45..e34face 100644 (file)
@@ -269,5 +269,3 @@ module_exit(max14577_regulator_exit);
 MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
 MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:max14577-regulator");
-MODULE_ALIAS("platform:max77836-regulator");
index 6cca910..7f458d5 100644 (file)
@@ -991,7 +991,7 @@ static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
        RPMH_VREG("ldo4",   "ldo%s4",  &pmic5_nldo,      "vdd-l4"),
        RPMH_VREG("ldo5",   "ldo%s5",  &pmic5_pldo,      "vdd-l5-l6"),
        RPMH_VREG("ldo6",   "ldo%s6",  &pmic5_pldo,      "vdd-l5-l6"),
-       RPMH_VREG("ldo7",   "ldo%s6",  &pmic5_pldo_lv,   "vdd-l7"),
+       RPMH_VREG("ldo7",   "ldo%s7",  &pmic5_pldo_lv,   "vdd-l7"),
        {}
 };
 
index 2f3515f..f3d5c7f 100644 (file)
@@ -45,13 +45,14 @@ static void __init sclp_early_facilities_detect(void)
        sclp.has_gisaf = !!(sccb->fac118 & 0x08);
        sclp.has_hvs = !!(sccb->fac119 & 0x80);
        sclp.has_kss = !!(sccb->fac98 & 0x01);
-       sclp.has_sipl = !!(sccb->cbl & 0x4000);
        if (sccb->fac85 & 0x02)
                S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
        if (sccb->fac91 & 0x40)
                S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
        if (sccb->cpuoff > 134)
                sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
+       if (sccb->cpuoff > 137)
+               sclp.has_sipl = !!(sccb->cbl & 0x4000);
        sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
        sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
        sclp.rzm <<= 20;
index f433428..d9b8049 100644 (file)
@@ -213,7 +213,6 @@ static inline int ap_fetch_qci_info(struct ap_config_info *info)
  * ap_init_qci_info(): Allocate and query qci config info.
  * Also updates the static variables ap_max_domain_id
  * and ap_max_adapter_id if this info is available.
-
  */
 static void __init ap_init_qci_info(void)
 {
@@ -439,6 +438,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
 /**
  * ap_interrupt_handler() - Schedule ap_tasklet on interrupt
  * @airq: pointer to adapter interrupt descriptor
+ * @floating: ignored
  */
 static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
 {
@@ -1786,6 +1786,7 @@ static inline void ap_scan_adapter(int ap)
 /**
  * ap_scan_bus(): Scan the AP bus for new devices
  * Runs periodically, workqueue timer (ap_config_time)
+ * @unused: Unused pointer.
  */
 static void ap_scan_bus(struct work_struct *unused)
 {
index d70c4d3..9ea48bf 100644 (file)
@@ -20,7 +20,7 @@ static void __ap_flush_queue(struct ap_queue *aq);
 
 /**
  * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
  * @ind: the notification indicator byte
  *
  * Enables interruption on AP queue via ap_aqic(). Based on the return
@@ -311,7 +311,7 @@ static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
 
 /**
  * ap_sm_reset(): Reset an AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
  *
  * Submit the Reset command to an AP queue.
  */
index 540861c..553b6b9 100644 (file)
@@ -600,6 +600,12 @@ static int rockchip_spi_transfer_one(
        int ret;
        bool use_dma;
 
+       /* Zero-length transfers won't trigger an interrupt on completion */
+       if (!xfer->len) {
+               spi_finalize_current_transfer(ctlr);
+               return 1;
+       }
+
        WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
                (readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
 
index ebd27f8..8ce840c 100644 (file)
@@ -204,9 +204,6 @@ struct tegra_slink_data {
        struct dma_async_tx_descriptor          *tx_dma_desc;
 };
 
-static int tegra_slink_runtime_suspend(struct device *dev);
-static int tegra_slink_runtime_resume(struct device *dev);
-
 static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
                unsigned long reg)
 {
@@ -1185,6 +1182,7 @@ static int tegra_slink_resume(struct device *dev)
 }
 #endif
 
+#ifdef CONFIG_PM
 static int tegra_slink_runtime_suspend(struct device *dev)
 {
        struct spi_master *master = dev_get_drvdata(dev);
@@ -1210,6 +1208,7 @@ static int tegra_slink_runtime_resume(struct device *dev)
        }
        return 0;
 }
+#endif /* CONFIG_PM */
 
 static const struct dev_pm_ops slink_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
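
Wrapping the runtime-PM callbacks in #ifdef CONFIG_PM (and dropping the forward declarations) matches how SET_RUNTIME_PM_OPS() behaves: with CONFIG_PM disabled the macro discards its arguments, so unguarded definitions would only trigger -Wunused-function. A self-contained toy of the shape; the macro here is a simplified stand-in, not the kernel's.

#include <stdio.h>

struct dev_pm_ops {
        int (*runtime_suspend)(void);
        int (*runtime_resume)(void);
};

#ifdef CONFIG_PM
static int runtime_suspend(void) { puts("runtime suspend"); return 0; }
static int runtime_resume(void)  { puts("runtime resume");  return 0; }
#define SET_RUNTIME_PM_OPS(s, r) { (s), (r) }
#else
/* The callbacks are never referenced, so they must not be defined here. */
#define SET_RUNTIME_PM_OPS(s, r) { NULL, NULL }
#endif

static const struct dev_pm_ops pm =
        SET_RUNTIME_PM_OPS(runtime_suspend, runtime_resume);

int main(void)          /* try building with and without -DCONFIG_PM */
{
        if (pm.runtime_suspend)
                pm.runtime_suspend();
        return 0;
}
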
index 57e2499..aea037c 100644 (file)
@@ -58,10 +58,6 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
        const struct spi_device *spi = to_spi_device(dev);
        int len;
 
-       len = of_device_modalias(dev, buf, PAGE_SIZE);
-       if (len != -ENODEV)
-               return len;
-
        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;
@@ -367,10 +363,6 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
        const struct spi_device         *spi = to_spi_device(dev);
        int rc;
 
-       rc = of_device_uevent_modalias(dev, env);
-       if (rc != -ENODEV)
-               return rc;
-
        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;
index d33c5cd..b26b79d 100644 (file)
@@ -582,7 +582,9 @@ config FB_HP300
 
 config FB_TGA
        tristate "TGA/SFB+ framebuffer support"
-       depends on FB && (ALPHA || TC)
+       depends on FB
+       depends on PCI || TC
+       depends on ALPHA || TC
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
index 5f1ce59..a37eb52 100644 (file)
@@ -214,7 +214,7 @@ config XEN_PVCALLS_FRONTEND
          implements them.
 
 config XEN_PVCALLS_BACKEND
-       bool "XEN PV Calls backend driver"
+       tristate "XEN PV Calls backend driver"
        depends on INET && XEN && XEN_BACKEND
        help
          Experimental backend for the Xen PV Calls protocol
index 671c712..2d28038 100644 (file)
@@ -43,6 +43,8 @@
 #include <linux/sched.h>
 #include <linux/cred.h>
 #include <linux/errno.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <linux/mm.h>
 #include <linux/memblock.h>
 #include <linux/pagemap.h>
@@ -115,7 +117,7 @@ static struct ctl_table xen_root[] = {
 #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
 
 /*
- * balloon_process() state:
+ * balloon_thread() state:
  *
  * BP_DONE: done or nothing to do,
  * BP_WAIT: wait to be rescheduled,
@@ -130,6 +132,8 @@ enum bp_state {
        BP_ECANCELED
 };
 
+/* Main waiting point for xen-balloon thread. */
+static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);
 
 static DEFINE_MUTEX(balloon_mutex);
 
@@ -144,10 +148,6 @@ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
 static LIST_HEAD(ballooned_pages);
 static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
 
-/* Main work function, always executed in process context. */
-static void balloon_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
-
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -366,7 +366,7 @@ static void xen_online_page(struct page *page, unsigned int order)
 static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
 {
        if (val == MEM_ONLINE)
-               schedule_delayed_work(&balloon_worker, 0);
+               wake_up(&balloon_thread_wq);
 
        return NOTIFY_OK;
 }
@@ -491,18 +491,43 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 }
 
 /*
- * As this is a work item it is guaranteed to run as a single instance only.
+ * Stop waiting if either state is not BP_EAGAIN and ballooning action is
+ * needed, or if the credit has changed while state is BP_EAGAIN.
+ */
+static bool balloon_thread_cond(enum bp_state state, long credit)
+{
+       if (state != BP_EAGAIN)
+               credit = 0;
+
+       return current_credit() != credit || kthread_should_stop();
+}
+
+/*
+ * As this is a kthread it is guaranteed to run as a single instance only.
  * We may of course race updates of the target counts (which are protected
  * by the balloon lock), or with changes to the Xen hard limit, but we will
  * recover from these in time.
  */
-static void balloon_process(struct work_struct *work)
+static int balloon_thread(void *unused)
 {
        enum bp_state state = BP_DONE;
        long credit;
+       unsigned long timeout;
+
+       set_freezable();
+       for (;;) {
+               if (state == BP_EAGAIN)
+                       timeout = balloon_stats.schedule_delay * HZ;
+               else
+                       timeout = 3600 * HZ;
+               credit = current_credit();
 
+               wait_event_interruptible_timeout(balloon_thread_wq,
+                                balloon_thread_cond(state, credit), timeout);
+
+               if (kthread_should_stop())
+                       return 0;
 
-       do {
                mutex_lock(&balloon_mutex);
 
                credit = current_credit();
@@ -529,12 +554,7 @@ static void balloon_process(struct work_struct *work)
                mutex_unlock(&balloon_mutex);
 
                cond_resched();
-
-       } while (credit && state == BP_DONE);
-
-       /* Schedule more work if there is some still to be done. */
-       if (state == BP_EAGAIN)
-               schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
+       }
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
@@ -542,7 +562,7 @@ void balloon_set_new_target(unsigned long target)
 {
        /* No need for lock. Not read-modify-write updates. */
        balloon_stats.target_pages = target;
-       schedule_delayed_work(&balloon_worker, 0);
+       wake_up(&balloon_thread_wq);
 }
 EXPORT_SYMBOL_GPL(balloon_set_new_target);
 
@@ -647,7 +667,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
 
        /* The balloon may be too large now. Shrink it if needed. */
        if (current_credit())
-               schedule_delayed_work(&balloon_worker, 0);
+               wake_up(&balloon_thread_wq);
 
        mutex_unlock(&balloon_mutex);
 }
@@ -679,6 +699,8 @@ static void __init balloon_add_region(unsigned long start_pfn,
 
 static int __init balloon_init(void)
 {
+       struct task_struct *task;
+
        if (!xen_domain())
                return -ENODEV;
 
@@ -722,6 +744,12 @@ static int __init balloon_init(void)
        }
 #endif
 
+       task = kthread_run(balloon_thread, NULL, "xen-balloon");
+       if (IS_ERR(task)) {
+               pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
+               return PTR_ERR(task);
+       }
+
        /* Init the xen-balloon driver. */
        xen_balloon_init();
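
The conversion above replaces a self-rearming delayed work item with a freezable kthread: the loop sleeps on balloon_thread_wq with a timeout (the retry delay in the BP_EAGAIN case, an hour otherwise), wakes early when the target changes, and checks kthread_should_stop() on every pass. A userspace analogue of that loop using pthreads, all names illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER; /* balloon_thread_wq */
static long target, current_pages;
static bool stop;

static void *balloon_thread(void *unused)
{
        (void)unused;
        pthread_mutex_lock(&lock);
        for (;;) {
                struct timespec to;

                clock_gettime(CLOCK_REALTIME, &to);
                to.tv_sec += 1;                 /* schedule_delay analogue */
                while (!stop && target == current_pages)
                        if (pthread_cond_timedwait(&wq, &lock, &to))
                                break;          /* timed out: recheck state */
                if (stop)                       /* kthread_should_stop() */
                        break;
                if (current_pages != target) {  /* one batch of "ballooning" */
                        current_pages = target;
                        printf("ballooned to %ld pages\n", current_pages);
                }
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, balloon_thread, NULL);
        pthread_mutex_lock(&lock);
        target = 4;                             /* balloon_set_new_target() */
        pthread_cond_signal(&wq);               /* wake_up() */
        pthread_mutex_unlock(&lock);
        sleep(1);
        pthread_mutex_lock(&lock);
        stop = true;
        pthread_cond_signal(&wq);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
}
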
 
index 643fe44..8c10edf 100644 (file)
@@ -106,27 +106,26 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 
 static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
-       int i, rc;
-       int dma_bits;
+       int rc;
+       unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+       unsigned int i, dma_bits = order + PAGE_SHIFT;
        dma_addr_t dma_handle;
        phys_addr_t p = virt_to_phys(buf);
 
-       dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
+       BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
+       BUG_ON(nslabs % IO_TLB_SEGSIZE);
 
        i = 0;
        do {
-               int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
-
                do {
                        rc = xen_create_contiguous_region(
-                               p + (i << IO_TLB_SHIFT),
-                               get_order(slabs << IO_TLB_SHIFT),
+                               p + (i << IO_TLB_SHIFT), order,
                                dma_bits, &dma_handle);
                } while (rc && dma_bits++ < MAX_DMA_BITS);
                if (rc)
                        return rc;
 
-               i += slabs;
+               i += IO_TLB_SEGSIZE;
        } while (i < nslabs);
        return 0;
 }
@@ -153,9 +152,7 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
        return "";
 }
 
-#define DEFAULT_NSLABS         ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
-
-int __ref xen_swiotlb_init(void)
+int xen_swiotlb_init(void)
 {
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned long bytes = swiotlb_size_or_default();
@@ -185,7 +182,7 @@ retry:
                order--;
        }
        if (!start)
-               goto error;
+               goto exit;
        if (order != get_order(bytes)) {
                pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
                        (PAGE_SIZE << order) >> 20);
@@ -208,15 +205,15 @@ retry:
        swiotlb_set_max_segment(PAGE_SIZE);
        return 0;
 error:
-       if (repeat--) {
+       if (nslabs > 1024 && repeat--) {
                /* Min is 2MB */
-               nslabs = max(1024UL, (nslabs >> 1));
-               pr_info("Lowering to %luMB\n",
-                       (nslabs << IO_TLB_SHIFT) >> 20);
+               nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
+               bytes = nslabs << IO_TLB_SHIFT;
+               pr_info("Lowering to %luMB\n", bytes >> 20);
                goto retry;
        }
+exit:
        pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
-       free_pages((unsigned long)start, order);
        return rc;
 }
 
@@ -244,9 +241,9 @@ retry:
        rc = xen_swiotlb_fixup(start, nslabs);
        if (rc) {
                memblock_free(__pa(start), PAGE_ALIGN(bytes));
-               if (repeat--) {
+               if (nslabs > 1024 && repeat--) {
                        /* Min is 2MB */
-                       nslabs = max(1024UL, (nslabs >> 1));
+                       nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
                        bytes = nslabs << IO_TLB_SHIFT;
                        pr_info("Lowering to %luMB\n", bytes >> 20);
                        goto retry;
@@ -254,7 +251,7 @@ retry:
                panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
        }
 
-       if (swiotlb_init_with_tbl(start, nslabs, false))
+       if (swiotlb_init_with_tbl(start, nslabs, true))
                panic("Cannot allocate SWIOTLB buffer");
        swiotlb_set_max_segment(PAGE_SIZE);
 }
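
Both error paths above now shrink the buffer the same way: halve nslabs, realign it to IO_TLB_SEGSIZE, and refuse to go below 1024 slabs, which at 2 KiB per slab (IO_TLB_SHIFT = 11) is the 2 MB floor the comments mention. A small sketch of that retry-with-halving fallback; try_alloc() is a pretend allocator, not the Xen API.

#include <stdbool.h>
#include <stdio.h>

#define IO_TLB_SHIFT   11              /* 2 KiB per slab */
#define IO_TLB_SEGSIZE 128
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

static bool try_alloc(unsigned long nslabs)
{
        return nslabs <= 4096;         /* pretend only <= 8 MB succeeds */
}

int main(void)
{
        unsigned long nslabs = 32768;  /* 64 MB default */
        int repeat = 3;

        while (!try_alloc(nslabs)) {
                if (nslabs <= 1024 || repeat-- == 0)
                        return 1;      /* give up at the 2 MB floor */
                nslabs = ALIGN_UP(nslabs / 2, IO_TLB_SEGSIZE);
                if (nslabs < 1024)
                        nslabs = 1024;
                printf("Lowering to %luMB\n", (nslabs << IO_TLB_SHIFT) >> 20);
        }
        printf("OK with %lu slabs\n", nslabs);
        return 0;
}
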
index 7d9b23d..1b4d580 100644 (file)
 #include "internal.h"
 
 /*
+ * Handle invalidation of an mmap'd file.  We invalidate all the PTEs referring
+ * to the pages in this file's pagecache, forcing the kernel to go through
+ * ->fault() or ->page_mkwrite() - at which point we can handle invalidation
+ * more fully.
+ */
+void afs_invalidate_mmap_work(struct work_struct *work)
+{
+       struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work);
+
+       unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false);
+}
+
+void afs_server_init_callback_work(struct work_struct *work)
+{
+       struct afs_server *server = container_of(work, struct afs_server, initcb_work);
+       struct afs_vnode *vnode;
+       struct afs_cell *cell = server->cell;
+
+       down_read(&cell->fs_open_mmaps_lock);
+
+       list_for_each_entry(vnode, &cell->fs_open_mmaps, cb_mmap_link) {
+               if (vnode->cb_server == server) {
+                       clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+                       queue_work(system_unbound_wq, &vnode->cb_work);
+               }
+       }
+
+       up_read(&cell->fs_open_mmaps_lock);
+}
+
+/*
  * Allow the fileserver to request callback state (re-)initialisation.
  * Unfortunately, UUIDs are not guaranteed unique.
  */
@@ -29,8 +60,11 @@ void afs_init_callback_state(struct afs_server *server)
        rcu_read_lock();
        do {
                server->cb_s_break++;
-               server = rcu_dereference(server->uuid_next);
-       } while (0);
+               atomic_inc(&server->cell->fs_s_break);
+               if (!list_empty(&server->cell->fs_open_mmaps))
+                       queue_work(system_unbound_wq, &server->initcb_work);
+
+       } while ((server = rcu_dereference(server->uuid_next)));
        rcu_read_unlock();
 }
 
@@ -44,11 +78,17 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
        clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
        if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
                vnode->cb_break++;
+               vnode->cb_v_break = vnode->volume->cb_v_break;
                afs_clear_permits(vnode);
 
                if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
                        afs_lock_may_be_available(vnode);
 
+               if (reason != afs_cb_break_for_deleted &&
+                   vnode->status.type == AFS_FTYPE_FILE &&
+                   atomic_read(&vnode->cb_nr_mmap))
+                       queue_work(system_unbound_wq, &vnode->cb_work);
+
                trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
        } else {
                trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false);
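
The scheme above keeps callback breaks cheap: the break itself only clears the promise bit and, for files with active mmaps, queues work that unmaps the PTEs; the expensive revalidation then happens lazily when the next access faults back in. A toy userspace analogue of that invalidate-now/refresh-on-next-access split, purely illustrative and using no AFS API:

#include <stdbool.h>
#include <stdio.h>

struct cached_file {
        bool valid;             /* AFS_VNODE_CB_PROMISED analogue */
        int  data;
};

static void server_break_callback(struct cached_file *f)
{
        f->valid = false;       /* cheap: just invalidate */
}

static int read_file(struct cached_file *f)
{
        if (!f->valid) {        /* the "refault" path */
                f->data++;      /* refetch from the server */
                f->valid = true;
        }
        return f->data;
}

int main(void)
{
        struct cached_file f = { .valid = true, .data = 1 };

        printf("%d\n", read_file(&f));  /* cached: 1 */
        server_break_callback(&f);
        printf("%d\n", read_file(&f));  /* revalidated: 2 */
        return 0;
}
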
index 887b673..d88407f 100644 (file)
@@ -166,6 +166,8 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
        seqlock_init(&cell->volume_lock);
        cell->fs_servers = RB_ROOT;
        seqlock_init(&cell->fs_lock);
+       INIT_LIST_HEAD(&cell->fs_open_mmaps);
+       init_rwsem(&cell->fs_open_mmaps_lock);
        rwlock_init(&cell->vl_servers_lock);
        cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
 
index ac829e6..4579bbd 100644 (file)
@@ -1077,9 +1077,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
  */
 static int afs_d_revalidate_rcu(struct dentry *dentry)
 {
-       struct afs_vnode *dvnode, *vnode;
+       struct afs_vnode *dvnode;
        struct dentry *parent;
-       struct inode *dir, *inode;
+       struct inode *dir;
        long dir_version, de_version;
 
        _enter("%p", dentry);
@@ -1109,18 +1109,6 @@ static int afs_d_revalidate_rcu(struct dentry *dentry)
                        return -ECHILD;
        }
 
-       /* Check to see if the vnode referred to by the dentry still
-        * has a callback.
-        */
-       if (d_really_is_positive(dentry)) {
-               inode = d_inode_rcu(dentry);
-               if (inode) {
-                       vnode = AFS_FS_I(inode);
-                       if (!afs_check_validity(vnode))
-                               return -ECHILD;
-               }
-       }
-
        return 1; /* Still valid */
 }
 
@@ -1156,17 +1144,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        if (IS_ERR(key))
                key = NULL;
 
-       if (d_really_is_positive(dentry)) {
-               inode = d_inode(dentry);
-               if (inode) {
-                       vnode = AFS_FS_I(inode);
-                       afs_validate(vnode, key);
-                       if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
-                               goto out_bad;
-               }
-       }
-
-       /* lock down the parent dentry so we can peer at it */
+       /* Hold the parent dentry so we can peer at it */
        parent = dget_parent(dentry);
        dir = AFS_FS_I(d_inode(parent));
 
@@ -1175,7 +1153,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 
        if (test_bit(AFS_VNODE_DELETED, &dir->flags)) {
                _debug("%pd: parent dir deleted", dentry);
-               goto out_bad_parent;
+               goto not_found;
        }
 
        /* We only need to invalidate a dentry if the server's copy changed
@@ -1201,12 +1179,12 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        case 0:
                /* the filename maps to something */
                if (d_really_is_negative(dentry))
-                       goto out_bad_parent;
+                       goto not_found;
                inode = d_inode(dentry);
                if (is_bad_inode(inode)) {
                        printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n",
                               dentry);
-                       goto out_bad_parent;
+                       goto not_found;
                }
 
                vnode = AFS_FS_I(inode);
@@ -1228,9 +1206,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
                               dentry, fid.unique,
                               vnode->fid.unique,
                               vnode->vfs_inode.i_generation);
-                       write_seqlock(&vnode->cb_lock);
-                       set_bit(AFS_VNODE_DELETED, &vnode->flags);
-                       write_sequnlock(&vnode->cb_lock);
                        goto not_found;
                }
                goto out_valid;
@@ -1245,7 +1220,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        default:
                _debug("failed to iterate dir %pd: %d",
                       parent, ret);
-               goto out_bad_parent;
+               goto not_found;
        }
 
 out_valid:
@@ -1256,16 +1231,9 @@ out_valid_noupdate:
        _leave(" = 1 [valid]");
        return 1;
 
-       /* the dirent, if it exists, now points to a different vnode */
 not_found:
-       spin_lock(&dentry->d_lock);
-       dentry->d_flags |= DCACHE_NFSFS_RENAMED;
-       spin_unlock(&dentry->d_lock);
-
-out_bad_parent:
        _debug("dropping dentry %pd2", dentry);
        dput(parent);
-out_bad:
        key_put(key);
 
        _leave(" = 0 [bad]");
@@ -1792,6 +1760,10 @@ static int afs_link(struct dentry *from, struct inode *dir,
                goto error;
        }
 
+       ret = afs_validate(vnode, op->key);
+       if (ret < 0)
+               goto error_op;
+
        afs_op_set_vnode(op, 0, dvnode);
        afs_op_set_vnode(op, 1, vnode);
        op->file[0].dv_delta = 1;
@@ -1805,6 +1777,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
        op->create.reason       = afs_edit_dir_for_link;
        return afs_do_sync_operation(op);
 
+error_op:
+       afs_put_operation(op);
 error:
        d_drop(dentry);
        _leave(" = %d", ret);
@@ -1989,6 +1963,11 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
        if (IS_ERR(op))
                return PTR_ERR(op);
 
+       ret = afs_validate(vnode, op->key);
+       op->error = ret;
+       if (ret < 0)
+               goto error;
+
        afs_op_set_vnode(op, 0, orig_dvnode);
        afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
        op->file[0].dv_delta = 1;
index f4600c1..540b9fc 100644 (file)
@@ -263,7 +263,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
                if (b == nr_blocks) {
                        _debug("init %u", b);
                        afs_edit_init_block(meta, block, b);
-                       i_size_write(&vnode->vfs_inode, (b + 1) * AFS_DIR_BLOCK_SIZE);
+                       afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
                }
 
                /* Only lower dir pages have a counter in the header. */
@@ -296,7 +296,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
 new_directory:
        afs_edit_init_block(meta, meta, 0);
        i_size = AFS_DIR_BLOCK_SIZE;
-       i_size_write(&vnode->vfs_inode, i_size);
+       afs_set_i_size(vnode, i_size);
        slot = AFS_DIR_RESV_BLOCKS0;
        page = page0;
        block = meta;
index db035ae..e6c447a 100644 (file)
@@ -24,12 +24,16 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
 
 static void afs_readahead(struct readahead_control *ractl);
+static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+static void afs_vm_open(struct vm_area_struct *area);
+static void afs_vm_close(struct vm_area_struct *area);
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff);
 
 const struct file_operations afs_file_operations = {
        .open           = afs_open,
        .release        = afs_release,
        .llseek         = generic_file_llseek,
-       .read_iter      = generic_file_read_iter,
+       .read_iter      = afs_file_read_iter,
        .write_iter     = afs_file_write,
        .mmap           = afs_file_mmap,
        .splice_read    = generic_file_splice_read,
@@ -59,8 +63,10 @@ const struct address_space_operations afs_fs_aops = {
 };
 
 static const struct vm_operations_struct afs_vm_ops = {
+       .open           = afs_vm_open,
+       .close          = afs_vm_close,
        .fault          = filemap_fault,
-       .map_pages      = filemap_map_pages,
+       .map_pages      = afs_vm_map_pages,
        .page_mkwrite   = afs_page_mkwrite,
 };
 
@@ -295,7 +301,7 @@ static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
        fsreq->subreq   = subreq;
        fsreq->pos      = subreq->start + subreq->transferred;
        fsreq->len      = subreq->len   - subreq->transferred;
-       fsreq->key      = subreq->rreq->netfs_priv;
+       fsreq->key      = key_get(subreq->rreq->netfs_priv);
        fsreq->vnode    = vnode;
        fsreq->iter     = &fsreq->def_iter;
 
@@ -304,6 +310,7 @@ static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
                        fsreq->pos, fsreq->len);
 
        afs_fetch_data(fsreq->vnode, fsreq);
+       afs_put_read(fsreq);
 }
 
 static int afs_symlink_readpage(struct page *page)
@@ -490,15 +497,88 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
        return 1;
 }
 
+static void afs_add_open_mmap(struct afs_vnode *vnode)
+{
+       if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
+               down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+               list_add_tail(&vnode->cb_mmap_link,
+                             &vnode->volume->cell->fs_open_mmaps);
+
+               up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+       }
+}
+
+static void afs_drop_open_mmap(struct afs_vnode *vnode)
+{
+       if (!atomic_dec_and_test(&vnode->cb_nr_mmap))
+               return;
+
+       down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+       if (atomic_read(&vnode->cb_nr_mmap) == 0)
+               list_del_init(&vnode->cb_mmap_link);
+
+       up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+       flush_work(&vnode->cb_work);
+}
+
 /*
  * Handle setting up a memory mapping on an AFS file.
  */
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        int ret;
 
+       afs_add_open_mmap(vnode);
+
        ret = generic_file_mmap(file, vma);
        if (ret == 0)
                vma->vm_ops = &afs_vm_ops;
+       else
+               afs_drop_open_mmap(vnode);
        return ret;
 }
+
+static void afs_vm_open(struct vm_area_struct *vma)
+{
+       afs_add_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static void afs_vm_close(struct vm_area_struct *vma)
+{
+       afs_drop_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff)
+{
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(vmf->vma->vm_file));
+       struct afs_file *af = vmf->vma->vm_file->private_data;
+
+       switch (afs_validate(vnode, af->key)) {
+       case 0:
+               return filemap_map_pages(vmf, start_pgoff, end_pgoff);
+       case -ENOMEM:
+               return VM_FAULT_OOM;
+       case -EINTR:
+       case -ERESTARTSYS:
+               return VM_FAULT_RETRY;
+       case -ESTALE:
+       default:
+               return VM_FAULT_SIGBUS;
+       }
+}
+
+static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+       struct afs_file *af = iocb->ki_filp->private_data;
+       int ret;
+
+       ret = afs_validate(vnode, af->key);
+       if (ret < 0)
+               return ret;
+
+       return generic_file_read_iter(iocb, iter);
+}
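
afs_add_open_mmap()/afs_drop_open_mmap() above are a refcount-plus-registry pattern: the vnode joins the cell's list on the 0 -> 1 mapping transition and leaves on the final drop, rechecking the counter under the write lock to close the race with a concurrent mmap. A compact C11 sketch of the same idea, with a plain counter standing in for the list:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t open_mmaps_lock = PTHREAD_MUTEX_INITIALIZER;
static int open_mmaps;                          /* stand-in for the list */

struct vnode { atomic_int nr_mmap; };

static void add_open_mmap(struct vnode *v)
{
        if (atomic_fetch_add(&v->nr_mmap, 1) == 0) {    /* 0 -> 1 */
                pthread_mutex_lock(&open_mmaps_lock);
                open_mmaps++;                           /* list_add_tail() */
                pthread_mutex_unlock(&open_mmaps_lock);
        }
}

static void drop_open_mmap(struct vnode *v)
{
        if (atomic_fetch_sub(&v->nr_mmap, 1) != 1)      /* not the last */
                return;
        pthread_mutex_lock(&open_mmaps_lock);
        if (atomic_load(&v->nr_mmap) == 0)              /* recheck under lock */
                open_mmaps--;                           /* list_del_init() */
        pthread_mutex_unlock(&open_mmaps_lock);
}

int main(void)
{
        struct vnode v = { 0 };

        add_open_mmap(&v);      /* mmap()   */
        add_open_mmap(&v);      /* vm open  */
        drop_open_mmap(&v);     /* vm close */
        drop_open_mmap(&v);     /* munmap() */
        printf("registered: %d\n", open_mmaps);         /* 0 */
        return 0;
}
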
index e7e98ad..c0031a3 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/slab.h>
 #include "afs_fs.h"
 #include "internal.h"
+#include "protocol_afs.h"
 #include "protocol_yfs.h"
 
 static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ;
@@ -102,7 +103,7 @@ void afs_fileserver_probe_result(struct afs_call *call)
        struct afs_addr_list *alist = call->alist;
        struct afs_server *server = call->server;
        unsigned int index = call->addr_ix;
-       unsigned int rtt_us = 0;
+       unsigned int rtt_us = 0, cap0;
        int ret = call->error;
 
        _enter("%pU,%u", &server->uuid, index);
@@ -159,6 +160,11 @@ responded:
                        clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
                        alist->addrs[index].srx_service = call->service_id;
                }
+               cap0 = ntohl(call->tmp);
+               if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES)
+                       set_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
+               else
+                       clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
        }
 
        if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
index dd3f45d..4943413 100644 (file)
@@ -456,9 +456,7 @@ void afs_fs_fetch_data(struct afs_operation *op)
        struct afs_read *req = op->fetch.req;
        __be32 *bp;
 
-       if (upper_32_bits(req->pos) ||
-           upper_32_bits(req->len) ||
-           upper_32_bits(req->pos + req->len))
+       if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
                return afs_fs_fetch_data64(op);
 
        _enter("");
@@ -1113,9 +1111,7 @@ void afs_fs_store_data(struct afs_operation *op)
               (unsigned long long)op->store.pos,
               (unsigned long long)op->store.i_size);
 
-       if (upper_32_bits(op->store.pos) ||
-           upper_32_bits(op->store.size) ||
-           upper_32_bits(op->store.i_size))
+       if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
                return afs_fs_store_data64(op);
 
        call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData,
@@ -1229,7 +1225,7 @@ static void afs_fs_setattr_size(struct afs_operation *op)
               key_serial(op->key), vp->fid.vid, vp->fid.vnode);
 
        ASSERT(attr->ia_valid & ATTR_SIZE);
-       if (upper_32_bits(attr->ia_size))
+       if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
                return afs_fs_setattr_size64(op);
 
        call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData_as_Status,
@@ -1657,20 +1653,33 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
                        return ret;
 
                count = ntohl(call->tmp);
-
                call->count = count;
                call->count2 = count;
-               afs_extract_discard(call, count * sizeof(__be32));
+               if (count == 0) {
+                       call->unmarshall = 4;
+                       call->tmp = 0;
+                       break;
+               }
+
+               /* Extract the first word of the capabilities to call->tmp */
+               afs_extract_to_tmp(call);
                call->unmarshall++;
                fallthrough;
 
-               /* Extract capabilities words */
        case 2:
                ret = afs_extract_data(call, false);
                if (ret < 0)
                        return ret;
 
-               /* TODO: Examine capabilities */
+               afs_extract_discard(call, (count - 1) * sizeof(__be32));
+               call->unmarshall++;
+               fallthrough;
+
+               /* Extract remaining capabilities words */
+       case 3:
+               ret = afs_extract_data(call, false);
+               if (ret < 0)
+                       return ret;
 
                call->unmarshall++;
                break;
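
The reworked unmarshaller above is a staged state machine: read the count, keep the first capabilities word in call->tmp (later tested for AFS3_VICED_CAPABILITY_64BITFILES), then discard the rest, coping with a reply that arrives in arbitrary chunks. A toy word-at-a-time version of that shape, not the rxrpc extraction API:

#include <stdint.h>
#include <stdio.h>

struct parser { int state; uint32_t count, cap0; };

/* Feed one 32-bit word; returns 1 once parsing is complete. */
static int feed_word(struct parser *p, uint32_t w)
{
        switch (p->state) {
        case 0:                         /* capability count */
                p->count = w;
                p->state = w ? 1 : 3;
                break;
        case 1:                         /* first capability word */
                p->cap0 = w;
                p->state = --p->count ? 2 : 3;
                break;
        case 2:                         /* discard remaining words */
                if (--p->count == 0)
                        p->state = 3;
                break;
        }
        return p->state == 3;
}

int main(void)
{
        struct parser p = { 0 };
        uint32_t wire[] = { 3, 0x2 /* 64BITFILES */, 0, 0 };

        for (unsigned int i = 0; i < 4 && !feed_word(&p, wire[i]); i++)
                ;
        printf("cap0=%#x 64bit=%d\n", p.cap0, !!(p.cap0 & 0x2));
        return 0;
}
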
index 80b6c8d..8fcffea 100644 (file)
@@ -54,16 +54,6 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
 }
 
 /*
- * Set the file size and block count.  Estimate the number of 512 bytes blocks
- * used, rounded up to nearest 1K for consistency with other AFS clients.
- */
-static void afs_set_i_size(struct afs_vnode *vnode, u64 size)
-{
-       i_size_write(&vnode->vfs_inode, size);
-       vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
-}
-
-/*
  * Initialise an inode from the vnode status.
  */
 static int afs_inode_init_from_status(struct afs_operation *op,
@@ -587,22 +577,32 @@ static void afs_zap_data(struct afs_vnode *vnode)
 }
 
 /*
- * Get the server reinit counter for a vnode's current server.
+ * Check to see if we have a server currently serving this volume and that it
+ * hasn't been reinitialised or dropped from the list.
  */
-static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
+static bool afs_check_server_good(struct afs_vnode *vnode)
 {
-       struct afs_server_list *slist = rcu_dereference(vnode->volume->servers);
+       struct afs_server_list *slist;
        struct afs_server *server;
+       bool good;
        int i;
 
+       if (vnode->cb_fs_s_break == atomic_read(&vnode->volume->cell->fs_s_break))
+               return true;
+
+       rcu_read_lock();
+
+       slist = rcu_dereference(vnode->volume->servers);
        for (i = 0; i < slist->nr_servers; i++) {
                server = slist->servers[i].server;
                if (server == vnode->cb_server) {
-                       *_s_break = READ_ONCE(server->cb_s_break);
-                       return true;
+                       good = (vnode->cb_s_break == server->cb_s_break);
+                       rcu_read_unlock();
+                       return good;
                }
        }
 
+       rcu_read_unlock();
        return false;
 }
 
@@ -611,57 +611,46 @@ static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
  */
 bool afs_check_validity(struct afs_vnode *vnode)
 {
-       struct afs_volume *volume = vnode->volume;
        enum afs_cb_break_reason need_clear = afs_cb_break_no_break;
        time64_t now = ktime_get_real_seconds();
-       bool valid;
-       unsigned int cb_break, cb_s_break, cb_v_break;
+       unsigned int cb_break;
        int seq = 0;
 
        do {
                read_seqbegin_or_lock(&vnode->cb_lock, &seq);
-               cb_v_break = READ_ONCE(volume->cb_v_break);
                cb_break = vnode->cb_break;
 
-               if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
-                   afs_get_s_break_rcu(vnode, &cb_s_break)) {
-                       if (vnode->cb_s_break != cb_s_break ||
-                           vnode->cb_v_break != cb_v_break) {
-                               vnode->cb_s_break = cb_s_break;
-                               vnode->cb_v_break = cb_v_break;
-                               need_clear = afs_cb_break_for_vsbreak;
-                               valid = false;
-                       } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+               if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
+                       if (vnode->cb_v_break != vnode->volume->cb_v_break)
+                               need_clear = afs_cb_break_for_v_break;
+                       else if (!afs_check_server_good(vnode))
+                               need_clear = afs_cb_break_for_s_reinit;
+                       else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
                                need_clear = afs_cb_break_for_zap;
-                               valid = false;
-                       } else if (vnode->cb_expires_at - 10 <= now) {
+                       else if (vnode->cb_expires_at - 10 <= now)
                                need_clear = afs_cb_break_for_lapsed;
-                               valid = false;
-                       } else {
-                               valid = true;
-                       }
                } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
-                       valid = true;
+                       ;
                } else {
-                       vnode->cb_v_break = cb_v_break;
-                       valid = false;
+                       need_clear = afs_cb_break_no_promise;
                }
 
        } while (need_seqretry(&vnode->cb_lock, seq));
 
        done_seqretry(&vnode->cb_lock, seq);
 
-       if (need_clear != afs_cb_break_no_break) {
-               write_seqlock(&vnode->cb_lock);
-               if (cb_break == vnode->cb_break)
-                       __afs_break_callback(vnode, need_clear);
-               else
-                       trace_afs_cb_miss(&vnode->fid, need_clear);
-               write_sequnlock(&vnode->cb_lock);
-               valid = false;
-       }
+       if (need_clear == afs_cb_break_no_break)
+               return true;
 
-       return valid;
+       write_seqlock(&vnode->cb_lock);
+       if (need_clear == afs_cb_break_no_promise)
+               vnode->cb_v_break = vnode->volume->cb_v_break;
+       else if (cb_break == vnode->cb_break)
+               __afs_break_callback(vnode, need_clear);
+       else
+               trace_afs_cb_miss(&vnode->fid, need_clear);
+       write_sequnlock(&vnode->cb_lock);
+       return false;
 }
 
 /*
@@ -675,21 +664,20 @@ bool afs_check_validity(struct afs_vnode *vnode)
  */
 int afs_validate(struct afs_vnode *vnode, struct key *key)
 {
-       bool valid;
        int ret;
 
        _enter("{v={%llx:%llu} fl=%lx},%x",
               vnode->fid.vid, vnode->fid.vnode, vnode->flags,
               key_serial(key));
 
-       rcu_read_lock();
-       valid = afs_check_validity(vnode);
-       rcu_read_unlock();
-
-       if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
-               clear_nlink(&vnode->vfs_inode);
+       if (unlikely(test_bit(AFS_VNODE_DELETED, &vnode->flags))) {
+               if (vnode->vfs_inode.i_nlink)
+                       clear_nlink(&vnode->vfs_inode);
+               goto valid;
+       }
 
-       if (valid)
+       if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
+           afs_check_validity(vnode))
                goto valid;
 
        down_write(&vnode->validate_lock);
index 5ed416f..0ad97a8 100644 (file)
@@ -390,6 +390,9 @@ struct afs_cell {
        /* Active fileserver interaction state. */
        struct rb_root          fs_servers;     /* afs_server (by server UUID) */
        seqlock_t               fs_lock;        /* For fs_servers  */
+       struct rw_semaphore     fs_open_mmaps_lock;
+       struct list_head        fs_open_mmaps;  /* List of vnodes that are mmapped */
+       atomic_t                fs_s_break;     /* Counter of CB.InitCallBackState messages */
 
        /* VL server list. */
        rwlock_t                vl_servers_lock; /* Lock on vl_servers */
@@ -503,6 +506,7 @@ struct afs_server {
        struct hlist_node       addr4_link;     /* Link in net->fs_addresses4 */
        struct hlist_node       addr6_link;     /* Link in net->fs_addresses6 */
        struct hlist_node       proc_link;      /* Link in net->fs_proc */
+       struct work_struct      initcb_work;    /* Work for CB.InitCallBackState* */
        struct afs_server       *gc_next;       /* Next server in manager's list */
        time64_t                unuse_time;     /* Time at which last unused */
        unsigned long           flags;
@@ -516,6 +520,7 @@ struct afs_server {
 #define AFS_SERVER_FL_IS_YFS   16              /* Server is YFS not AFS */
 #define AFS_SERVER_FL_NO_IBULK 17              /* Fileserver doesn't support FS.InlineBulkStatus */
 #define AFS_SERVER_FL_NO_RM2   18              /* Fileserver doesn't support YFS.RemoveFile2 */
+#define AFS_SERVER_FL_HAS_FS64 19              /* Fileserver supports FS.{Fetch,Store}Data64 */
        atomic_t                ref;            /* Object refcount */
        atomic_t                active;         /* Active user count */
        u32                     addr_version;   /* Address list version */
@@ -657,7 +662,11 @@ struct afs_vnode {
        afs_lock_type_t         lock_type : 8;
 
        /* outstanding callback notification on this file */
+       struct work_struct      cb_work;        /* Work for mmap'd files */
+       struct list_head        cb_mmap_link;   /* Link in cell->fs_open_mmaps */
        void                    *cb_server;     /* Server with callback/filelock */
+       atomic_t                cb_nr_mmap;     /* Number of mmaps */
+       unsigned int            cb_fs_s_break;  /* Mass server break counter (cell->fs_s_break) */
        unsigned int            cb_s_break;     /* Mass break counter on ->server */
        unsigned int            cb_v_break;     /* Mass break counter on ->volume */
        unsigned int            cb_break;       /* Break counter on vnode */
@@ -965,6 +974,8 @@ extern struct fscache_cookie_def afs_vnode_cache_index_def;
 /*
  * callback.c
  */
+extern void afs_invalidate_mmap_work(struct work_struct *);
+extern void afs_server_init_callback_work(struct work_struct *work);
 extern void afs_init_callback_state(struct afs_server *);
 extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
 extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
@@ -1586,6 +1597,16 @@ static inline void afs_update_dentry_version(struct afs_operation *op,
 }
 
 /*
+ * Set the file size and block count.  Estimate the number of 512-byte blocks
+ * used, rounded up to the nearest 1K for consistency with other AFS clients.
+ */
+static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size)
+{
+       i_size_write(&vnode->vfs_inode, size);
+       vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
+}
+
+/*
  * Check for a conflicting operation on a directory that we just unlinked from.
  * If someone managed to sneak a link or an unlink in on the file we just
  * unlinked, we won't be able to trust nlink on an AFS file (but not YFS).
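
The afs_set_i_size() helper moved into this header computes i_blocks in 512-byte sectors: ((size + 1023) >> 10) rounds the size up to whole KiB, and << 1 converts KiB into sectors. A quick standalone check of the arithmetic:

#include <stdio.h>

/* i_blocks as computed by afs_set_i_size(): 512-byte sectors, 1K-rounded. */
static unsigned long long blocks(unsigned long long size)
{
        return ((size + 1023) >> 10) << 1;
}

int main(void)
{
        printf("%llu\n", blocks(1));    /* 2: one byte still occupies 1 KiB */
        printf("%llu\n", blocks(1024)); /* 2 */
        printf("%llu\n", blocks(1025)); /* 4: rounds up to 2 KiB */
        printf("%llu\n", blocks(4096)); /* 8 */
        return 0;
}
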
diff --git a/fs/afs/protocol_afs.h b/fs/afs/protocol_afs.h
new file mode 100644 (file)
index 0000000..0c39358
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* AFS protocol bits
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+
+#define AFSCAPABILITIESMAX 196 /* Maximum number of words in a capability set */
+
+/* AFS3 Fileserver capabilities word 0 */
+#define AFS3_VICED_CAPABILITY_ERRORTRANS       0x0001 /* Uses UAE errors */
+#define AFS3_VICED_CAPABILITY_64BITFILES       0x0002 /* FetchData64 & StoreData64 supported */
+#define AFS3_VICED_CAPABILITY_WRITELOCKACL     0x0004 /* Can lock a file even without lock perm */
+#define AFS3_VICED_CAPABILITY_SANEACLS         0x0008 /* ACLs reviewed for sanity - don't use */
index b5bd03b..e4cd89c 100644 (file)
@@ -168,3 +168,9 @@ enum yfs_lock_type {
        yfs_LockMandatoryWrite  = 0x101,
        yfs_LockMandatoryExtend = 0x102,
 };
+
+/* RXYFS Viced Capability Flags */
+#define YFS_VICED_CAPABILITY_ERRORTRANS                0x0001 /* Deprecated v0.195 */
+#define YFS_VICED_CAPABILITY_64BITFILES                0x0002 /* Deprecated v0.195 */
+#define YFS_VICED_CAPABILITY_WRITELOCKACL      0x0004 /* Can lock a file even without lock perm */
+#define YFS_VICED_CAPABILITY_SANEACLS          0x0008 /* Deprecated v0.195 */
index d83f13c..79e1a5f 100644 (file)
@@ -374,6 +374,7 @@ selected_server:
        if (vnode->cb_server != server) {
                vnode->cb_server = server;
                vnode->cb_s_break = server->cb_s_break;
+               vnode->cb_fs_s_break = atomic_read(&server->cell->fs_s_break);
                vnode->cb_v_break = vnode->volume->cb_v_break;
                clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
        }
index 684a2b0..6e5b9a1 100644 (file)
@@ -235,6 +235,7 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
        server->addr_version = alist->version;
        server->uuid = *uuid;
        rwlock_init(&server->fs_lock);
+       INIT_WORK(&server->initcb_work, afs_server_init_callback_work);
        init_waitqueue_head(&server->probe_wq);
        INIT_LIST_HEAD(&server->probe_link);
        spin_lock_init(&server->probe_lock);
@@ -467,6 +468,7 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
        if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
                afs_give_up_callbacks(net, server);
 
+       flush_work(&server->initcb_work);
        afs_put_server(net, server, afs_server_trace_destroy);
 }
 
index e38bb1e..d110def 100644 (file)
@@ -698,6 +698,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
        vnode->lock_state       = AFS_VNODE_LOCK_NONE;
 
        init_rwsem(&vnode->rmdir_lock);
+       INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work);
 
        _leave(" = %p", &vnode->vfs_inode);
        return &vnode->vfs_inode;
index c053469..2dfe3b3 100644 (file)
@@ -137,7 +137,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
                write_seqlock(&vnode->cb_lock);
                i_size = i_size_read(&vnode->vfs_inode);
                if (maybe_i_size > i_size)
-                       i_size_write(&vnode->vfs_inode, maybe_i_size);
+                       afs_set_i_size(vnode, maybe_i_size);
                write_sequnlock(&vnode->cb_lock);
        }
 
@@ -471,13 +471,18 @@ static void afs_extend_writeback(struct address_space *mapping,
                        }
 
                        /* Has the page moved or been split? */
-                       if (unlikely(page != xas_reload(&xas)))
+                       if (unlikely(page != xas_reload(&xas))) {
+                               put_page(page);
                                break;
+                       }
 
-                       if (!trylock_page(page))
+                       if (!trylock_page(page)) {
+                               put_page(page);
                                break;
+                       }
                        if (!PageDirty(page) || PageWriteback(page)) {
                                unlock_page(page);
+                               put_page(page);
                                break;
                        }
 
@@ -487,6 +492,7 @@ static void afs_extend_writeback(struct address_space *mapping,
                        t = afs_page_dirty_to(page, priv);
                        if (f != 0 && !new_content) {
                                unlock_page(page);
+                               put_page(page);
                                break;
                        }
 
@@ -801,6 +807,7 @@ int afs_writepages(struct address_space *mapping,
 ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
 {
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+       struct afs_file *af = iocb->ki_filp->private_data;
        ssize_t result;
        size_t count = iov_iter_count(from);
 
@@ -816,6 +823,10 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
        if (!count)
                return 0;
 
+       result = afs_validate(vnode, af->key);
+       if (result < 0)
+               return result;
+
        result = generic_file_write_iter(iocb, from);
 
        _leave(" = %zd", result);
@@ -829,13 +840,18 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
  */
 int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
-       struct inode *inode = file_inode(file);
-       struct afs_vnode *vnode = AFS_FS_I(inode);
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+       struct afs_file *af = file->private_data;
+       int ret;
 
        _enter("{%llx:%llu},{n=%pD},%d",
               vnode->fid.vid, vnode->fid.vnode, file,
               datasync);
 
+       ret = afs_validate(vnode, af->key);
+       if (ret < 0)
+               return ret;
+
        return file_write_and_wait_range(file, start, end);
 }
 
@@ -849,11 +865,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
        struct file *file = vmf->vma->vm_file;
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);
+       struct afs_file *af = file->private_data;
        unsigned long priv;
        vm_fault_t ret = VM_FAULT_RETRY;
 
        _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
 
+       afs_validate(vnode, af->key);
+
        sb_start_pagefault(inode->i_sb);
 
        /* Wait for the page to be written to the cache before we allow it to
index 8a3b30e..8be57aa 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cache.c - CIFS filesystem cache index structure definitions
+ *   CIFS filesystem cache index structure definitions
  *
  *   Copyright (c) 2010 Novell, Inc.
 *   Author(s): Suresh Jayaraman (sjayaraman@suse.de)
index 51a824f..de2c12b 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- *   fs/cifs_debug.c
  *
  *   Copyright (C) International Business Machines  Corp., 2000,2005
  *
index 4fd7885..f974075 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifs_fs_sb.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2004
  *   Author(s): Steve French (sfrench@us.ibm.com)
index ef723be..b87cbbe 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifs_ioctl.h
  *
  *   Structure definitions for io control for cifs/smb3
  *
index 8fa26a8..353bd0d 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifs_spnego.c -- SPNEGO upcall management for CIFS
+ *   SPNEGO upcall management for CIFS
  *
  *   Copyright (c) 2007 Red Hat, Inc.
  *   Author(s): Jeff Layton (jlayton@redhat.com)
index 31387d0..e6a0451 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifs_spnego.h -- SPNEGO upcall management for CIFS
+ *   SPNEGO upcall management for CIFS
  *
  *   Copyright (c) 2007 Red Hat, Inc.
  *   Author(s): Jeff Layton (jlayton@redhat.com)
index 171ad8b..e7582dd 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- *   fs/cifs/cifs_unicode.c
  *
  *   Copyright (c) International Business Machines  Corp., 2000,2009
  *   Modified by Steve French (sfrench@us.ibm.com)
index 388eb53..ee3aab3 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifsacl.c
  *
  *   Copyright (C) International Business Machines  Corp., 2007,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index f8292bc..ccbfc75 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifsacl.h
  *
  *   Copyright (c) International Business Machines  Corp., 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 2e6f403..d118282 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifsencrypt.c
  *
  *   Encryption and hashing operations relating to NTLM, NTLMv2.  See MS-NLMP
  *   for more detailed information
index 8c20bfa..9fa930d 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifsfs.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index d25a409..b50da19 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifsfs.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002, 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index c068f7d..e916470 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifsglob.h
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -1400,6 +1399,7 @@ struct cifsInodeInfo {
 #define CIFS_INO_INVALID_MAPPING         (4) /* pagecache is invalid */
 #define CIFS_INO_LOCK                    (5) /* lock bit for synchronization */
 #define CIFS_INO_MODIFIED_ATTR            (6) /* Indicate change in mtime/ctime */
+#define CIFS_INO_CLOSE_ON_LOCK            (7) /* do not defer the close when a lock is set */
        unsigned long flags;
        spinlock_t writers_lock;
        unsigned int writers;           /* Number of writers on this inode */
index 98e8e5a..d2ff438 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifspdu.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2009
  *   Author(s): Steve French (sfrench@us.ibm.com)
index f9740c2..d0f85b6 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifsproto.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -268,6 +267,9 @@ extern void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode);
 
 extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
 
+extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
+                               const char *path);
+
 extern struct TCP_Server_Info *cifs_get_tcp_session(struct smb3_fs_context *ctx);
 extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
                                 int from_reconnect);
index a8e41c1..243d176 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifssmb.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2010
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 0db3448..7881115 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/connect.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2011
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -1090,7 +1089,7 @@ next_pdu:
        module_put_and_exit(0);
 }
 
-/**
+/*
  * Returns true if srcaddr isn't specified and rhs isn't specified, or
  * if srcaddr is specified and matches the IP address of the rhs argument
  */
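
Several hunks in this file either demote a stray kernel-doc opener ("/**") to a plain comment or add the @param lines kernel-doc requires. For reference, a minimal valid block looks like the sketch below (hypothetical function and parameter names, for illustration only):

    /**
     * cifs_example_match - whether a source address matches a candidate (hypothetical)
     * @srcaddr: requested source address, may be unspecified
     * @rhs: candidate address to compare against
     *
     * Return: true if @srcaddr is unspecified or matches @rhs.
     */
    static bool cifs_example_match(struct sockaddr *srcaddr,
                                   struct sockaddr *rhs);
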
@@ -1550,6 +1549,9 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
 
 /**
  * cifs_setup_ipc - helper to setup the IPC tcon for the session
+ * @ses: smb session to issue the request on
+ * @ctx: the superblock configuration context to use for building the
+ *       new tree connection for the IPC (interprocess communication RPC)
  *
  * A new IPC connection is made and stored in the session
  * tcon_ipc. The IPC tcon has the same lifetime as the session.
@@ -1605,6 +1607,7 @@ out:
 
 /**
  * cifs_free_ipc - helper to release the session IPC tcon
+ * @ses: smb session to unmount the IPC from
  *
  * Needs to be called every time a session is destroyed.
  *
@@ -1855,6 +1858,8 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
 
 /**
  * cifs_get_smb_ses - get a session matching @ctx data from @server
+ * @server: server to setup the session to
+ * @ctx: superblock configuration context to use to setup the session
  *
  * This function assumes it is being called from cifs_mount() where we
  * already got a server reference (server refcount +1). See
@@ -2065,6 +2070,8 @@ cifs_put_tcon(struct cifs_tcon *tcon)
 
 /**
  * cifs_get_tcon - get a tcon matching @ctx data from @ses
+ * @ses: smb session to issue the request on
+ * @ctx: the superblock configuration context to use for building the
+ *       new tree connection
  *
  * - tcon refcount is the number of mount points using the tcon.
  * - ses refcount is the number of tcons using the session.
@@ -3030,7 +3037,7 @@ build_unc_path_to_root(const struct smb3_fs_context *ctx,
        return full_path;
 }
 
-/**
+/*
  * expand_dfs_referral - Perform a dfs referral query and update the cifs_sb
  *
  * If a referral is found, cifs_sb->ctx->mount_options will be (re-)allocated
index 5f8a302..6e8e7cc 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/dir.c
  *
  *   vfs operations that deal with dentries
  *
index 8c616aa..0458d28 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *  fs/cifs/dns_resolve.c
  *
  *   Copyright (c) 2007 Igor Mammedov
  *   Author(s): Igor Mammedov (niallain@gmail.com)
index 9fa2807..afc0df3 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/dns_resolve.h -- DNS Resolver upcall management for CIFS DFS
- *                            Handles host name to IP address resolution
+ *   DNS Resolver upcall management for CIFS DFS
+ *   Handles host name to IP address resolution
  *
  *   Copyright (c) International Business Machines  Corp., 2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 747a540..37c2841 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/export.c
  *
  *   Copyright (C) International Business Machines  Corp., 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index d021647..6796fc7 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/file.c
  *
  *   vfs operations that deal with files
  *
@@ -883,6 +882,7 @@ int cifs_close(struct inode *inode, struct file *file)
                dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
                if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
                    cinode->lease_granted &&
+                   !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
                    dclose) {
                        if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
                                inode->i_ctime = inode->i_mtime = current_time(inode);
@@ -1865,6 +1865,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
        cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
                        tcon->ses->server);
        cifs_sb = CIFS_FILE_SB(file);
+       set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
 
        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
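
Taken together with the CIFS_INO_CLOSE_ON_LOCK flag defined above, the effect is that a handle which has taken a byte-range lock is never parked in the deferred-close cache. A minimal userspace sketch of the sequence this guards (assumed behaviour, not part of the patch):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/mnt/cifs/file", O_RDWR);
            struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };

            fcntl(fd, F_SETLK, &fl); /* cifs_lock() sets CIFS_INO_CLOSE_ON_LOCK */
            close(fd);               /* cifs_close() skips the deferred close and
                                      * sends the SMB close immediately */
            return 0;
    }
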
index fab47fa..8eedd20 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/fscache.c - CIFS filesystem cache interface
+ *   CIFS filesystem cache interface
  *
  *   Copyright (c) 2010 Novell, Inc.
  *   Author(s): Suresh Jayaraman <sjayaraman@suse.de>
index 82e856b..9baa1d0 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/fscache.h - CIFS filesystem cache interface definitions
+ *   CIFS filesystem cache interface definitions
  *
  *   Copyright (c) 2010 Novell, Inc.
  *   Author(s): Suresh Jayaraman <sjayaraman@suse.de>
index 50c01cf..8284841 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/inode.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2010
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -1625,7 +1624,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
                goto unlink_out;
        }
 
-       cifs_close_deferred_file(CIFS_I(inode));
+       cifs_close_deferred_file_under_dentry(tcon, full_path);
        if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                rc = CIFSPOSIXDelFile(xid, tcon, full_path,
@@ -2114,9 +2113,9 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
                goto cifs_rename_exit;
        }
 
-       cifs_close_deferred_file(CIFS_I(d_inode(source_dentry)));
+       cifs_close_deferred_file_under_dentry(tcon, from_name);
        if (d_inode(target_dentry) != NULL)
-               cifs_close_deferred_file(CIFS_I(d_inode(target_dentry)));
+               cifs_close_deferred_file_under_dentry(tcon, to_name);
 
        rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
                            to_name);
index 42c6a0b..0359b60 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/ioctl.c
  *
  *   vfs operations that deal with io control
  *
@@ -359,7 +358,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                        if (pSMBFile == NULL)
                                break;
                        tcon = tlink_tcon(pSMBFile->tlink);
-                       caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+                       /* caps = le64_to_cpu(tcon->fsUnixInfo.Capability); */
 
                        if (get_user(ExtAttrBits, (int __user *)arg)) {
                                rc = -EFAULT;
index f0a6d63..852e54e 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/link.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 9469f1c..03da00e 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/misc.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -736,7 +735,7 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
                        if (cancel_delayed_work(&cfile->deferred)) {
                                tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
                                if (tmp_list == NULL)
-                                       continue;
+                                       break;
                                tmp_list->cfile = cfile;
                                list_add_tail(&tmp_list->list, &file_head);
                        }
@@ -767,7 +766,7 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
                        if (cancel_delayed_work(&cfile->deferred)) {
                                tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
                                if (tmp_list == NULL)
-                                       continue;
+                                       break;
                                tmp_list->cfile = cfile;
                                list_add_tail(&tmp_list->list, &file_head);
                        }
@@ -781,6 +780,43 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
                kfree(tmp_list);
        }
 }
+
+void
+cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
+{
+       struct cifsFileInfo *cfile;
+       struct list_head *tmp;
+       struct file_list *tmp_list, *tmp_next_list;
+       struct list_head file_head;
+       void *page;
+       const char *full_path;
+
+       INIT_LIST_HEAD(&file_head);
+       page = alloc_dentry_path();
+       spin_lock(&tcon->open_file_lock);
+       list_for_each(tmp, &tcon->openFileList) {
+               cfile = list_entry(tmp, struct cifsFileInfo, tlist);
+               full_path = build_path_from_dentry(cfile->dentry, page);
+               if (strstr(full_path, path)) {
+                       if (delayed_work_pending(&cfile->deferred)) {
+                               if (cancel_delayed_work(&cfile->deferred)) {
+                                       tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+                                       if (tmp_list == NULL)
+                                               break;
+                                       tmp_list->cfile = cfile;
+                                       list_add_tail(&tmp_list->list, &file_head);
+                               }
+                       }
+               }
+       }
+       spin_unlock(&tcon->open_file_lock);
+
+       list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+               _cifsFileInfo_put(tmp_list->cfile, true, false);
+               list_del(&tmp_list->list);
+               kfree(tmp_list);
+       }
+       free_dentry_path(page);
+}
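
Note that the new helper matches with a plain substring test, so every cached handle whose full path merely contains the target path is closed. A small standalone illustration of the strstr() semantics used here (editor's example inputs):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *path = "/dir/file";

            printf("%d\n", strstr("/dir/file", path) != NULL);        /* 1: the file itself */
            printf("%d\n", strstr("/dir/file/stream", path) != NULL); /* 1: a child path */
            printf("%d\n", strstr("/dir/filecopy", path) != NULL);    /* 1: a sibling sharing the prefix */
            return 0;
    }
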
 
 /* parses DFS referral V3 structure
  * caller is responsible for freeing target_nodes
index 0e728aa..fa9fbd6 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- *   fs/cifs/netmisc.c
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 378133c..25a2b8e 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/ntlmssp.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 54d77c9..1929e80 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/readdir.c
  *
  *   Directory search handling
  *
index 137f7c9..ae1d025 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/rfc1002pdu.h
  *
  *   Protocol Data Unit definitions for RFC 1001/1002 support
  *
index 118403f..23e02db 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/sess.c
  *
  *   SMB/CIFS session setup handling routines
  *
index c9d8a50..f5dcc49 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2file.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002, 2011
  *   Author(s): Steve French (sfrench@us.ibm.com),
index d0e9f37..ca692b2 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smb2glob.h
  *
  *   Definitions for various global variables and structures
  *
index 957b259..8297703 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2inode.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002, 2011
  *                 Etersoft, 2012
index 668f771..29b5554 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2misc.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2011
  *                 Etersoft, 2012
index b6d2e35..672ae78 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2pdu.c
  *
  *   Copyright (C) International Business Machines  Corp., 2009, 2013
  *                 Etersoft, 2012
index e9cac79..f32c99c 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smb2pdu.h
  *
  *   Copyright (c) International Business Machines  Corp., 2009, 2013
  *                 Etersoft, 2012
index 263767f..5479454 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smb2proto.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002, 2011
  *                 Etersoft, 2012
index 0215ef3..a9e9581 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smb2status.h
  *
  *   SMB2 Status code (network error) definitions
  *   Definitions are from MS-ERREF
index 6f7952e..f59b956 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2transport.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002, 2011
  *                 Etersoft, 2012
index 60189ef..aeffdad 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smberr.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2004
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 75a95de..b737932 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/transport.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 59b6c57..2f075b5 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * fs/cifs/winucase.c
  *
  * Copyright (c) Jeffrey Layton <jlayton@redhat.com>, 2013
  *
index 9ed481e..7d8b72d 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/xattr.c
  *
  *   Copyright (c) International Business Machines  Corp., 2003, 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 6c55362..c2e0e8e 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/rculist_nulls.h>
 #include <linux/cpu.h>
 #include <linux/tracehook.h>
+#include <uapi/linux/io_uring.h>
 
 #include "io-wq.h"
 
@@ -176,7 +177,6 @@ static void io_worker_ref_put(struct io_wq *wq)
 static void io_worker_exit(struct io_worker *worker)
 {
        struct io_wqe *wqe = worker->wqe;
-       struct io_wqe_acct *acct = io_wqe_get_acct(worker);
 
        if (refcount_dec_and_test(&worker->ref))
                complete(&worker->ref_done);
@@ -186,7 +186,6 @@ static void io_worker_exit(struct io_worker *worker)
        if (worker->flags & IO_WORKER_F_FREE)
                hlist_nulls_del_rcu(&worker->nulls_node);
        list_del_rcu(&worker->all_list);
-       acct->nr_workers--;
        preempt_disable();
        io_wqe_dec_running(worker);
        worker->flags = 0;
@@ -246,8 +245,6 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
  */
 static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
 {
-       bool do_create = false;
-
        /*
         * Most likely an attempt to queue unbounded work on an io_wq that
         * wasn't setup with any unbounded workers.
@@ -256,18 +253,15 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
                pr_warn_once("io-wq is not configured for unbound workers");
 
        raw_spin_lock(&wqe->lock);
-       if (acct->nr_workers < acct->max_workers) {
-               acct->nr_workers++;
-               do_create = true;
+       if (acct->nr_workers == acct->max_workers) {
+               raw_spin_unlock(&wqe->lock);
+               return true;
        }
+       acct->nr_workers++;
        raw_spin_unlock(&wqe->lock);
-       if (do_create) {
-               atomic_inc(&acct->nr_running);
-               atomic_inc(&wqe->wq->worker_refs);
-               return create_io_worker(wqe->wq, wqe, acct->index);
-       }
-
-       return true;
+       atomic_inc(&acct->nr_running);
+       atomic_inc(&wqe->wq->worker_refs);
+       return create_io_worker(wqe->wq, wqe, acct->index);
 }
 
 static void io_wqe_inc_running(struct io_worker *worker)
@@ -574,6 +568,7 @@ loop:
                }
                /* timed out, exit unless we're the last worker */
                if (last_timeout && acct->nr_workers > 1) {
+                       acct->nr_workers--;
                        raw_spin_unlock(&wqe->lock);
                        __set_current_state(TASK_RUNNING);
                        break;
@@ -1287,6 +1282,10 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
 {
        int i, node, prev = 0;
 
+       BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
+       BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
+       BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);
+
        for (i = 0; i < 2; i++) {
                if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
                        new_count[i] = task_rlimit(current, RLIMIT_NPROC);
index 16fb743..e372d5b 100644 (file)
@@ -712,6 +712,7 @@ struct io_async_rw {
        struct iovec                    fast_iov[UIO_FASTIOV];
        const struct iovec              *free_iovec;
        struct iov_iter                 iter;
+       struct iov_iter_state           iter_state;
        size_t                          bytes_done;
        struct wait_page_queue          wpq;
 };
@@ -735,7 +736,6 @@ enum {
        REQ_F_BUFFER_SELECTED_BIT,
        REQ_F_COMPLETE_INLINE_BIT,
        REQ_F_REISSUE_BIT,
-       REQ_F_DONT_REISSUE_BIT,
        REQ_F_CREDS_BIT,
        REQ_F_REFCOUNT_BIT,
        REQ_F_ARM_LTIMEOUT_BIT,
@@ -782,8 +782,6 @@ enum {
        REQ_F_COMPLETE_INLINE   = BIT(REQ_F_COMPLETE_INLINE_BIT),
        /* caller should reissue async */
        REQ_F_REISSUE           = BIT(REQ_F_REISSUE_BIT),
-       /* don't attempt request reissue, see io_rw_reissue() */
-       REQ_F_DONT_REISSUE      = BIT(REQ_F_DONT_REISSUE_BIT),
        /* supports async reads */
        REQ_F_NOWAIT_READ       = BIT(REQ_F_NOWAIT_READ_BIT),
        /* supports async writes */
@@ -2444,13 +2442,6 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
                req = list_first_entry(done, struct io_kiocb, inflight_entry);
                list_del(&req->inflight_entry);
 
-               if (READ_ONCE(req->result) == -EAGAIN &&
-                   !(req->flags & REQ_F_DONT_REISSUE)) {
-                       req->iopoll_completed = 0;
-                       io_req_task_queue_reissue(req);
-                       continue;
-               }
-
                __io_cqring_fill_event(ctx, req->user_data, req->result,
                                        io_put_rw_kbuf(req));
                (*nr_events)++;
@@ -2613,8 +2604,7 @@ static bool io_resubmit_prep(struct io_kiocb *req)
 
        if (!rw)
                return !io_req_prep_async(req);
-       /* may have left rw->iter inconsistent on -EIOCBQUEUED */
-       iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
+       iov_iter_restore(&rw->iter, &rw->iter_state);
        return true;
 }
 
@@ -2714,10 +2704,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
        if (unlikely(res != req->result)) {
-               if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
-                   io_resubmit_prep(req))) {
-                       req_set_fail(req);
-                       req->flags |= REQ_F_DONT_REISSUE;
+               if (res == -EAGAIN && io_rw_should_reissue(req)) {
+                       req->flags |= REQ_F_REISSUE;
+                       return;
                }
        }
 
@@ -2843,7 +2832,8 @@ static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
        return __io_file_supports_nowait(req->file, rw);
 }
 
-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                     int rw)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct kiocb *kiocb = &req->rw.kiocb;
@@ -2865,8 +2855,13 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (unlikely(ret))
                return ret;
 
-       /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
-       if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
+       /*
+        * If the file is marked O_NONBLOCK, still allow retry for it if it
+        * supports async. Otherwise it's impossible to use O_NONBLOCK files
+        * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
+        */
+       if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+           ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw)))
                req->flags |= REQ_F_NOWAIT;
 
        ioprio = READ_ONCE(sqe->ioprio);
@@ -2931,7 +2926,6 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
        struct io_async_rw *io = req->async_data;
-       bool check_reissue = kiocb->ki_complete == io_complete_rw;
 
        /* add previously done IO, if any */
        if (io && io->bytes_done > 0) {
@@ -2943,19 +2937,27 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 
        if (req->flags & REQ_F_CUR_POS)
                req->file->f_pos = kiocb->ki_pos;
-       if (ret >= 0 && check_reissue)
+       if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
                __io_complete_rw(req, ret, 0, issue_flags);
        else
                io_rw_done(kiocb, ret);
 
-       if (check_reissue && (req->flags & REQ_F_REISSUE)) {
+       if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req)) {
                        io_req_task_queue_reissue(req);
                } else {
+                       unsigned int cflags = io_put_rw_kbuf(req);
+                       struct io_ring_ctx *ctx = req->ctx;
+
                        req_set_fail(req);
-                       __io_req_complete(req, issue_flags, ret,
-                                         io_put_rw_kbuf(req));
+                       if (issue_flags & IO_URING_F_NONBLOCK) {
+                               mutex_lock(&ctx->uring_lock);
+                               __io_req_complete(req, issue_flags, ret, cflags);
+                               mutex_unlock(&ctx->uring_lock);
+                       } else {
+                               __io_req_complete(req, issue_flags, ret, cflags);
+                       }
                }
        }
 }
@@ -3263,12 +3265,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
                                ret = nr;
                        break;
                }
+               if (!iov_iter_is_bvec(iter)) {
+                       iov_iter_advance(iter, nr);
+               } else {
+                       req->rw.len -= nr;
+                       req->rw.addr += nr;
+               }
                ret += nr;
                if (nr != iovec.iov_len)
                        break;
-               req->rw.len -= nr;
-               req->rw.addr += nr;
-               iov_iter_advance(iter, nr);
        }
 
        return ret;
@@ -3315,12 +3320,17 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
        if (!force && !io_op_defs[req->opcode].needs_async_setup)
                return 0;
        if (!req->async_data) {
+               struct io_async_rw *iorw;
+
                if (io_alloc_async_data(req)) {
                        kfree(iovec);
                        return -ENOMEM;
                }
 
                io_req_map_rw(req, iovec, fast_iov, iter);
+               iorw = req->async_data;
+               /* we've copied and mapped the iter, ensure state is saved */
+               iov_iter_save_state(&iorw->iter, &iorw->iter_state);
        }
        return 0;
 }
@@ -3339,6 +3349,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
        iorw->free_iovec = iov;
        if (iov)
                req->flags |= REQ_F_NEED_CLEANUP;
+       iov_iter_save_state(&iorw->iter, &iorw->iter_state);
        return 0;
 }
 
@@ -3346,7 +3357,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        if (unlikely(!(req->file->f_mode & FMODE_READ)))
                return -EBADF;
-       return io_prep_rw(req, sqe);
+       return io_prep_rw(req, sqe, READ);
 }
 
 /*
@@ -3442,19 +3453,28 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
        struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter __iter, *iter = &__iter;
        struct io_async_rw *rw = req->async_data;
-       ssize_t io_size, ret, ret2;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       struct iov_iter_state __state, *state;
+       ssize_t ret, ret2;
 
        if (rw) {
                iter = &rw->iter;
+               state = &rw->iter_state;
+               /*
+                * We come here from an earlier attempt; restore the iter
+                * state in case it no longer matches. It's cheap enough
+                * that we don't need to make this conditional.
+                */
+               iov_iter_restore(iter, state);
                iovec = NULL;
        } else {
                ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
                if (ret < 0)
                        return ret;
+               state = &__state;
+               iov_iter_save_state(iter, state);
        }
-       io_size = iov_iter_count(iter);
-       req->result = io_size;
+       req->result = iov_iter_count(iter);
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
@@ -3468,7 +3488,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                return ret ?: -EAGAIN;
        }
 
-       ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
+       ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
@@ -3484,30 +3504,49 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                /* no retry on NONBLOCK nor RWF_NOWAIT */
                if (req->flags & REQ_F_NOWAIT)
                        goto done;
-               /* some cases will consume bytes even on error returns */
-               iov_iter_reexpand(iter, iter->count + iter->truncated);
-               iov_iter_revert(iter, io_size - iov_iter_count(iter));
                ret = 0;
        } else if (ret == -EIOCBQUEUED) {
                goto out_free;
-       } else if (ret <= 0 || ret == io_size || !force_nonblock ||
+       } else if (ret <= 0 || ret == req->result || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
                /* read all, failed, already did sync or don't want to retry */
                goto done;
        }
 
+       /*
+        * Don't depend on the iter state matching what was consumed, or being
+        * untouched in case of error. Restore it and we'll advance it
+        * manually if we need to.
+        */
+       iov_iter_restore(iter, state);
+
        ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
        if (ret2)
                return ret2;
 
        iovec = NULL;
        rw = req->async_data;
-       /* now use our persistent iterator, if we aren't already */
-       iter = &rw->iter;
+       /*
+        * Now use our persistent iterator and state, if we aren't already.
+        * We've restored and mapped the iter to match.
+        */
+       if (iter != &rw->iter) {
+               iter = &rw->iter;
+               state = &rw->iter_state;
+       }
 
        do {
-               io_size -= ret;
+               /*
+                * We end up here because of a partial read, either from
+                * above or inside this loop. Advance the iter by the bytes
+                * that were consumed.
+                */
+               iov_iter_advance(iter, ret);
+               if (!iov_iter_count(iter))
+                       break;
                rw->bytes_done += ret;
+               iov_iter_save_state(iter, state);
+
                /* if we can retry, do so with the callbacks armed */
                if (!io_rw_should_retry(req)) {
                        kiocb->ki_flags &= ~IOCB_WAITQ;
@@ -3525,7 +3564,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                        return 0;
                /* we got some bytes, but not all. retry. */
                kiocb->ki_flags &= ~IOCB_WAITQ;
-       } while (ret > 0 && ret < io_size);
+               iov_iter_restore(iter, state);
+       } while (ret > 0);
 done:
        kiocb_done(kiocb, ret, issue_flags);
 out_free:
@@ -3539,7 +3579,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
                return -EBADF;
-       return io_prep_rw(req, sqe);
+       return io_prep_rw(req, sqe, WRITE);
 }
 
 static int io_write(struct io_kiocb *req, unsigned int issue_flags)
@@ -3548,19 +3588,24 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
        struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter __iter, *iter = &__iter;
        struct io_async_rw *rw = req->async_data;
-       ssize_t ret, ret2, io_size;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       struct iov_iter_state __state, *state;
+       ssize_t ret, ret2;
 
        if (rw) {
                iter = &rw->iter;
+               state = &rw->iter_state;
+               iov_iter_restore(iter, state);
                iovec = NULL;
        } else {
                ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
                if (ret < 0)
                        return ret;
+               state = &__state;
+               iov_iter_save_state(iter, state);
        }
-       io_size = iov_iter_count(iter);
-       req->result = io_size;
+       req->result = iov_iter_count(iter);
+       ret2 = 0;
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
@@ -3577,7 +3622,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
            (req->flags & REQ_F_ISREG))
                goto copy_iov;
 
-       ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
+       ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
        if (unlikely(ret))
                goto out_free;
 
@@ -3624,9 +3669,9 @@ done:
                kiocb_done(kiocb, ret2, issue_flags);
        } else {
 copy_iov:
-               /* some cases will consume bytes even on error returns */
-               iov_iter_reexpand(iter, iter->count + iter->truncated);
-               iov_iter_revert(iter, io_size - iov_iter_count(iter));
+               iov_iter_restore(iter, state);
+               if (ret2 > 0)
+                       iov_iter_advance(iter, ret2);
                ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
                return ret ?: -EAGAIN;
        }
@@ -7515,6 +7560,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                        break;
        } while (1);
 
+       if (uts) {
+               struct timespec64 ts;
+
+               if (get_timespec64(&ts, uts))
+                       return -EFAULT;
+               timeout = timespec64_to_jiffies(&ts);
+       }
+
        if (sig) {
 #ifdef CONFIG_COMPAT
                if (in_compat_syscall())
@@ -7528,14 +7581,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                        return ret;
        }
 
-       if (uts) {
-               struct timespec64 ts;
-
-               if (get_timespec64(&ts, uts))
-                       return -EFAULT;
-               timeout = timespec64_to_jiffies(&ts);
-       }
-
        init_waitqueue_func_entry(&iowq.wq, io_wake_function);
        iowq.wq.private = current;
        INIT_LIST_HEAD(&iowq.wq.entry);
@@ -8284,11 +8329,27 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
 #endif
 }
 
+static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
+                                struct io_rsrc_node *node, void *rsrc)
+{
+       struct io_rsrc_put *prsrc;
+
+       prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
+       if (!prsrc)
+               return -ENOMEM;
+
+       prsrc->tag = *io_get_tag_slot(data, idx);
+       prsrc->rsrc = rsrc;
+       list_add(&prsrc->list, &node->rsrc_list);
+       return 0;
+}
+
 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
                                 unsigned int issue_flags, u32 slot_index)
 {
        struct io_ring_ctx *ctx = req->ctx;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       bool needs_switch = false;
        struct io_fixed_file *file_slot;
        int ret = -EBADF;
 
@@ -8304,9 +8365,22 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 
        slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
        file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
-       ret = -EBADF;
-       if (file_slot->file_ptr)
-               goto err;
+
+       if (file_slot->file_ptr) {
+               struct file *old_file;
+
+               ret = io_rsrc_node_switch_start(ctx);
+               if (ret)
+                       goto err;
+
+               old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+               ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
+                                           ctx->rsrc_node, old_file);
+               if (ret)
+                       goto err;
+               file_slot->file_ptr = 0;
+               needs_switch = true;
+       }
 
        *io_get_tag_slot(ctx->file_data, slot_index) = 0;
        io_fixed_file_set(file_slot, file);
@@ -8318,27 +8392,14 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 
        ret = 0;
 err:
+       if (needs_switch)
+               io_rsrc_node_switch(ctx, ctx->file_data);
        io_ring_submit_unlock(ctx, !force_nonblock);
        if (ret)
                fput(file);
        return ret;
 }
 
-static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
-                                struct io_rsrc_node *node, void *rsrc)
-{
-       struct io_rsrc_put *prsrc;
-
-       prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
-       if (!prsrc)
-               return -ENOMEM;
-
-       prsrc->tag = *io_get_tag_slot(data, idx);
-       prsrc->rsrc = rsrc;
-       list_add(&prsrc->list, &node->rsrc_list);
-       return 0;
-}
-
 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                                 struct io_uring_rsrc_update2 *up,
                                 unsigned nr_args)
@@ -10560,10 +10621,12 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                         * ordering. Fine to drop uring_lock here, we hold
                         * a ref to the ctx.
                         */
+                       refcount_inc(&sqd->refs);
                        mutex_unlock(&ctx->uring_lock);
                        mutex_lock(&sqd->lock);
                        mutex_lock(&ctx->uring_lock);
-                       tctx = sqd->thread->io_uring;
+                       if (sqd->thread)
+                               tctx = sqd->thread->io_uring;
                }
        } else {
                tctx = current->io_uring;
@@ -10577,16 +10640,20 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
        if (ret)
                goto err;
 
-       if (sqd)
+       if (sqd) {
                mutex_unlock(&sqd->lock);
+               io_put_sq_data(sqd);
+       }
 
        if (copy_to_user(arg, new_count, sizeof(new_count)))
                return -EFAULT;
 
        return 0;
 err:
-       if (sqd)
+       if (sqd) {
                mutex_unlock(&sqd->lock);
+               io_put_sq_data(sqd);
+       }
        return ret;
 }
 
index 0b307ca..3eac3c0 100644 (file)
@@ -191,19 +191,77 @@ int get_nlink(struct kstat *st)
        return nlink;
 }
 
-void ksmbd_conv_path_to_unix(char *path)
+char *ksmbd_conv_path_to_unix(char *path)
 {
+       size_t path_len, remain_path_len, out_path_len;
+       char *out_path, *out_next;
+       int i, pre_dotdot_cnt = 0, slash_cnt = 0;
+       bool is_last;
+
        strreplace(path, '\\', '/');
-}
+       path_len = strlen(path);
+       remain_path_len = path_len;
+       if (path_len == 0)
+               return ERR_PTR(-EINVAL);
 
-void ksmbd_strip_last_slash(char *path)
-{
-       int len = strlen(path);
+       out_path = kzalloc(path_len + 2, GFP_KERNEL);
+       if (!out_path)
+               return ERR_PTR(-ENOMEM);
+       out_path_len = 0;
+       out_next = out_path;
+
+       do {
+               char *name = path + path_len - remain_path_len;
+               char *next = strchrnul(name, '/');
+               size_t name_len = next - name;
+
+               is_last = !next[0];
+               if (name_len == 2 && name[0] == '.' && name[1] == '.') {
+                       pre_dotdot_cnt++;
+                       /* handle the case that path ends with "/.." */
+                       if (is_last)
+                               goto follow_dotdot;
+               } else {
+                       if (pre_dotdot_cnt) {
+follow_dotdot:
+                               slash_cnt = 0;
+                               for (i = out_path_len - 1; i >= 0; i--) {
+                                       if (out_path[i] == '/' &&
+                                           ++slash_cnt == pre_dotdot_cnt + 1)
+                                               break;
+                               }
+
+                               if (i < 0 &&
+                                   slash_cnt != pre_dotdot_cnt) {
+                                       kfree(out_path);
+                                       return ERR_PTR(-EINVAL);
+                               }
+
+                               out_next = &out_path[i+1];
+                               *out_next = '\0';
+                               out_path_len = i + 1;
 
-       while (len && path[len - 1] == '/') {
-               path[len - 1] = '\0';
-               len--;
-       }
+                       }
+
+                       if (name_len != 0 &&
+                           !(name_len == 1 && name[0] == '.') &&
+                           !(name_len == 2 && name[0] == '.' && name[1] == '.')) {
+                               next[0] = '\0';
+                               sprintf(out_next, "%s/", name);
+                               out_next += name_len + 1;
+                               out_path_len += name_len + 1;
+                               next[0] = '/';
+                       }
+                       pre_dotdot_cnt = 0;
+               }
+
+               remain_path_len -= name_len + 1;
+       } while (!is_last);
+
+       if (out_path_len > 0)
+               out_path[out_path_len-1] = '\0';
+       path[path_len] = '\0';
+       return out_path;
 }
 
 void ksmbd_conv_path_to_windows(char *path)
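
The rewritten helper now canonicalises ".." components instead of merely flipping backslashes, and returns a freshly allocated path or an ERR_PTR. A worked example of the expected transformation (assumed input, kernel context):

    char in[] = "dir1\\dir2\\..\\file.txt";
    char *out = ksmbd_conv_path_to_unix(in);

    if (!IS_ERR(out)) {
            /* out == "dir1/file.txt": slashes flipped, "dir2/.." collapsed */
            kfree(out);
    }
    /* a path climbing above the root, e.g. "..\\x", yields ERR_PTR(-EINVAL) */
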
index af8717d..b7b1013 100644 (file)
@@ -16,8 +16,7 @@ int ksmbd_validate_filename(char *filename);
 int parse_stream_name(char *filename, char **stream_name, int *s_type);
 char *convert_to_nt_pathname(char *filename, char *sharepath);
 int get_nlink(struct kstat *st);
-void ksmbd_conv_path_to_unix(char *path);
-void ksmbd_strip_last_slash(char *path);
+char *ksmbd_conv_path_to_unix(char *path);
 void ksmbd_conv_path_to_windows(char *path);
 char *ksmbd_extract_sharename(char *treename);
 char *convert_to_unix_name(struct ksmbd_share_config *share, char *name);
index c86164d..6304c9b 100644 (file)
@@ -634,7 +634,7 @@ static char *
 smb2_get_name(struct ksmbd_share_config *share, const char *src,
              const int maxlen, struct nls_table *local_nls)
 {
-       char *name, *unixname;
+       char *name, *norm_name, *unixname;
 
        name = smb_strndup_from_utf16(src, maxlen, 1, local_nls);
        if (IS_ERR(name)) {
@@ -643,11 +643,15 @@ smb2_get_name(struct ksmbd_share_config *share, const char *src,
        }
 
        /* change it to absolute unix name */
-       ksmbd_conv_path_to_unix(name);
-       ksmbd_strip_last_slash(name);
-
-       unixname = convert_to_unix_name(share, name);
+       norm_name = ksmbd_conv_path_to_unix(name);
+       if (IS_ERR(norm_name)) {
+               kfree(name);
+               return norm_name;
+       }
        kfree(name);
+
+       unixname = convert_to_unix_name(share, norm_name);
+       kfree(norm_name);
        if (!unixname) {
                pr_err("can not convert absolute name\n");
                return ERR_PTR(-ENOMEM);
@@ -4041,6 +4045,10 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
        path = &fp->filp->f_path;
        /* single EA entry is requested with given user.* name */
        if (req->InputBufferLength) {
+               if (le32_to_cpu(req->InputBufferLength) <
+                   sizeof(struct smb2_ea_info_req))
+                       return -EINVAL;
+
                ea_req = (struct smb2_ea_info_req *)req->Buffer;
        } else {
                /* need to send all EAs, if no specific EA is requested */
index 52b2556..3a7fa23 100644 (file)
@@ -20,7 +20,6 @@
 #define SUBMOD_NAME    "smb_direct"
 
 #include <linux/kthread.h>
-#include <linux/rwlock.h>
 #include <linux/list.h>
 #include <linux/mempool.h>
 #include <linux/highmem.h>
index c69a0bb..4f1a451 100644 (file)
@@ -134,18 +134,9 @@ svcxdr_decode_owner(struct xdr_stream *xdr, struct xdr_netobj *obj)
 static inline bool
 svcxdr_encode_owner(struct xdr_stream *xdr, const struct xdr_netobj *obj)
 {
-       unsigned int quadlen = XDR_QUADLEN(obj->len);
-       __be32 *p;
-
-       if (xdr_stream_encode_u32(xdr, obj->len) < 0)
-               return false;
-       p = xdr_reserve_space(xdr, obj->len);
-       if (!p)
+       if (obj->len > XDR_MAX_NETOBJ)
                return false;
-       p[quadlen - 1] = 0;     /* XDR pad */
-       memcpy(p, obj->data, obj->len);
-
-       return true;
+       return xdr_stream_encode_opaque(xdr, obj->data, obj->len) > 0;
 }
 
 #endif /* _LOCKD_SVCXDR_H_ */
index 4235641..3f4027a 100644 (file)
@@ -3570,7 +3570,7 @@ static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_s
 }
 
 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
-                               struct nfsd4_session *session, u32 req)
+               struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
 {
        struct nfs4_client *clp = session->se_client;
        struct svc_xprt *xpt = rqst->rq_xprt;
@@ -3593,6 +3593,8 @@ static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
        else
                status = nfserr_inval;
        spin_unlock(&clp->cl_lock);
+       if (status == nfs_ok && conn)
+               *conn = c;
        return status;
 }
 
@@ -3617,8 +3619,16 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
        status = nfserr_wrong_cred;
        if (!nfsd4_mach_creds_match(session->se_client, rqstp))
                goto out;
-       status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);
-       if (status == nfs_ok || status == nfserr_inval)
+       status = nfsd4_match_existing_connection(rqstp, session,
+                       bcts->dir, &conn);
+       if (status == nfs_ok) {
+               if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
+                               bcts->dir == NFS4_CDFC4_BACK)
+                       conn->cn_flags |= NFS4_CDFC4_BACK;
+               nfsd4_probe_callback(session->se_client);
+               goto out;
+       }
+       if (status == nfserr_inval)
                goto out;
        status = nfsd4_map_bcts_dir(&bcts->dir);
        if (status)
index 2a66844..66645a5 100644 (file)
  * depending on the status field in the last byte. The
  * first byte is where the name starts either way, and a
  * zero means it's empty.
+ *
+ * Also, due to a bug in gcc, we don't want to use the
+ * real (differently sized) name arrays in the inode and
+ * link entries, but always the 'de_name[]' one in the
+ * fake struct entry.
+ *
+ * See
+ *
+ *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99578#c6
+ *
+ * for details, but basically gcc will take the size of the
+ * 'name' array from one of the used union entries randomly.
+ *
+ * This use of 'de_name[]' (48 bytes) avoids the false positive
+ * warnings that would happen if gcc decides to use 'inode.di_name'
+ * (16 bytes) even when the pointer and size were to come from
+ * 'link.dl_name' (48 bytes).
+ *
+ * In all cases the actual name pointer itself is the same, it's
+ * only the gcc internal 'what is the size of this field' logic
+ * that can get confused.
  */
 union qnx4_directory_entry {
        struct {
-               char de_name;
-               char de_pad[62];
-               char de_status;
+               const char de_name[48];
+               u8 de_pad[15];
+               u8 de_status;
        };
        struct qnx4_inode_entry inode;
        struct qnx4_link_info link;
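
A reduced sketch of the gcc behaviour the comment describes (an assumption distilled from the bugzilla entry above, not code from the commit): with differently sized overlapping union members, the compiler may bound a string read by the wrong member:

    #include <string.h>

    union entry {
            struct { char name[16]; char pad[48]; } inode;
            struct { char name[48]; } link;
    };

    static size_t name_len(const union entry *e)
    {
            /* gcc may warn as if this read were bounded by inode.name[16],
             * even when the caller means the 48-byte link name; a single
             * fixed-size member (de_name[48] above) avoids the ambiguity. */
            return strnlen(e->link.name, sizeof(e->link.name));
    }
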
@@ -53,29 +74,26 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx)
                ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
                for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {
                        union qnx4_directory_entry *de;
-                       const char *name;
 
                        offset = ix * QNX4_DIR_ENTRY_SIZE;
                        de = (union qnx4_directory_entry *) (bh->b_data + offset);
 
-                       if (!de->de_name)
+                       if (!de->de_name[0])
                                continue;
                        if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
                                continue;
                        if (!(de->de_status & QNX4_FILE_LINK)) {
                                size = sizeof(de->inode.di_fname);
-                               name = de->inode.di_fname;
                                ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
                        } else {
                                size = sizeof(de->link.dl_fname);
-                               name = de->link.dl_fname;
                                ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) *
                                        QNX4_INODES_PER_BLOCK +
                                        de->link.dl_inode_ndx;
                        }
-                       size = strnlen(name, size);
+                       size = strnlen(de->de_name, size);
-                       QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, name));
+                       QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->de_name));
-                       if (!dir_emit(ctx, name, size, ino, DT_UNKNOWN)) {
+                       if (!dir_emit(ctx, de->de_name, size, ino, DT_UNKNOWN)) {
                                brelse(bh);
                                return 0;
                        }
index d01e8c9..926f87c 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: LGPL-2.1+ */
 /*
- *   fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
+ *   SMB, CIFS, SMB2 FSCTL definitions
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2013
  *   Author(s): Steve French (sfrench@us.ibm.com)
index e93375c..cc7338f 100644 (file)
@@ -1023,16 +1023,7 @@ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
        port &= IO_SPACE_LIMIT;
        return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
 }
-#define __pci_ioport_unmap __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p)
-{
-       uintptr_t start = (uintptr_t) PCI_IOBASE;
-       uintptr_t addr = (uintptr_t) p;
-
-       if (addr >= start && addr < start + IO_SPACE_LIMIT)
-               return;
-       iounmap(p);
-}
+#define ARCH_HAS_GENERIC_IOPORT_MAP
 #endif
 
 #ifndef ioport_unmap
@@ -1048,21 +1039,10 @@ extern void ioport_unmap(void __iomem *p);
 #endif /* CONFIG_HAS_IOPORT_MAP */
 
 #ifndef CONFIG_GENERIC_IOMAP
-struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-
-#ifndef __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p) {}
-#endif
-
 #ifndef pci_iounmap
-#define pci_iounmap pci_iounmap
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
-       __pci_ioport_unmap(p);
-}
+#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
+#endif
 #endif
-#endif /* CONFIG_GENERIC_IOMAP */
 
 #ifndef xlate_dev_mem_ptr
 #define xlate_dev_mem_ptr xlate_dev_mem_ptr
index 9b3eb6d..08237ae 100644 (file)
@@ -110,16 +110,6 @@ static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
 }
 #endif
 
-#ifdef CONFIG_PCI
-/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
-struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
-#elif defined(CONFIG_GENERIC_IOMAP)
-struct pci_dev;
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{ }
-#endif
-
 #include <asm-generic/pci_iomap.h>
 
 #endif
index df636c6..5a2f9bf 100644 (file)
@@ -18,6 +18,7 @@ extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
 extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
                                        unsigned long offset,
                                        unsigned long maxlen);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
 /* Create a virtual mapping cookie for a port on a given PCI device.
  * Do not call this directly, it exists to make it easier for architectures
  * to override */
@@ -50,6 +51,8 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
 {
        return NULL;
 }
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{ }
 #endif
 
 #endif /* __ASM_GENERIC_PCI_IOMAP_H */
index e12b524..39039ce 100644 (file)
@@ -1471,6 +1471,7 @@ struct task_struct {
                                        mce_whole_page : 1,
                                        __mce_reserved : 62;
        struct callback_head            mce_kill_me;
+       int                             mce_count;
 #endif
 
 #ifdef CONFIG_KRETPROBES
index 5265024..207101a 100644 (file)
@@ -27,6 +27,12 @@ enum iter_type {
        ITER_DISCARD,
 };
 
+struct iov_iter_state {
+       size_t iov_offset;
+       size_t count;
+       unsigned long nr_segs;
+};
+
 struct iov_iter {
        u8 iter_type;
        bool data_source;
@@ -47,7 +53,6 @@ struct iov_iter {
                };
                loff_t xarray_start;
        };
-       size_t truncated;
 };
 
 static inline enum iter_type iov_iter_type(const struct iov_iter *i)
@@ -55,6 +60,14 @@ static inline enum iter_type iov_iter_type(const struct iov_iter *i)
        return i->iter_type;
 }
 
+static inline void iov_iter_save_state(struct iov_iter *iter,
+                                      struct iov_iter_state *state)
+{
+       state->iov_offset = iter->iov_offset;
+       state->count = iter->count;
+       state->nr_segs = iter->nr_segs;
+}
+
 static inline bool iter_is_iovec(const struct iov_iter *i)
 {
        return iov_iter_type(i) == ITER_IOVEC;
@@ -233,6 +246,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
                        size_t maxsize, size_t *start);
 int iov_iter_npages(const struct iov_iter *i, int maxpages);
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
 
 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
 
@@ -255,10 +269,8 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
         * conversion in assignment is by definition greater than all
         * values of size_t, including old i->count.
         */
-       if (i->count > count) {
-               i->truncated += i->count - count;
+       if (i->count > count)
                i->count = count;
-       }
 }
 
 /*
@@ -267,7 +279,6 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
  */
 static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 {
-       i->truncated -= count - i->count;
        i->count = count;
 }
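
The new iov_iter_state replaces the old "truncated" bookkeeping: rather than undoing consumption with revert/reexpand, callers snapshot the iterator before issuing I/O and roll it back wholesale. A hedged usage sketch matching the io_uring hunks above (do_some_read() is a hypothetical stand-in for the actual I/O call):

    struct iov_iter_state state;
    ssize_t ret;

    iov_iter_save_state(&iter, &state); /* snapshot offset, count, nr_segs */
    ret = do_some_read(&iter);
    if (ret == -EAGAIN) {
            /* the iter may be partially consumed; rewind it for the retry */
            iov_iter_restore(&iter, &state);
    } else if (ret > 0) {
            /* rewind, then advance by exactly what was consumed */
            iov_iter_restore(&iter, &state);
            iov_iter_advance(&iter, ret);
    }
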
 
index 9f73ed2..bca73e8 100644 (file)
@@ -306,11 +306,13 @@ enum afs_flock_operation {
 
 enum afs_cb_break_reason {
        afs_cb_break_no_break,
+       afs_cb_break_no_promise,
        afs_cb_break_for_callback,
        afs_cb_break_for_deleted,
        afs_cb_break_for_lapsed,
+       afs_cb_break_for_s_reinit,
        afs_cb_break_for_unlink,
-       afs_cb_break_for_vsbreak,
+       afs_cb_break_for_v_break,
        afs_cb_break_for_volume_callback,
        afs_cb_break_for_zap,
 };
@@ -602,11 +604,13 @@ enum afs_cb_break_reason {
 
 #define afs_cb_break_reasons                                           \
        EM(afs_cb_break_no_break,               "no-break")             \
+       EM(afs_cb_break_no_promise,             "no-promise")           \
        EM(afs_cb_break_for_callback,           "break-cb")             \
        EM(afs_cb_break_for_deleted,            "break-del")            \
        EM(afs_cb_break_for_lapsed,             "break-lapsed")         \
+       EM(afs_cb_break_for_s_reinit,           "s-reinit")             \
        EM(afs_cb_break_for_unlink,             "break-unlink")         \
-       EM(afs_cb_break_for_vsbreak,            "break-vs")             \
+       EM(afs_cb_break_for_v_break,            "break-v")              \
        EM(afs_cb_break_for_volume_callback,    "break-v-cb")           \
        E_(afs_cb_break_for_zap,                "break-zap")
 
index 6982920..8e87d27 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
 /*
- *   include/uapi/linux/cifs/cifs_mount.h
  *
  *   Author(s): Scott Lovenberg (scott.lovenberg@gmail.com)
  *
index 59ef351..b270a07 100644 (file)
@@ -317,13 +317,19 @@ enum {
        IORING_REGISTER_IOWQ_AFF                = 17,
        IORING_UNREGISTER_IOWQ_AFF              = 18,
 
-       /* set/get max number of workers */
+       /* set/get max number of io-wq workers */
        IORING_REGISTER_IOWQ_MAX_WORKERS        = 19,
 
        /* this goes last */
        IORING_REGISTER_LAST
 };
 
+/* io-wq worker categories */
+enum {
+       IO_WQ_BOUND,
+       IO_WQ_UNBOUND,
+};
+
 /* deprecated, see struct io_uring_rsrc_update */
 struct io_uring_files_update {
        __u32 offset;
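
The new IO_WQ_BOUND/IO_WQ_UNBOUND constants index the two-element array that
IORING_REGISTER_IOWQ_MAX_WORKERS takes. A hedged sketch of the raw syscall
usage (the limits chosen here are arbitrary; a zero entry leaves that limit
unchanged, and the kernel writes the previous values back into the array):

    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/io_uring.h>

    static int set_iowq_max_workers(int ring_fd)
    {
            unsigned int counts[2];

            counts[IO_WQ_BOUND] = 8;    /* workers for bounded (e.g. file) work */
            counts[IO_WQ_UNBOUND] = 4;  /* workers for unbounded work */
            return syscall(__NR_io_uring_register, ring_fd,
                           IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);
    }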
index 3f72169..81a79a7 100644 (file)
@@ -1242,7 +1242,7 @@ trace_initcall_start_cb(void *data, initcall_t fn)
 {
        ktime_t *calltime = (ktime_t *)data;
 
-       printk(KERN_DEBUG "calling  %pS @ %i irqs_disabled() %d\n", fn, task_pid_nr(current), irqs_disabled());
+       printk(KERN_DEBUG "calling  %pS @ %i\n", fn, task_pid_nr(current));
        *calltime = ktime_get();
 }
 
@@ -1256,8 +1256,8 @@ trace_initcall_finish_cb(void *data, initcall_t fn, int ret)
        rettime = ktime_get();
        delta = ktime_sub(rettime, *calltime);
        duration = (unsigned long long) ktime_to_ns(delta) >> 10;
-       printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs, irqs_disabled() %d\n",
-                fn, ret, duration, irqs_disabled());
+       printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n",
+                fn, ret, duration);
 }
 
 static ktime_t initcall_calltime;
index 6c90c69..95445bd 100644 (file)
@@ -567,7 +567,8 @@ static void add_dma_entry(struct dma_debug_entry *entry)
                pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
        } else if (rc == -EEXIST) {
-               pr_err("cacheline tracking EEXIST, overlapping mappings aren't supported\n");
+               err_printk(entry->dev, entry,
+                       "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
        }
 }
 
index 7ee5284..06fec55 100644 (file)
@@ -206,7 +206,8 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 /**
  * dma_map_sg_attrs - Map the given buffer for DMA
  * @dev:       The device for which to perform the DMA operation
- * @sg:        The sg_table object describing the buffer
+ * @sg:                The scatterlist describing the buffer
+ * @nents:     Number of entries to map
  * @dir:       DMA direction
  * @attrs:     Optional DMA attributes for the map operation
  *
index 744e872..0c000cb 100644 (file)
@@ -10193,7 +10193,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
                return;
 
        if (ifh->nr_file_filters) {
-               mm = get_task_mm(event->ctx->task);
+               mm = get_task_mm(task);
                if (!mm)
                        goto restart;
 
index 4ba1508..88191f6 100644 (file)
  * The risk of writer starvation is there, but the pathological use cases
  * which trigger it are not necessarily the typical RT workloads.
  *
+ * Fast-path orderings:
+ * The lock/unlock of readers can run in fast paths: lock and unlock are only
+ * atomic ops, and there is no inner lock to provide the ACQUIRE and RELEASE
+ * semantics rwbase_rt requires. The atomic ops must therefore be _acquire()
+ * and _release() variants (or stronger).
+ *
  * Common code shared between RT rw_semaphore and rwlock
  */
 
@@ -53,6 +59,7 @@ static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
         * set.
         */
        for (r = atomic_read(&rwb->readers); r < 0;) {
+               /* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */
                if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
                        return 1;
        }
@@ -162,6 +169,8 @@ static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
        /*
         * rwb->readers can only hit 0 when a writer is waiting for the
         * active readers to leave the critical section.
+        *
+        * dec_and_test() is fully ordered, provides RELEASE.
         */
        if (unlikely(atomic_dec_and_test(&rwb->readers)))
                __rwbase_read_unlock(rwb, state);
@@ -172,7 +181,11 @@ static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
 {
        struct rt_mutex_base *rtm = &rwb->rtmutex;
 
-       atomic_add(READER_BIAS - bias, &rwb->readers);
+       /*
+        * _release() is needed in case a reader is in the fast path. It pairs
+        * with atomic_try_cmpxchg() in rwbase_read_trylock() and provides
+        * RELEASE semantics.
+        */
+       (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
        raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
        rwbase_rtmutex_unlock(rtm);
 }
@@ -196,6 +209,23 @@ static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
        __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
 }
 
+static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
+{
+       /* Can do without CAS because we're serialized by wait_lock. */
+       lockdep_assert_held(&rwb->rtmutex.wait_lock);
+
+       /*
+        * _acquire is needed in case a reader is in the fast path. It pairs
+        * with rwbase_read_unlock() and provides ACQUIRE semantics.
+        */
+       if (!atomic_read_acquire(&rwb->readers)) {
+               atomic_set(&rwb->readers, WRITER_BIAS);
+               return true;
+       }
+
+       return false;
+}
+
 static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
                                     unsigned int state)
 {
@@ -210,34 +240,30 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
        atomic_sub(READER_BIAS, &rwb->readers);
 
        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-       /*
-        * set_current_state() for rw_semaphore
-        * current_save_and_set_rtlock_wait_state() for rwlock
-        */
-       rwbase_set_and_save_current_state(state);
+       if (__rwbase_write_trylock(rwb))
+               goto out_unlock;
 
-       /* Block until all readers have left the critical section. */
-       for (; atomic_read(&rwb->readers);) {
+       rwbase_set_and_save_current_state(state);
+       for (;;) {
                /* Optimized out for rwlocks */
                if (rwbase_signal_pending_state(state, current)) {
-                       __set_current_state(TASK_RUNNING);
+                       rwbase_restore_current_state();
                        __rwbase_write_unlock(rwb, 0, flags);
                        return -EINTR;
                }
+
+               if (__rwbase_write_trylock(rwb))
+                       break;
+
                raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+               rwbase_schedule();
+               raw_spin_lock_irqsave(&rtm->wait_lock, flags);
 
-               /*
-                * Schedule and wait for the readers to leave the critical
-                * section. The last reader leaving it wakes the waiter.
-                */
-               if (atomic_read(&rwb->readers) != 0)
-                       rwbase_schedule();
                set_current_state(state);
-               raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        }
-
-       atomic_set(&rwb->readers, WRITER_BIAS);
        rwbase_restore_current_state();
+
+out_unlock:
        raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
        return 0;
 }
@@ -253,8 +279,7 @@ static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
        atomic_sub(READER_BIAS, &rwb->readers);
 
        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-       if (!atomic_read(&rwb->readers)) {
-               atomic_set(&rwb->readers, WRITER_BIAS);
+       if (__rwbase_write_trylock(rwb)) {
                raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
                return 1;
        }
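
The ordering rules spelled out in the comments above are the usual
message-passing pairing: the reader's update of ->readers must be an ACQUIRE so
it observes everything the previous writer published, and the writer's final
update must be a RELEASE so its critical section is visible to the next reader.
A stripped-down C11 illustration of the same shape (not kernel code, just the
guarantee being relied on):

    #include <stdatomic.h>

    static atomic_int lockword;
    static int shared_data;         /* the protected payload */

    /* Writer: store the payload, then RELEASE the lock word. */
    static void writer_publish(int v)
    {
            shared_data = v;
            atomic_fetch_add_explicit(&lockword, 1, memory_order_release);
    }

    /* Reader: ACQUIRE on the lock word guarantees the payload is seen. */
    static int reader_consume(void)
    {
            int r = atomic_load_explicit(&lockword, memory_order_relaxed);

            while (!atomic_compare_exchange_weak_explicit(&lockword, &r, r + 1,
                                                          memory_order_acquire,
                                                          memory_order_relaxed))
                    ;
            return shared_data;
    }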
index f2d50d6..755c10c 100644 (file)
@@ -1972,3 +1972,39 @@ int import_single_range(int rw, void __user *buf, size_t len,
        return 0;
 }
 EXPORT_SYMBOL(import_single_range);
+
+/**
+ * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
+ *     iov_iter_save_state() was called.
+ *
+ * @i: &struct iov_iter to restore
+ * @state: state to restore from
+ *
+ * Used after iov_iter_save_state() to restore @i, in case operations may
+ * have advanced it.
+ *
+ * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
+ */
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
+{
+       if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
+                        !iov_iter_is_kvec(i)))
+               return;
+       i->iov_offset = state->iov_offset;
+       i->count = state->count;
+       /*
+        * For the *vec iters, nr_segs + iov is constant - if we increment
+        * the vec, then we also decrement the nr_segs count. Hence we don't
+        * need to track both of these; just one is enough and we can deduce
+        * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
+        * size, so we can just adjust the iov pointer as they are unionized.
+        * ITER_BVEC _may_ be the same size on some archs, but on others it is
+        * not. Be safe and handle it separately.
+        */
+       BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
+       if (iov_iter_is_bvec(i))
+               i->bvec -= state->nr_segs - i->nr_segs;
+       else
+               i->iov -= state->nr_segs - i->nr_segs;
+       i->nr_segs = state->nr_segs;
+}
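
The intended calling pattern is save before issuing, restore on retry; the
io_uring read path is the in-tree user. A minimal sketch, assuming an iter that
has already been set up (issue_read() and queue_retry() are hypothetical
helpers, not part of this change):

    struct iov_iter_state state;
    ssize_t ret;

    iov_iter_save_state(&iter, &state);      /* snapshot before issuing */
    ret = issue_read(&iter);                 /* may consume part of the iter */
    if (ret == -EAGAIN) {
            iov_iter_restore(&iter, &state); /* rewind to the snapshot */
            queue_retry();                   /* hypothetical: try again later */
    }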
index 2d3eb1c..ce39ce9 100644 (file)
@@ -134,4 +134,47 @@ void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
        return pci_iomap_wc_range(dev, bar, 0, maxlen);
 }
 EXPORT_SYMBOL_GPL(pci_iomap_wc);
+
+/*
+ * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
+ * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
+ * the different IOMAP ranges.
+ *
+ * But if the architecture does not use the generic iomap code, and if
+ * it has _not_ defined its own private pci_iounmap function, we define
+ * it here.
+ *
+ * NOTE! This default implementation assumes that if the architecture
+ * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
+ * be fixed to the half-open range [PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT),
+ * and does not need unmapping with 'ioport_unmap()'.
+ *
+ * If you have different rules for your architecture, you need to
+ * implement your own pci_iounmap() that knows the rules for where
+ * and how IO vs MEM get mapped.
+ *
+ * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
+ * from legacy <asm-generic/io.h> header file behavior. In particular,
+ * it would seem to make sense to do the iounmap(p) for the non-IO-space
+ * case here regardless, but that's not what the old header file code
+ * did. Probably incorrectly, but this is meant to be bug-for-bug
+ * compatible.
+ */
+#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+       uintptr_t start = (uintptr_t) PCI_IOBASE;
+       uintptr_t addr = (uintptr_t) p;
+
+       if (addr >= start && addr < start + IO_SPACE_LIMIT)
+               return;
+       iounmap(p);
+#endif
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
+
 #endif /* CONFIG_PCI */
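
For context, the default above only matters on the teardown half of the usual
pairing, sketched here inside a hypothetical probe path (the BAR number and
CTRL_REG offset are illustrative):

    /* Map BAR 0 in full (maxlen 0 means the whole BAR), poke a register,
     * unmap. With the default pci_iounmap() above, the unmap is a no-op for
     * I/O-port space and a real iounmap() for MMIO space. */
    void __iomem *regs = pci_iomap(pdev, 0, 0);

    if (!regs)
            return -ENOMEM;
    writel(0x1, regs + CTRL_REG);    /* CTRL_REG is hypothetical */
    pci_iounmap(pdev, regs);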
index b762215..6da5020 100644 (file)
@@ -106,9 +106,6 @@ static bool do_memsw_account(void)
 /* memcg and lruvec stats flushing */
 static void flush_memcg_stats_dwork(struct work_struct *w);
 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
-static void flush_memcg_stats_work(struct work_struct *w);
-static DECLARE_WORK(stats_flush_work, flush_memcg_stats_work);
-static DEFINE_PER_CPU(unsigned int, stats_flush_threshold);
 static DEFINE_SPINLOCK(stats_flush_lock);
 
 #define THRESHOLDS_EVENTS_TARGET 128
@@ -682,8 +679,6 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 
        /* Update lruvec */
        __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
-       if (!(__this_cpu_inc_return(stats_flush_threshold) % MEMCG_CHARGE_BATCH))
-               queue_work(system_unbound_wq, &stats_flush_work);
 }
 
 /**
@@ -5361,11 +5356,6 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
        queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
 }
 
-static void flush_memcg_stats_work(struct work_struct *w)
-{
-       mem_cgroup_flush_stats();
-}
-
 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
index 25fc46e..adf9b9e 100644 (file)
@@ -3403,6 +3403,7 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
        i_mmap_unlock_write(mapping);
 }
+EXPORT_SYMBOL_GPL(unmap_mapping_pages);
 
 /**
  * unmap_mapping_range - unmap the portion of all mmaps in the specified
index d4268d8..d5b81e4 100644 (file)
@@ -352,6 +352,7 @@ void workingset_refault(struct page *page, void *shadow)
 
        inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
 
+       mem_cgroup_flush_stats();
        /*
         * Compare the distance to the existing workingset size. We
         * don't activate pages that couldn't stay resident even if
index 4cce8fd..51fc23e 100644 (file)
@@ -29,7 +29,12 @@ CLANG_FLAGS  += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
 else
 CLANG_FLAGS    += -fintegrated-as
 endif
+# By default, clang only warns when it encounters an unknown warning flag or
+# certain optimization flags it knows it has not implemented.
+# Make it behave more like gcc by erroring when these flags are encountered
+# so they can be implemented or wrapped in cc-option.
 CLANG_FLAGS    += -Werror=unknown-warning-option
+CLANG_FLAGS    += -Werror=ignored-optimization-argument
 KBUILD_CFLAGS  += $(CLANG_FLAGS)
 KBUILD_AFLAGS  += $(CLANG_FLAGS)
 export CLANG_FLAGS
index eef56d6..48585c4 100644 (file)
@@ -13,7 +13,7 @@
 # Stage 2 is handled by this file and does the following
 # 1) Find all modules listed in modules.order
 # 2) modpost is then used to
-# 3)  create one <module>.mod.c file pr. module
+# 3)  create one <module>.mod.c file per module
 # 4)  create one Module.symvers file with CRC for all exported symbols
 
 # Step 3 is used to place certain information in the module's ELF
index b9b0f15..217d21a 100755 (executable)
@@ -34,7 +34,6 @@ REGEX_SOURCE_SYMBOL = re.compile(SOURCE_SYMBOL)
 REGEX_KCONFIG_DEF = re.compile(DEF)
 REGEX_KCONFIG_EXPR = re.compile(EXPR)
 REGEX_KCONFIG_STMT = re.compile(STMT)
-REGEX_KCONFIG_HELP = re.compile(r"^\s+help\s*$")
 REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$")
 REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
 REGEX_QUOTES = re.compile("(\"(.*?)\")")
@@ -102,6 +101,9 @@ def parse_options():
                      "continue.")
 
     if args.commit:
+        if args.commit.startswith('HEAD'):
+            sys.exit("The --commit option can't use the HEAD ref")
+
         args.find = False
 
     if args.ignore:
@@ -432,7 +434,6 @@ def parse_kconfig_file(kfile):
     lines = []
     defined = []
     references = []
-    skip = False
 
     if not os.path.exists(kfile):
         return defined, references
@@ -448,12 +449,6 @@ def parse_kconfig_file(kfile):
         if REGEX_KCONFIG_DEF.match(line):
             symbol_def = REGEX_KCONFIG_DEF.findall(line)
             defined.append(symbol_def[0])
-            skip = False
-        elif REGEX_KCONFIG_HELP.match(line):
-            skip = True
-        elif skip:
-            # ignore content of help messages
-            pass
         elif REGEX_KCONFIG_STMT.match(line):
             line = REGEX_QUOTES.sub("", line)
             symbols = get_symbols_in_line(line)
index 0033eed..1d1bde1 100755 (executable)
@@ -13,6 +13,7 @@ import logging
 import os
 import re
 import subprocess
+import sys
 
 _DEFAULT_OUTPUT = 'compile_commands.json'
 _DEFAULT_LOG_LEVEL = 'WARNING'
index d888672..8441e3e 100644 (file)
@@ -43,7 +43,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
        free(evsel);
 }
 
-#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y))
 #define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -54,7 +54,10 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
                int cpu, thread;
                for (cpu = 0; cpu < ncpus; cpu++) {
                        for (thread = 0; thread < nthreads; thread++) {
-                               FD(evsel, cpu, thread) = -1;
+                               int *fd = FD(evsel, cpu, thread);
+
+                               if (fd)
+                                       *fd = -1;
                        }
                }
        }
@@ -80,7 +83,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
 static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
 {
        struct perf_evsel *leader = evsel->leader;
-       int fd;
+       int *fd;
 
        if (evsel == leader) {
                *group_fd = -1;
@@ -95,10 +98,10 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou
                return -ENOTCONN;
 
        fd = FD(leader, cpu, thread);
-       if (fd == -1)
+       if (fd == NULL || *fd == -1)
                return -EBADF;
 
-       *group_fd = fd;
+       *group_fd = *fd;
 
        return 0;
 }
@@ -138,7 +141,11 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
 
        for (cpu = 0; cpu < cpus->nr; cpu++) {
                for (thread = 0; thread < threads->nr; thread++) {
-                       int fd, group_fd;
+                       int fd, group_fd, *evsel_fd;
+
+                       evsel_fd = FD(evsel, cpu, thread);
+                       if (evsel_fd == NULL)
+                               return -EINVAL;
 
                        err = get_group_fd(evsel, cpu, thread, &group_fd);
                        if (err < 0)
@@ -151,7 +158,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
                        if (fd < 0)
                                return -errno;
 
-                       FD(evsel, cpu, thread) = fd;
+                       *evsel_fd = fd;
                }
        }
 
@@ -163,9 +170,12 @@ static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
        int thread;
 
        for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
-               if (FD(evsel, cpu, thread) >= 0)
-                       close(FD(evsel, cpu, thread));
-               FD(evsel, cpu, thread) = -1;
+               int *fd = FD(evsel, cpu, thread);
+
+               if (fd && *fd >= 0) {
+                       close(*fd);
+                       *fd = -1;
+               }
        }
 }
 
@@ -209,13 +219,12 @@ void perf_evsel__munmap(struct perf_evsel *evsel)
 
        for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
                for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
-                       int fd = FD(evsel, cpu, thread);
-                       struct perf_mmap *map = MMAP(evsel, cpu, thread);
+                       int *fd = FD(evsel, cpu, thread);
 
-                       if (fd < 0)
+                       if (fd == NULL || *fd < 0)
                                continue;
 
-                       perf_mmap__munmap(map);
+                       perf_mmap__munmap(MMAP(evsel, cpu, thread));
                }
        }
 
@@ -239,15 +248,16 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
 
        for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
                for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
-                       int fd = FD(evsel, cpu, thread);
-                       struct perf_mmap *map = MMAP(evsel, cpu, thread);
+                       int *fd = FD(evsel, cpu, thread);
+                       struct perf_mmap *map;
 
-                       if (fd < 0)
+                       if (fd == NULL || *fd < 0)
                                continue;
 
+                       map = MMAP(evsel, cpu, thread);
                        perf_mmap__init(map, NULL, false, NULL);
 
-                       ret = perf_mmap__mmap(map, &mp, fd, cpu);
+                       ret = perf_mmap__mmap(map, &mp, *fd, cpu);
                        if (ret) {
                                perf_evsel__munmap(evsel);
                                return ret;
@@ -260,7 +270,9 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
 
 void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
 {
-       if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
+       int *fd = FD(evsel, cpu, thread);
+
+       if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL)
                return NULL;
 
        return MMAP(evsel, cpu, thread)->base;
@@ -295,17 +307,18 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
                     struct perf_counts_values *count)
 {
        size_t size = perf_evsel__read_size(evsel);
+       int *fd = FD(evsel, cpu, thread);
 
        memset(count, 0, sizeof(*count));
 
-       if (FD(evsel, cpu, thread) < 0)
+       if (fd == NULL || *fd < 0)
                return -EINVAL;
 
        if (MMAP(evsel, cpu, thread) &&
            !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
                return 0;
 
-       if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
+       if (readn(*fd, count->values, size) <= 0)
                return -errno;
 
        return 0;
@@ -318,8 +331,13 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
        int thread;
 
        for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
-               int fd = FD(evsel, cpu, thread),
-                   err = ioctl(fd, ioc, arg);
+               int err;
+               int *fd = FD(evsel, cpu, thread);
+
+               if (fd == NULL || *fd < 0)
+                       return -1;
+
+               err = ioctl(*fd, ioc, arg);
 
                if (err)
                        return err;
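
All of the conversions above fall out of one decision: FD() now returns a
pointer into the xyarray rather than dereferencing it blindly, so every caller
gets a natural place to reject out-of-range cpu/thread coordinates. A
self-contained sketch of that accessor pattern (fd_table is a made-up stand-in
for xyarray):

    #include <stddef.h>

    /* Bounds-checked 2D table of fds. Returning a pointer (NULL when out of
     * range) lets callers distinguish "bad coordinates" from "entry holds -1
     * because the fd was never opened". */
    struct fd_table {
            int *entries;
            size_t max_x, max_y;
    };

    static int *fd_entry(struct fd_table *t, size_t x, size_t y)
    {
            if (x >= t->max_x || y >= t->max_y)
                    return NULL;
            return &t->entries[x * t->max_y + y];
    }

    /* Caller pattern mirroring the diff: check the pointer, then the value. */
    static int fd_get(struct fd_table *t, size_t x, size_t y)
    {
            int *fd = fd_entry(t, x, y);

            if (fd == NULL || *fd < 0)
                    return -1;
            return *fd;
    }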
index 0e824f7..6211d0b 100644 (file)
@@ -368,16 +368,6 @@ static inline int output_type(unsigned int type)
        return OUTPUT_TYPE_OTHER;
 }
 
-static inline unsigned int attr_type(unsigned int type)
-{
-       switch (type) {
-       case OUTPUT_TYPE_SYNTH:
-               return PERF_TYPE_SYNTH;
-       default:
-               return type;
-       }
-}
-
 static bool output_set_by_user(void)
 {
        int j;
@@ -556,6 +546,18 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
                output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
 }
 
+static struct evsel *find_first_output_type(struct evlist *evlist,
+                                           unsigned int type)
+{
+       struct evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel) {
+               if (output_type(evsel->core.attr.type) == (int)type)
+                       return evsel;
+       }
+       return NULL;
+}
+
 /*
  * verify all user requested events exist and the samples
  * have the expected data
@@ -567,7 +569,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
        struct evsel *evsel;
 
        for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
-               evsel = perf_session__find_first_evtype(session, attr_type(j));
+               evsel = find_first_output_type(session->evlist, j);
 
                /*
                 * even if fields is set to 0 (ie., show nothing) event must
index 781afe4..fa5bd5c 100644 (file)
@@ -757,25 +757,40 @@ void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
 }
 
 void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
-                           unsigned int row, bool arrow_down)
+                           unsigned int row, int diff, bool arrow_down)
 {
-       unsigned int end_row;
+       int end_row;
 
-       if (row >= browser->top_idx)
-               end_row = row - browser->top_idx;
-       else
+       if (diff <= 0)
                return;
 
        SLsmg_set_char_set(1);
 
        if (arrow_down) {
+               if (row + diff <= browser->top_idx)
+                       return;
+
+               end_row = row + diff - browser->top_idx;
                ui_browser__gotorc(browser, end_row, column - 1);
-               SLsmg_write_char(SLSMG_ULCORN_CHAR);
-               ui_browser__gotorc(browser, end_row, column);
-               SLsmg_draw_hline(2);
-               ui_browser__gotorc(browser, end_row + 1, column - 1);
                SLsmg_write_char(SLSMG_LTEE_CHAR);
+
+               while (--end_row >= 0 && end_row > (int)(row - browser->top_idx)) {
+                       ui_browser__gotorc(browser, end_row, column - 1);
+                       SLsmg_draw_vline(1);
+               }
+
+               end_row = (int)(row - browser->top_idx);
+               if (end_row >= 0) {
+                       ui_browser__gotorc(browser, end_row, column - 1);
+                       SLsmg_write_char(SLSMG_ULCORN_CHAR);
+                       ui_browser__gotorc(browser, end_row, column);
+                       SLsmg_draw_hline(2);
+               }
        } else {
+               if (row < browser->top_idx)
+                       return;
+
+               end_row = row - browser->top_idx;
                ui_browser__gotorc(browser, end_row, column - 1);
                SLsmg_write_char(SLSMG_LTEE_CHAR);
                ui_browser__gotorc(browser, end_row, column);
index 3678eb8..510ce45 100644 (file)
@@ -51,7 +51,7 @@ void ui_browser__write_graph(struct ui_browser *browser, int graph);
 void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
                              u64 start, u64 end);
 void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
-                           unsigned int row, bool arrow_down);
+                           unsigned int row, int diff, bool arrow_down);
 void __ui_browser__show_title(struct ui_browser *browser, const char *title);
 void ui_browser__show_title(struct ui_browser *browser, const char *title);
 int ui_browser__show(struct ui_browser *browser, const char *title,
index ef4da42..e81c249 100644 (file)
@@ -125,13 +125,20 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
                ab->selection = al;
 }
 
-static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
+static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
 {
        struct disasm_line *pos = list_prev_entry(cursor, al.node);
        const char *name;
+       int diff = 1;
+
+       while (pos && pos->al.offset == -1) {
+               pos = list_prev_entry(pos, al.node);
+               if (!ab->opts->hide_src_code)
+                       diff++;
+       }
 
        if (!pos)
-               return false;
+               return 0;
 
        if (ins__is_lock(&pos->ins))
                name = pos->ops.locked.ins.name;
@@ -139,9 +146,11 @@ static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
                name = pos->ins.name;
 
        if (!name || !cursor->ins.name)
-               return false;
+               return 0;
 
-       return ins__is_fused(ab->arch, name, cursor->ins.name);
+       if (ins__is_fused(ab->arch, name, cursor->ins.name))
+               return diff;
+       return 0;
 }
 
 static void annotate_browser__draw_current_jump(struct ui_browser *browser)
@@ -155,6 +164,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
        struct annotation *notes = symbol__annotation(sym);
        u8 pcnt_width = annotation__pcnt_width(notes);
        int width;
+       int diff = 0;
 
        /* PLT symbols contain external offsets */
        if (strstr(sym->name, "@plt"))
@@ -205,11 +215,11 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
                                 pcnt_width + 2 + notes->widths.addr + width,
                                 from, to);
 
-       if (is_fused(ab, cursor)) {
+       diff = is_fused(ab, cursor);
+       if (diff > 0) {
                ui_browser__mark_fused(browser,
                                       pcnt_width + 3 + notes->widths.addr + width,
-                                      from - 1,
-                                      to > from);
+                                      from - diff, diff, to > from);
        }
 }
 
index 683f6d6..1a7112a 100644 (file)
 struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
 {
        struct btf *btf;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
        int err = btf__get_from_id(id, &btf);
+#pragma GCC diagnostic pop
 
        return err ? ERR_PTR(err) : btf;
 }
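
The push/ignored/pop sequence above is the standard way to call a deliberately
deprecated symbol from within the library that deprecated it, without turning
the warning off anywhere else. A self-contained illustration (old_api() here is
made up):

    #include <stdio.h>

    __attribute__((deprecated("use new_api() instead")))
    static int old_api(void) { return 42; }

    int main(void)
    {
            int v;

    /* The warning is suppressed only between push and pop. */
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
            v = old_api();
    #pragma GCC diagnostic pop

            printf("%d\n", v);
            return 0;
    }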
index da19be7..44e40ba 100644 (file)
@@ -2149,6 +2149,7 @@ static int add_callchain_ip(struct thread *thread,
 
        al.filtered = 0;
        al.sym = NULL;
+       al.srcline = NULL;
        if (!cpumode) {
                thread__find_cpumode_addr_location(thread, ip, &al);
        } else {
index bd1ca25..aed632d 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#include <ppc-asm.h>
+#include <basic_asm.h>
 #include <asm/unistd.h>
 
        .text
@@ -26,3 +26,38 @@ FUNC_START(getppid_tm_suspended)
 1:
        li      r3, -1
        blr
+
+
+.macro scv level
+       .long (0x44000001 | (\level) << 5)
+.endm
+
+FUNC_START(getppid_scv_tm_active)
+       PUSH_BASIC_STACK(0)
+       tbegin.
+       beq 1f
+       li      r0, __NR_getppid
+       scv     0
+       tend.
+       POP_BASIC_STACK(0)
+       blr
+1:
+       li      r3, -1
+       POP_BASIC_STACK(0)
+       blr
+
+FUNC_START(getppid_scv_tm_suspended)
+       PUSH_BASIC_STACK(0)
+       tbegin.
+       beq 1f
+       li      r0, __NR_getppid
+       tsuspend.
+       scv     0
+       tresume.
+       tend.
+       POP_BASIC_STACK(0)
+       blr
+1:
+       li      r3, -1
+       POP_BASIC_STACK(0)
+       blr
index 467a6b3..b763354 100644 (file)
 #include "utils.h"
 #include "tm.h"
 
+#ifndef PPC_FEATURE2_SCV
+#define PPC_FEATURE2_SCV               0x00100000 /* scv syscall */
+#endif
+
 extern int getppid_tm_active(void);
 extern int getppid_tm_suspended(void);
+extern int getppid_scv_tm_active(void);
+extern int getppid_scv_tm_suspended(void);
 
 unsigned retries = 0;
 
 #define TEST_DURATION 10 /* seconds */
 
-pid_t getppid_tm(bool suspend)
+pid_t getppid_tm(bool scv, bool suspend)
 {
        int i;
        pid_t pid;
 
        for (i = 0; i < TM_RETRIES; i++) {
-               if (suspend)
-                       pid = getppid_tm_suspended();
-               else
-                       pid = getppid_tm_active();
+               if (suspend) {
+                       if (scv)
+                               pid = getppid_scv_tm_suspended();
+                       else
+                               pid = getppid_tm_suspended();
+               } else {
+                       if (scv)
+                               pid = getppid_scv_tm_active();
+                       else
+                               pid = getppid_tm_active();
+               }
 
                if (pid >= 0)
                        return pid;
@@ -82,15 +95,24 @@ int tm_syscall(void)
                 * Test a syscall within a suspended transaction and verify
                 * that it succeeds.
                 */
-               FAIL_IF(getppid_tm(true) == -1); /* Should succeed. */
+               FAIL_IF(getppid_tm(false, true) == -1); /* Should succeed. */
 
                /*
                 * Test a syscall within an active transaction and verify that
                 * it fails with the correct failure code.
                 */
-               FAIL_IF(getppid_tm(false) != -1);  /* Should fail... */
+               FAIL_IF(getppid_tm(false, false) != -1);  /* Should fail... */
                FAIL_IF(!failure_is_persistent()); /* ...persistently... */
                FAIL_IF(!failure_is_syscall());    /* ...with code syscall. */
+
+               /* Now do it all again with scv if it is available. */
+               if (have_hwcap2(PPC_FEATURE2_SCV)) {
+                       FAIL_IF(getppid_tm(true, true) == -1); /* Should succeed. */
+                       FAIL_IF(getppid_tm(true, false) != -1);  /* Should fail... */
+                       FAIL_IF(!failure_is_persistent()); /* ...persistently... */
+                       FAIL_IF(!failure_is_syscall());    /* ...with code syscall. */
+               }
+
                gettimeofday(&now, 0);
        }