Merge tag 'irqchip-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm...
author Thomas Gleixner <tglx@linutronix.de>
Sat, 18 Feb 2023 23:07:56 +0000 (00:07 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
Sat, 18 Feb 2023 23:07:56 +0000 (00:07 +0100)
Pull irqchip updates from Marc Zyngier:

   - New and improved irqdomain locking, closing a number of races that
     became apparent now that we are able to probe drivers in parallel

   - A bunch of OF node refcounting bugs have been fixed

   - We now have a new IPI mux, lifted from the Apple AIC code and
     made common. It is expected that riscv will eventually benefit
     from it (a usage sketch follows the lore link below)

   - Two small fixes for the Broadcom L2 drivers

   - Various cleanups and minor bug fixes

Link: https://lore.kernel.org/r/20230218143452.3817627-1-maz@kernel.org
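
For illustration, a minimal sketch of how an irqchip driver might sit on top
of the new IPI mux (kernel/irq/ipi-mux.c). ipi_mux_create() and
ipi_mux_process() are the interfaces added by this series; the doorbell hook,
the parent-handler wiring and the set_smp_ipi_range() call are assumptions
modelled on the Apple AIC conversion, not a definitive driver:

    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    /* Hypothetical driver hook that rings the single hardware IPI. */
    static void my_hw_send_ipi(unsigned int cpu)
    {
            /* poke the per-cpu doorbell register here (assumed) */
    }

    static void my_parent_ipi_handler(struct irq_desc *desc)
    {
            /* Demultiplex every pending software IPI for this CPU. */
            ipi_mux_process();
    }

    static int my_irqchip_init_ipis(void)
    {
            /* Stack eight virtual IPIs on top of the one hardware IPI. */
            int base_ipi = ipi_mux_create(8, my_hw_send_ipi);

            if (base_ipi <= 0)
                    return base_ipi ? base_ipi : -ENODEV;

            /* Hand the virq range to the architecture's SMP/IPI layer. */
            set_smp_ipi_range(base_ipi, 8);
            return 0;
    }
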
410 files changed:
.mailmap
Documentation/arm64/silicon-errata.rst
Documentation/conf.py
Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-aes.yaml
Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-sha.yaml
Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-tdes.yaml
Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml
Documentation/devicetree/bindings/display/msm/qcom,qcm2290-mdss.yaml
Documentation/devicetree/bindings/display/msm/qcom,sm6115-mdss.yaml
Documentation/devicetree/bindings/sound/mt8186-mt6366-rt1019-rt5682s.yaml
Documentation/devicetree/bindings/sound/qcom,lpass-tx-macro.yaml
Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml
Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml
Documentation/devicetree/bindings/spi/atmel,quadspi.yaml
Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
Documentation/networking/rxrpc.rst
Documentation/sphinx/load_config.py
Documentation/virt/kvm/api.rst
Documentation/virt/kvm/locking.rst
MAINTAINERS
Makefile
arch/arm64/Kconfig
arch/arm64/include/asm/atomic_ll_sc.h
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/esr.h
arch/arm64/include/asm/hugetlb.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/uprobes.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/efi-rt-wrapper.S
arch/arm64/kernel/elfcore.c
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/signal.c
arch/arm64/kvm/hyp/include/hyp/fault.h
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/mmu.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/vgic/vgic-v3.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/mmu.c
arch/arm64/tools/cpucaps
arch/ia64/kernel/elfcore.c
arch/powerpc/boot/wrapper
arch/powerpc/include/asm/imc-pmu.h
arch/powerpc/mm/book3s64/hash_utils.c
arch/powerpc/perf/imc-pmu.c
arch/s390/boot/decompressor.c
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/cpu_mf.h
arch/s390/include/asm/debug.h
arch/s390/include/asm/percpu.h
arch/s390/kernel/machine_kexec_file.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/vmlinux.lds.S
arch/s390/kvm/interrupt.c
arch/sh/include/asm/pgtable-3level.h
arch/x86/boot/bioscall.S
arch/x86/events/intel/cstate.c
arch/x86/events/intel/uncore.c
arch/x86/events/msr.c
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/resctrl/monitor.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/xen.c
arch/x86/mm/init.c
arch/x86/mm/pat/memtype.c
arch/x86/pci/mmconfig-shared.c
arch/x86/platform/uv/uv_irq.c
arch/x86/um/elfcore.c
arch/x86/xen/p2m.c
arch/xtensa/include/asm/processor.h
arch/xtensa/kernel/traps.c
arch/xtensa/mm/fault.c
block/blk-core.c
drivers/acpi/glue.c
drivers/acpi/resource.c
drivers/acpi/scan.c
drivers/acpi/video_detect.c
drivers/ata/Kconfig
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c
drivers/char/tpm/xen-tpmfront.c
drivers/cpufreq/amd-pstate.c
drivers/cpufreq/apple-soc-cpufreq.c
drivers/cpufreq/armada-37xx-cpufreq.c
drivers/cpufreq/cppc_cpufreq.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/qcom-cpufreq-hw.c
drivers/crypto/atmel-ecc.c
drivers/crypto/atmel-i2c.c
drivers/crypto/atmel-i2c.h
drivers/edac/edac_device.c
drivers/edac/edac_module.h
drivers/edac/highbank_mc_edac.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/runtime-wrappers.c
drivers/firmware/google/coreboot_table.c
drivers/firmware/google/coreboot_table.h
drivers/firmware/psci/psci.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/drm_buddy.c
drivers/gpu/drm/drm_panel_orientation_quirks.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gt/intel_gt_regs.h
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
drivers/gpu/drm/msm/adreno/adreno_gpu.h
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
drivers/gpu/drm/msm/dp/dp_aux.c
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_mdss.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c [deleted file]
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/vmwgfx/ttm_object.c
drivers/gpu/drm/vmwgfx/ttm_object.h
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/input/misc/xen-kbdfront.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu/arm-smmu.c
drivers/iommu/iommu.c
drivers/iommu/iova.c
drivers/iommu/mtk_iommu_v1.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-alpine-msi.c
drivers/irqchip/irq-apple-aic.c
drivers/irqchip/irq-aspeed-scu-ic.c
drivers/irqchip/irq-bcm7120-l2.c
drivers/irqchip/irq-brcmstb-l2.c
drivers/irqchip/irq-gic-v2m.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3-mbi.c
drivers/irqchip/irq-loongson-liointc.c
drivers/irqchip/irq-loongson-pch-msi.c
drivers/irqchip/irq-mvebu-gicp.c
drivers/irqchip/irq-mvebu-odmi.c
drivers/irqchip/irq-ti-sci-intr.c
drivers/irqchip/irqchip.c
drivers/mtd/parsers/scpart.c
drivers/mtd/parsers/tplink_safeloader.c
drivers/mtd/spi-nor/core.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/ice/ice_gnss.c
drivers/net/ethernet/intel/igc/igc_defines.h
drivers/net/ethernet/intel/igc/igc_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
drivers/net/ethernet/marvell/octeontx2/af/cgx.h
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ipa/data/ipa_data-v4.7.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/r8152.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/nfc/pn533/usb.c
drivers/nvme/host/apple.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/pci.c
drivers/pci/controller/dwc/Kconfig
drivers/pci/xen-pcifront.c
drivers/platform/surface/aggregator/controller.c
drivers/platform/surface/aggregator/ssh_request_layer.c
drivers/platform/x86/amd/pmc.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/asus-wmi.h
drivers/platform/x86/dell/dell-wmi-privacy.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel/int3472/clk_and_regulator.c
drivers/platform/x86/intel/int3472/discrete.c
drivers/platform/x86/intel/pmc/core.c
drivers/platform/x86/simatic-ipc.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/touchscreen_dmi.c
drivers/regulator/da9211-regulator.c
drivers/regulator/qcom-rpmh-regulator.c
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/mpi3mr/Makefile
drivers/scsi/mpi3mr/mpi3mr_fw.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/storvsc_drv.c
drivers/scsi/xen-scsifront.c
drivers/spi/spi-cadence-xspi.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi.c
drivers/spi/spidev.c
drivers/tty/hvc/hvc_xen.c
drivers/ufs/core/ufshcd.c
drivers/usb/host/xen-hcd.c
drivers/video/fbdev/xen-fbfront.c
drivers/xen/pvcalls-back.c
drivers/xen/pvcalls-front.c
drivers/xen/xen-pciback/xenbus.c
drivers/xen/xen-scsiback.c
fs/afs/cmservice.c
fs/afs/rxrpc.c
fs/binfmt_elf.c
fs/binfmt_elf_fdpic.c
fs/cifs/cifsencrypt.c
fs/cifs/connect.c
fs/cifs/dfs_cache.c
fs/cifs/link.c
fs/cifs/sess.c
fs/cifs/smb1ops.c
fs/cifs/smb2pdu.c
fs/ksmbd/auth.c
fs/ksmbd/connection.c
fs/ksmbd/smb2pdu.c
fs/ksmbd/transport_tcp.c
fs/nfsd/filecache.c
fs/nfsd/filecache.h
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfsproc.c
fs/nfsd/trace.h
fs/xfs/libxfs/xfs_btree.c
fs/xfs/xfs_extent_busy.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_qm.c
fs/xfs/xfs_reflink.c
include/acpi/acpi_bus.h
include/linux/elfcore.h
include/linux/irq.h
include/linux/irqdomain.h
include/linux/mlx5/driver.h
include/linux/mtd/spi-nor.h
include/linux/platform_data/x86/simatic-ipc.h
include/linux/tpm_eventlog.h
include/net/af_rxrpc.h
include/scsi/scsi_transport_iscsi.h
include/trace/events/rxrpc.h
include/uapi/linux/psci.h
include/xen/xenbus.h
init/Kconfig
init/Makefile
io_uring/fdinfo.c
io_uring/io-wq.c
io_uring/poll.c
io_uring/rw.c
kernel/irq/Kconfig
kernel/irq/Makefile
kernel/irq/ipi-mux.c [new file with mode: 0644]
kernel/irq/irqdomain.c
kernel/kallsyms_selftest.c
kernel/kcsan/kcsan_test.c
kernel/sched/core.c
kernel/time/tick-oneshot.c
kernel/time/time.c
kernel/time/timekeeping.c
lib/lockref.c
mm/memblock.c
net/9p/trans_xen.c
net/core/gro.c
net/ipv6/raw.c
net/rxrpc/Makefile
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_event.c
net/rxrpc/call_object.c
net/rxrpc/call_state.c [new file with mode: 0644]
net/rxrpc/conn_client.c
net/rxrpc/conn_event.c
net/rxrpc/conn_object.c
net/rxrpc/conn_service.c
net/rxrpc/input.c
net/rxrpc/insecure.c
net/rxrpc/io_thread.c
net/rxrpc/local_object.c
net/rxrpc/net_ns.c
net/rxrpc/output.c
net/rxrpc/peer_object.c
net/rxrpc/proc.c
net/rxrpc/recvmsg.c
net/rxrpc/rxkad.c
net/rxrpc/rxperf.c
net/rxrpc/security.c
net/rxrpc/sendmsg.c
net/sched/act_mpls.c
net/sched/sch_api.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcsock.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/tipc/node.c
scripts/Makefile.vmlinux
sound/core/control.c
sound/core/control_led.c
sound/pci/hda/cs35l41_hda.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/yc/acp6x-mach.c
sound/soc/codecs/rt9120.c
sound/soc/codecs/wm8904.c
sound/soc/fsl/fsl-asoc-card.c
sound/soc/fsl/fsl_micfil.c
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/boards/Kconfig
sound/soc/intel/boards/sof_nau8825.c
sound/soc/intel/common/soc-acpi-intel-adl-match.c
sound/soc/intel/common/soc-acpi-intel-rpl-match.c
sound/soc/mediatek/Kconfig
sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
sound/soc/qcom/Kconfig
sound/soc/qcom/Makefile
sound/soc/qcom/common.c
sound/soc/qcom/common.h
sound/soc/qcom/lpass-cpu.c
sound/soc/qcom/sc8280xp.c
sound/soc/qcom/sdw.c [new file with mode: 0644]
sound/soc/qcom/sdw.h [new file with mode: 0644]
sound/soc/qcom/sm8250.c
sound/soc/sof/debug.c
sound/soc/sof/pm.c
sound/usb/implicit.c
sound/usb/pcm.c
sound/usb/stream.c
sound/xen/xen_snd_front.c
tools/include/nolibc/arch-mips.h
tools/include/nolibc/arch-riscv.h
tools/include/nolibc/ctype.h
tools/include/nolibc/errno.h
tools/include/nolibc/signal.h
tools/include/nolibc/stdio.h
tools/include/nolibc/stdlib.h
tools/include/nolibc/string.h
tools/include/nolibc/sys.h
tools/include/nolibc/time.h
tools/include/nolibc/types.h
tools/include/nolibc/unistd.h
tools/objtool/check.c
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/builtin-kmem.c
tools/perf/builtin-trace.c
tools/perf/tests/bpf-script-test-prologue.c
tools/perf/tests/make
tools/perf/util/auxtrace.c
tools/perf/util/bpf_counter.h
tools/testing/memblock/internal.h
tools/testing/selftests/net/af_unix/test_unix_oob.c
tools/testing/selftests/net/l2_tos_ttl_inherit.sh
virt/kvm/kvm_main.c

index ccba4cf..562f70d 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -422,6 +422,7 @@ Tony Luck <tony.luck@intel.com>
 TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
 TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
 Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
+Tudor Ambarus <tudor.ambarus@linaro.org> <tudor.ambarus@microchip.com>
 Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
 Tzung-Bi Shih <tzungbi@kernel.org> <tzungbi@google.com>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
index 808ade4..ec5f889 100644 (file)
@@ -120,6 +120,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2224489        | ARM64_ERRATUM_2224489       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A715     | #2645198        | ARM64_ERRATUM_2645198       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2224489        | ARM64_ERRATUM_2224489       |
index a5c45df..d927737 100644 (file)
@@ -31,6 +31,12 @@ def have_command(cmd):
 # Get Sphinx version
 major, minor, patch = sphinx.version_info[:3]
 
+#
+# Warn about older versions that we don't want to support for much
+# longer.
+#
+if (major < 2) or (major == 2 and minor < 4):
+    print('WARNING: support for Sphinx < 2.4 will be removed soon.')
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
@@ -339,7 +345,11 @@ html_use_smartypants = False
 
 # Custom sidebar templates, maps document names to template names.
 # Note that the RTD theme ignores this
-html_sidebars = { '**': ["about.html", 'searchbox.html', 'localtoc.html', 'sourcelink.html']}
+html_sidebars = { '**': ['searchbox.html', 'localtoc.html', 'sourcelink.html']}
+
+# about.html is available for alabaster theme. Add it at the front.
+if html_theme == 'alabaster':
+    html_sidebars['**'].insert(0, 'about.html')
 
 # Output file base name for HTML help builder.
 htmlhelp_basename = 'TheLinuxKerneldoc'
index 903b311..99e159b 100644 (file)
@@ -54,6 +54,17 @@ properties:
       - const: xo
       - const: alternate
 
+  interrupts:
+    minItems: 1
+    maxItems: 3
+
+  interrupt-names:
+    minItems: 1
+    items:
+      - const: dcvsh-irq-0
+      - const: dcvsh-irq-1
+      - const: dcvsh-irq-2
+
   '#freq-domain-cells':
     const: 1
 
index 0ccaab1..0b7383b 100644 (file)
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Advanced Encryption Standard (AES) HW cryptographic accelerator
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 properties:
   compatible:
index 5163c51..ee2ffb0 100644 (file)
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Secure Hash Algorithm (SHA) HW cryptographic accelerator
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 properties:
   compatible:
index fcc5adf..3d6ed24 100644 (file)
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Triple Data Encryption Standard (TDES) HW cryptographic accelerator
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 properties:
   compatible:
index f2c1437..6e2fd6e 100644 (file)
@@ -32,7 +32,7 @@ properties:
       - description: Display byte clock
       - description: Display byte interface clock
       - description: Display pixel clock
-      - description: Display escape clock
+      - description: Display core clock
       - description: Display AHB clock
       - description: Display AXI clock
 
@@ -137,8 +137,6 @@ required:
   - phys
   - assigned-clocks
   - assigned-clock-parents
-  - power-domains
-  - operating-points-v2
   - ports
 
 additionalProperties: false
index 3d8540a..2f1fd14 100644 (file)
@@ -34,6 +34,10 @@ properties:
   vddio-supply:
     description: Phandle to vdd-io regulator device node.
 
+  qcom,dsi-phy-regulator-ldo-mode:
+    type: boolean
+    description: Indicates if the LDO mode PHY regulator is wanted.
+
 required:
   - compatible
   - reg
index d6f043a..4795e13 100644 (file)
@@ -72,7 +72,7 @@ examples:
     #include <dt-bindings/interconnect/qcom,qcm2290.h>
     #include <dt-bindings/power/qcom-rpmpd.h>
 
-    mdss@5e00000 {
+    display-subsystem@5e00000 {
         #address-cells = <1>;
         #size-cells = <1>;
         compatible = "qcom,qcm2290-mdss";
index a86d7f5..886858e 100644 (file)
@@ -62,7 +62,7 @@ examples:
     #include <dt-bindings/interrupt-controller/arm-gic.h>
     #include <dt-bindings/power/qcom-rpmpd.h>
 
-    mdss@5e00000 {
+    display-subsystem@5e00000 {
         #address-cells = <1>;
         #size-cells = <1>;
         compatible = "qcom,sm6115-mdss";
index 9d31399..aa23b00 100644 (file)
@@ -16,6 +16,7 @@ properties:
   compatible:
     enum:
       - mediatek,mt8186-mt6366-rt1019-rt5682s-sound
+      - mediatek,mt8186-mt6366-rt5682s-max98360-sound
 
   mediatek,platform:
     $ref: "/schemas/types.yaml#/definitions/phandle"
index 66431aa..da5f709 100644 (file)
@@ -30,7 +30,9 @@ properties:
     const: 0
 
   clocks:
-    maxItems: 5
+    oneOf:
+      - maxItems: 3
+      - maxItems: 5
 
   clock-names:
     oneOf:
index 2bf8d08..66cbb1f 100644 (file)
@@ -9,9 +9,6 @@ title: LPASS(Low Power Audio Subsystem) VA Macro audio codec
 maintainers:
   - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 
-allOf:
-  - $ref: dai-common.yaml#
-
 properties:
   compatible:
     enum:
@@ -30,15 +27,12 @@ properties:
     const: 0
 
   clocks:
-    maxItems: 5
+    minItems: 5
+    maxItems: 6
 
   clock-names:
-    items:
-      - const: mclk
-      - const: npl
-      - const: macro
-      - const: dcodec
-      - const: fsgen
+    minItems: 5
+    maxItems: 6
 
   clock-output-names:
     maxItems: 1
@@ -55,10 +49,51 @@ required:
   - reg
   - "#sound-dai-cells"
 
+allOf:
+  - $ref: dai-common.yaml#
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sc7280-lpass-wsa-macro
+            - qcom,sm8450-lpass-wsa-macro
+            - qcom,sc8280xp-lpass-wsa-macro
+    then:
+      properties:
+        clocks:
+          maxItems: 5
+        clock-names:
+          items:
+            - const: mclk
+            - const: npl
+            - const: macro
+            - const: dcodec
+            - const: fsgen
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sm8250-lpass-wsa-macro
+    then:
+      properties:
+        clocks:
+          minItems: 6
+        clock-names:
+          items:
+            - const: mclk
+            - const: npl
+            - const: macro
+            - const: dcodec
+            - const: va
+            - const: fsgen
+
 unevaluatedProperties: false
 
 examples:
   - |
+    #include <dt-bindings/clock/qcom,sm8250-lpass-aoncc.h>
     #include <dt-bindings/sound/qcom,q6afe.h>
     codec@3240000 {
       compatible = "qcom,sm8250-lpass-wsa-macro";
@@ -69,7 +104,8 @@ examples:
                <&audiocc 0>,
                <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
                <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+               <&aoncc LPASS_CDC_VA_MCLK>,
                <&vamacro>;
-      clock-names = "mclk", "npl", "macro", "dcodec", "fsgen";
+      clock-names = "mclk", "npl", "macro", "dcodec", "va", "fsgen";
       clock-output-names = "mclk";
     };
index 4dd973e..6c57dd6 100644 (file)
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel SPI device
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 allOf:
   - $ref: spi-controller.yaml#
index 1d493ad..b0d99bc 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Quad Serial Peripheral Interface (QSPI)
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 allOf:
   - $ref: spi-controller.yaml#
index ead2ccc..9a60c06 100644 (file)
@@ -44,9 +44,9 @@ properties:
     description:
       Maximum SPI clocking speed of the device in Hz.
 
-  spi-cs-setup-ns:
+  spi-cs-setup-delay-ns:
     description:
-      Delay in nanosecods to be introduced by the controller after CS is
+      Delay in nanoseconds to be introduced by the controller after CS is
       asserted.
 
   spi-rx-bus-width:
index 39494a6..e1af544 100644 (file)
@@ -880,8 +880,8 @@ The kernel interface functions are as follows:
 
      notify_end_rx can be NULL or it can be used to specify a function to be
      called when the call changes state to end the Tx phase.  This function is
-     called with the call-state spinlock held to prevent any reply or final ACK
-     from being delivered first.
+     called with a spinlock held to prevent the last DATA packet from being
+     transmitted until the function returns.
 
  (#) Receive data from a call::
 
index eeb394b..8b416bf 100644 (file)
@@ -3,7 +3,7 @@
 
 import os
 import sys
-from sphinx.util.pycompat import execfile_
+from sphinx.util.osutil import fs_encoding
 
 # ------------------------------------------------------------------------------
 def loadConfig(namespace):
@@ -48,7 +48,9 @@ def loadConfig(namespace):
             sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
             config = namespace.copy()
             config['__file__'] = config_file
-            execfile_(config_file, config)
+            with open(config_file, 'rb') as f:
+                code = compile(f.read(), fs_encoding, 'exec')
+                exec(code, config)
             del config['__file__']
             namespace.update(config)
         else:
index deb494f..9807b05 100644 (file)
@@ -1354,6 +1354,14 @@ the memory region are automatically reflected into the guest.  For example, an
 mmap() that affects the region will be made visible immediately.  Another
 example is madvise(MADV_DROP).
 
+Note: On arm64, a write generated by the page-table walker (to update
+the Access and Dirty flags, for example) never results in a
+KVM_EXIT_MMIO exit when the slot has the KVM_MEM_READONLY flag. This
+is because KVM cannot provide the data that would be written by the
+page-table walker, making it impossible to emulate the access.
+Instead, an abort (data abort if the cause of the page-table update
+was a load or a store, instruction abort if it was an instruction
+fetch) is injected in the guest.
 
 4.36 KVM_SET_TSS_ADDR
 ---------------------
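
As a hedged userspace sketch of the memslot setup the note above refers to
(vm_fd, the slot number and the guest physical layout are assumptions):

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>

    static int map_readonly_slot(int vm_fd, __u64 gpa, size_t size)
    {
            struct kvm_userspace_memory_region region;
            void *backing = mmap(NULL, size, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (backing == MAP_FAILED)
                    return -1;

            memset(&region, 0, sizeof(region));
            region.slot = 1;
            region.flags = KVM_MEM_READONLY; /* guest writes exit to userspace */
            region.guest_phys_addr = gpa;
            region.memory_size = size;
            region.userspace_addr = (__u64)backing;

            /* Per the note above: on arm64, page-table-walker writes into
             * this slot now inject an abort instead of KVM_EXIT_MMIO. */
            return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
    }
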
@@ -8310,6 +8318,20 @@ CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``
 It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel
 has enabled in-kernel emulation of the local APIC.
 
+CPU topology
+~~~~~~~~~~~~
+
+Several CPUID values include topology information for the host CPU:
+0x0b and 0x1f for Intel systems, 0x8000001e for AMD systems.  Different
+versions of KVM return different values for this information and userspace
+should not rely on it.  Currently they return all zeroes.
+
+If userspace wishes to set up a guest topology, it should be careful that
+the values of these three leaves differ for each CPU.  In particular,
+the APIC ID is found in EDX for all subleaves of 0x0b and 0x1f, and in EAX
+for 0x8000001e; the latter also encodes the core id and node id in bits
+7:0 of EBX and ECX respectively.
+
 Obsolete ioctls and capabilities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
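To make the CPU topology note above concrete, a hedged userspace sketch using
GCC's <cpuid.h>; the register layout follows the documentation text, and the
leaf-presence check is deliberately minimal:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* Intel: the APIC ID is in EDX for all subleaves of 0x0b/0x1f. */
            __cpuid_count(0x0b, 0, eax, ebx, ecx, edx);
            printf("x2APIC ID (leaf 0x0b): %u\n", edx);

            /* AMD: 0x8000001e returns the APIC ID in EAX, plus core id and
             * node id in bits 7:0 of EBX and ECX respectively. */
            if (__get_cpuid_max(0x80000000, NULL) >= 0x8000001e) {
                    __cpuid_count(0x8000001e, 0, eax, ebx, ecx, edx);
                    printf("APIC ID: %u, core id: %u, node id: %u\n",
                           eax, ebx & 0xff, ecx & 0xff);
            }
            return 0;
    }
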
index a3ca76f..a014679 100644 (file)
@@ -24,21 +24,22 @@ The acquisition orders for mutexes are as follows:
 
 For SRCU:
 
-- ``synchronize_srcu(&kvm->srcu)`` is called _inside_
-  the kvm->slots_lock critical section, therefore kvm->slots_lock
-  cannot be taken inside a kvm->srcu read-side critical section.
-  Instead, kvm->slots_arch_lock is released before the call
-  to ``synchronize_srcu()`` and _can_ be taken inside a
-  kvm->srcu read-side critical section.
-
-- kvm->lock is taken inside kvm->srcu, therefore
-  ``synchronize_srcu(&kvm->srcu)`` cannot be called inside
-  a kvm->lock critical section.  If you cannot delay the
-  call until after kvm->lock is released, use ``call_srcu``.
+- ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections
+  for kvm->lock, vcpu->mutex and kvm->slots_lock.  These locks _cannot_
+  be taken inside a kvm->srcu read-side critical section; that is, the
+  following is broken::
+
+      srcu_read_lock(&kvm->srcu);
+      mutex_lock(&kvm->slots_lock);
+
+- kvm->slots_arch_lock instead is released before the call to
+  ``synchronize_srcu()``.  It _can_ therefore be taken inside a
+  kvm->srcu read-side critical section, for example while processing
+  a vmexit.
 
 On x86:
 
-- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
+- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock and kvm->arch.xen.xen_lock
 
 - kvm->arch.mmu_lock is an rwlock.  kvm->arch.tdp_mmu_pages_lock and
   kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
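
Restated as code (a hedged sketch, not taken from the patch): the safe
pattern takes the sleeping lock first and enters the SRCU read side
afterwards, never the other way around:

    static void resize_then_read(struct kvm *kvm)
    {
            int idx;

            /* May call synchronize_srcu(&kvm->srcu) internally, so this
             * mutex must never be taken inside an srcu read-side section. */
            mutex_lock(&kvm->slots_lock);
            /* ... update memslots ... */
            mutex_unlock(&kvm->slots_lock);

            idx = srcu_read_lock(&kvm->srcu);
            /* ... read the published memslots; per the rules above, only
             * kvm->slots_arch_lock may safely be taken in here ... */
            srcu_read_unlock(&kvm->srcu, idx);
    }
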
index 9a07bd4..cb47488 100644 (file)
@@ -11358,9 +11358,9 @@ F:      virt/kvm/*
 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
 M:     Marc Zyngier <maz@kernel.org>
 R:     James Morse <james.morse@arm.com>
-R:     Alexandru Elisei <alexandru.elisei@arm.com>
 R:     Suzuki K Poulose <suzuki.poulose@arm.com>
 R:     Oliver Upton <oliver.upton@linux.dev>
+R:     Zenghui Yu <yuzenghui@huawei.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kvmarm@lists.linux.dev
 L:     kvmarm@lists.cs.columbia.edu (deprecated, moderated for non-subscribers)
@@ -13622,7 +13622,7 @@ F:      arch/microblaze/
 
 MICROCHIP AT91 DMA DRIVERS
 M:     Ludovic Desroches <ludovic.desroches@microchip.com>
-M:     Tudor Ambarus <tudor.ambarus@microchip.com>
+M:     Tudor Ambarus <tudor.ambarus@linaro.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     dmaengine@vger.kernel.org
 S:     Supported
@@ -13667,7 +13667,7 @@ F:      Documentation/devicetree/bindings/media/microchip,csi2dc.yaml
 F:     drivers/media/platform/microchip/microchip-csi2dc.c
 
 MICROCHIP ECC DRIVER
-M:     Tudor Ambarus <tudor.ambarus@microchip.com>
+M:     Tudor Ambarus <tudor.ambarus@linaro.org>
 L:     linux-crypto@vger.kernel.org
 S:     Maintained
 F:     drivers/crypto/atmel-ecc.*
@@ -13764,7 +13764,7 @@ S:      Maintained
 F:     drivers/mmc/host/atmel-mci.c
 
 MICROCHIP NAND DRIVER
-M:     Tudor Ambarus <tudor.ambarus@microchip.com>
+M:     Tudor Ambarus <tudor.ambarus@linaro.org>
 L:     linux-mtd@lists.infradead.org
 S:     Supported
 F:     Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -13816,7 +13816,7 @@ S:      Supported
 F:     drivers/power/reset/at91-sama5d2_shdwc.c
 
 MICROCHIP SPI DRIVER
-M:     Tudor Ambarus <tudor.ambarus@microchip.com>
+M:     Tudor Ambarus <tudor.ambarus@linaro.org>
 S:     Supported
 F:     drivers/spi/spi-atmel.*
 
@@ -14921,7 +14921,8 @@ T:      git://git.infradead.org/nvme.git
 F:     Documentation/nvme/
 F:     drivers/nvme/host/
 F:     drivers/nvme/common/
-F:     include/linux/nvme*
+F:     include/linux/nvme.h
+F:     include/linux/nvme-*.h
 F:     include/uapi/linux/nvme_ioctl.h
 
 NVM EXPRESS FABRICS AUTHENTICATION
@@ -19674,7 +19675,7 @@ F:      drivers/clk/spear/
 F:     drivers/pinctrl/spear/
 
 SPI NOR SUBSYSTEM
-M:     Tudor Ambarus <tudor.ambarus@microchip.com>
+M:     Tudor Ambarus <tudor.ambarus@linaro.org>
 M:     Pratyush Yadav <pratyush@kernel.org>
 R:     Michael Walle <michael@walle.cc>
 L:     linux-mtd@lists.infradead.org
index 4607163..e09fe10 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
index 0393480..c5ccca2 100644 (file)
@@ -184,8 +184,6 @@ config ARM64
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS
        select HAVE_DYNAMIC_FTRACE
-       select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
-               if $(cc-option,-fpatchable-function-entry=2)
        select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
                if DYNAMIC_FTRACE_WITH_ARGS
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -972,6 +970,22 @@ config ARM64_ERRATUM_2457168
 
          If unsure, say Y.
 
+config ARM64_ERRATUM_2645198
+       bool "Cortex-A715: 2645198: Workaround possible [ESR|FAR]_ELx corruption"
+       default y
+       help
+         This option adds the workaround for ARM Cortex-A715 erratum 2645198.
+
+         If a Cortex-A715 cpu sees a page mapping permissions change from executable
+         to non-executable, it may corrupt the ESR_ELx and FAR_ELx registers on the
+         next instruction abort caused by permission fault.
+
+         Only user-space does executable to non-executable permission transition via
+         mprotect() system call. Workaround the problem by doing a break-before-make
+         TLB invalidation, for all changes to executable user space mappings.
+
+         If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
        bool "Cavium erratum 22375, 24313"
        default y
index 0890e4f..cbb3d96 100644 (file)
@@ -315,7 +315,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1,                   \
        "       cbnz    %w0, 1b\n"                                      \
        "       " #mb "\n"                                              \
        "2:"                                                            \
-       : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)        \
+       : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr)          \
        : "r" (old1), "r" (old2), "r" (new1), "r" (new2)                \
        : cl);                                                          \
                                                                        \
index 52075e9..a94d6da 100644 (file)
@@ -311,7 +311,7 @@ __lse__cmpxchg_double##name(unsigned long old1,                             \
        "       eor     %[old2], %[old2], %[oldval2]\n"                 \
        "       orr     %[old1], %[old1], %[old2]"                      \
        : [old1] "+&r" (x0), [old2] "+&r" (x1),                         \
-         [v] "+Q" (*(unsigned long *)ptr)                              \
+         [v] "+Q" (*(__uint128_t *)ptr)                                \
        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),             \
          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)              \
        : cl);                                                          \
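
The two hunks above widen the "+Q" memory operand from unsigned long to
__uint128_t so the compiler hazards against all 16 bytes the asm reads and
writes, not just the first 8. A hedged usage sketch of the cmpxchg_double()
API (as it existed in this era) that these asm bodies back; the struct and
helper are hypothetical:

    struct freelist_pair {
            u64 head;
            u64 counter;
    } __attribute__((aligned(16)));    /* double-word alignment required */

    static bool pop_pair(struct freelist_pair *fl, u64 old_h, u64 old_c,
                         u64 new_h, u64 new_c)
    {
            /* Atomically compare-and-swap both adjacent 64-bit words. */
            return cmpxchg_double(&fl->head, &fl->counter,
                                  old_h, old_c, new_h, new_c);
    }
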
index 4e8b66c..683ca3a 100644 (file)
 #define APPLE_CPU_PART_M1_FIRESTORM_PRO        0x025
 #define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028
 #define APPLE_CPU_PART_M1_FIRESTORM_MAX        0x029
+#define APPLE_CPU_PART_M2_BLIZZARD     0x032
+#define APPLE_CPU_PART_M2_AVALANCHE    0x033
 
 #define AMPERE_CPU_PART_AMPERE1                0xAC3
 
 #define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO)
 #define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
+#define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD)
+#define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE)
 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
index 15b34fb..206de10 100644 (file)
 #define ESR_ELx_FSC_ACCESS     (0x08)
 #define ESR_ELx_FSC_FAULT      (0x04)
 #define ESR_ELx_FSC_PERM       (0x0C)
+#define ESR_ELx_FSC_SEA_TTW0   (0x14)
+#define ESR_ELx_FSC_SEA_TTW1   (0x15)
+#define ESR_ELx_FSC_SEA_TTW2   (0x16)
+#define ESR_ELx_FSC_SEA_TTW3   (0x17)
+#define ESR_ELx_FSC_SECC       (0x18)
+#define ESR_ELx_FSC_SECC_TTW0  (0x1c)
+#define ESR_ELx_FSC_SECC_TTW1  (0x1d)
+#define ESR_ELx_FSC_SECC_TTW2  (0x1e)
+#define ESR_ELx_FSC_SECC_TTW3  (0x1f)
 
 /* ISS field definitions for Data Aborts */
 #define ESR_ELx_ISV_SHIFT      (24)
index d20f5da..6a4a1ab 100644 (file)
@@ -49,6 +49,15 @@ extern pte_t huge_ptep_get(pte_t *ptep);
 
 void __init arm64_hugetlb_cma_reserve(void);
 
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep);
+
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep,
+                                        pte_t old_pte, pte_t new_pte);
+
 #include <asm-generic/hugetlb.h>
 
 #endif /* __ASM_HUGETLB_H */
index 0df3fc3..26b0c97 100644 (file)
                                 BIT(18) |              \
                                 GENMASK(16, 15))
 
-/* For compatibility with fault code shared with 32-bit */
-#define FSC_FAULT      ESR_ELx_FSC_FAULT
-#define FSC_ACCESS     ESR_ELx_FSC_ACCESS
-#define FSC_PERM       ESR_ELx_FSC_PERM
-#define FSC_SEA                ESR_ELx_FSC_EXTABT
-#define FSC_SEA_TTW0   (0x14)
-#define FSC_SEA_TTW1   (0x15)
-#define FSC_SEA_TTW2   (0x16)
-#define FSC_SEA_TTW3   (0x17)
-#define FSC_SECC       (0x18)
-#define FSC_SECC_TTW0  (0x1c)
-#define FSC_SECC_TTW1  (0x1d)
-#define FSC_SECC_TTW2  (0x1e)
-#define FSC_SECC_TTW3  (0x1f)
-
 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
 #define HPFAR_MASK     (~UL(0xf))
 /*
index 9bdba47..193583d 100644 (file)
@@ -349,16 +349,16 @@ static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *v
 static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
 {
        switch (kvm_vcpu_trap_get_fault(vcpu)) {
-       case FSC_SEA:
-       case FSC_SEA_TTW0:
-       case FSC_SEA_TTW1:
-       case FSC_SEA_TTW2:
-       case FSC_SEA_TTW3:
-       case FSC_SECC:
-       case FSC_SECC_TTW0:
-       case FSC_SECC_TTW1:
-       case FSC_SECC_TTW2:
-       case FSC_SECC_TTW3:
+       case ESR_ELx_FSC_EXTABT:
+       case ESR_ELx_FSC_SEA_TTW0:
+       case ESR_ELx_FSC_SEA_TTW1:
+       case ESR_ELx_FSC_SEA_TTW2:
+       case ESR_ELx_FSC_SEA_TTW3:
+       case ESR_ELx_FSC_SECC:
+       case ESR_ELx_FSC_SECC_TTW0:
+       case ESR_ELx_FSC_SECC_TTW1:
+       case ESR_ELx_FSC_SECC_TTW2:
+       case ESR_ELx_FSC_SECC_TTW3:
                return true;
        default:
                return false;
@@ -373,8 +373,26 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 
 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 {
-       if (kvm_vcpu_abt_iss1tw(vcpu))
-               return true;
+       if (kvm_vcpu_abt_iss1tw(vcpu)) {
+               /*
+                * Only a permission fault on a S1PTW should be
+                * considered as a write. Otherwise, page tables baked
+                * in a read-only memslot will result in an exception
+                * being delivered in the guest.
+                *
+                * The drawback is that we end-up faulting twice if the
+                * guest is using any of HW AF/DB: a translation fault
+                * to map the page containing the PT (read only at
+                * first), then a permission fault to allow the flags
+                * to be set.
+                */
+               switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+               case ESR_ELx_FSC_PERM:
+                       return true;
+               default:
+                       return false;
+               }
+       }
 
        if (kvm_vcpu_trap_is_iabt(vcpu))
                return false;
index b4bbeed..65e7899 100644 (file)
@@ -681,7 +681,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define pud_leaf(pud)          (pud_present(pud) && !pud_table(pud))
 #define pud_valid(pud)         pte_valid(pud_pte(pud))
 #define pud_user(pud)          pte_user(pud_pte(pud))
-
+#define pud_user_exec(pud)     pte_user_exec(pud_pte(pud))
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
@@ -730,6 +730,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
 #else
 
 #define pud_page_paddr(pud)    ({ BUILD_BUG(); 0; })
+#define pud_user_exec(pud)     pud_user(pud) /* Always 0 with folding */
 
 /* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
 #define pmd_set_fixmap(addr)           NULL
@@ -862,12 +863,12 @@ static inline bool pte_user_accessible_page(pte_t pte)
 
 static inline bool pmd_user_accessible_page(pmd_t pmd)
 {
-       return pmd_leaf(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
+       return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
 }
 
 static inline bool pud_user_accessible_page(pud_t pud)
 {
-       return pud_leaf(pud) && pud_user(pud);
+       return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
 }
 #endif
 
@@ -1093,6 +1094,15 @@ static inline bool pud_sect_supported(void)
 }
 
 
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+#define ptep_modify_prot_start ptep_modify_prot_start
+extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
+                                   unsigned long addr, pte_t *ptep);
+
+#define ptep_modify_prot_commit ptep_modify_prot_commit
+extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
+                                   unsigned long addr, pte_t *ptep,
+                                   pte_t old_pte, pte_t new_pte);
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
index ba4bff5..2b09495 100644 (file)
@@ -16,7 +16,7 @@
 #define UPROBE_SWBP_INSN_SIZE  AARCH64_INSN_SIZE
 #define UPROBE_XOL_SLOT_BYTES  MAX_UINSN_BYTES
 
-typedef u32 uprobe_opcode_t;
+typedef __le32 uprobe_opcode_t;
 
 struct arch_uprobe_task {
 };
index 89ac000..307faa2 100644 (file)
@@ -661,6 +661,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
        },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_2645198
+       {
+               .desc = "ARM erratum 2645198",
+               .capability = ARM64_WORKAROUND_2645198,
+               ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
+       },
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_2077057
        {
                .desc = "ARM erratum 2077057",
index a008864..d872d18 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 SYM_FUNC_START(__efi_rt_asm_wrapper)
        stp     x29, x30, [sp, #-112]!
index 353009d..2e94d20 100644 (file)
@@ -8,28 +8,27 @@
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
 
-#define for_each_mte_vma(vmi, vma)                                     \
+#define for_each_mte_vma(cprm, i, m)                                   \
        if (system_supports_mte())                                      \
-               for_each_vma(vmi, vma)                                  \
-                       if (vma->vm_flags & VM_MTE)
+               for (i = 0, m = cprm->vma_meta;                         \
+                    i < cprm->vma_count;                               \
+                    i++, m = cprm->vma_meta + i)                       \
+                       if (m->flags & VM_MTE)
 
-static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
+static unsigned long mte_vma_tag_dump_size(struct core_vma_metadata *m)
 {
-       if (vma->vm_flags & VM_DONTDUMP)
-               return 0;
-
-       return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
+       return (m->dump_size >> PAGE_SHIFT) * MTE_PAGE_TAG_STORAGE;
 }
 
 /* Derived from dump_user_range(); start/end must be page-aligned */
 static int mte_dump_tag_range(struct coredump_params *cprm,
-                             unsigned long start, unsigned long end)
+                             unsigned long start, unsigned long len)
 {
        int ret = 1;
        unsigned long addr;
        void *tags = NULL;
 
-       for (addr = start; addr < end; addr += PAGE_SIZE) {
+       for (addr = start; addr < start + len; addr += PAGE_SIZE) {
                struct page *page = get_dump_page(addr);
 
                /*
@@ -65,7 +64,6 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
                mte_save_page_tags(page_address(page), tags);
                put_page(page);
                if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
-                       mte_free_tag_storage(tags);
                        ret = 0;
                        break;
                }
@@ -77,13 +75,13 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
        return ret;
 }
 
-Elf_Half elf_core_extra_phdrs(void)
+Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
-       struct vm_area_struct *vma;
+       int i;
+       struct core_vma_metadata *m;
        int vma_count = 0;
-       VMA_ITERATOR(vmi, current->mm, 0);
 
-       for_each_mte_vma(vmi, vma)
+       for_each_mte_vma(cprm, i, m)
                vma_count++;
 
        return vma_count;
@@ -91,18 +89,18 @@ Elf_Half elf_core_extra_phdrs(void)
 
 int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
-       struct vm_area_struct *vma;
-       VMA_ITERATOR(vmi, current->mm, 0);
+       int i;
+       struct core_vma_metadata *m;
 
-       for_each_mte_vma(vmi, vma) {
+       for_each_mte_vma(cprm, i, m) {
                struct elf_phdr phdr;
 
                phdr.p_type = PT_AARCH64_MEMTAG_MTE;
                phdr.p_offset = offset;
-               phdr.p_vaddr = vma->vm_start;
+               phdr.p_vaddr = m->start;
                phdr.p_paddr = 0;
-               phdr.p_filesz = mte_vma_tag_dump_size(vma);
-               phdr.p_memsz = vma->vm_end - vma->vm_start;
+               phdr.p_filesz = mte_vma_tag_dump_size(m);
+               phdr.p_memsz = m->end - m->start;
                offset += phdr.p_filesz;
                phdr.p_flags = 0;
                phdr.p_align = 0;
@@ -114,28 +112,25 @@ int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
        return 1;
 }
 
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
-       struct vm_area_struct *vma;
+       int i;
+       struct core_vma_metadata *m;
        size_t data_size = 0;
-       VMA_ITERATOR(vmi, current->mm, 0);
 
-       for_each_mte_vma(vmi, vma)
-               data_size += mte_vma_tag_dump_size(vma);
+       for_each_mte_vma(cprm, i, m)
+               data_size += mte_vma_tag_dump_size(m);
 
        return data_size;
 }
 
 int elf_core_write_extra_data(struct coredump_params *cprm)
 {
-       struct vm_area_struct *vma;
-       VMA_ITERATOR(vmi, current->mm, 0);
-
-       for_each_mte_vma(vmi, vma) {
-               if (vma->vm_flags & VM_DONTDUMP)
-                       continue;
+       int i;
+       struct core_vma_metadata *m;
 
-               if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
+       for_each_mte_vma(cprm, i, m) {
+               if (!mte_dump_tag_range(cprm, m->start, m->dump_size))
                        return 0;
        }
 
index dcc81e7..b6ef1af 100644 (file)
@@ -385,7 +385,7 @@ static void task_fpsimd_load(void)
        WARN_ON(!system_supports_fpsimd());
        WARN_ON(!have_cpu_fpsimd_context());
 
-       if (system_supports_sve()) {
+       if (system_supports_sve() || system_supports_sme()) {
                switch (current->thread.fp_type) {
                case FP_STATE_FPSIMD:
                        /* Stop tracking SVE for this task until next use. */
index 2686ab1..0c321ad 100644 (file)
@@ -1357,7 +1357,7 @@ enum aarch64_regset {
 #ifdef CONFIG_ARM64_SVE
        REGSET_SVE,
 #endif
-#ifdef CONFIG_ARM64_SVE
+#ifdef CONFIG_ARM64_SME
        REGSET_SSVE,
        REGSET_ZA,
 #endif
index e0d09bf..be279fd 100644 (file)
@@ -281,7 +281,12 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
 
                vl = task_get_sme_vl(current);
        } else {
-               if (!system_supports_sve())
+               /*
+                * A SME only system use SVE for streaming mode so can
+                * have a SVE formatted context with a zero VL and no
+                * payload data.
+                */
+               if (!system_supports_sve() && !system_supports_sme())
                        return -EINVAL;
 
                vl = task_get_sve_vl(current);
@@ -732,7 +737,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
                        return err;
        }
 
-       if (system_supports_sve()) {
+       if (system_supports_sve() || system_supports_sme()) {
                unsigned int vq = 0;
 
                if (add_all || test_thread_flag(TIF_SVE) ||
index 1b8a2dc..9ddcfe2 100644 (file)
@@ -60,7 +60,7 @@ static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
         */
        if (!(esr & ESR_ELx_S1PTW) &&
            (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
-            (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+            (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM)) {
                if (!__translate_far_to_hpfar(far, &hpfar))
                        return false;
        } else {
index 3330d1b..07d37ff 100644 (file)
@@ -367,7 +367,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
        if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
                bool valid;
 
-               valid = kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+               valid = kvm_vcpu_trap_get_fault_type(vcpu) == ESR_ELx_FSC_FAULT &&
                        kvm_vcpu_dabt_isvalid(vcpu) &&
                        !kvm_vcpu_abt_issea(vcpu) &&
                        !kvm_vcpu_abt_iss1tw(vcpu);
index 31d7fa4..a3ee3b6 100644 (file)
@@ -1212,7 +1212,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
        VM_BUG_ON(write_fault && exec_fault);
 
-       if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
+       if (fault_status == ESR_ELx_FSC_PERM && !write_fault && !exec_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }
@@ -1277,7 +1277,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * only exception to this is when dirty logging is enabled at runtime
         * and a write fault needs to collapse a block entry into a table.
         */
-       if (fault_status != FSC_PERM || (logging_active && write_fault)) {
+       if (fault_status != ESR_ELx_FSC_PERM ||
+           (logging_active && write_fault)) {
                ret = kvm_mmu_topup_memory_cache(memcache,
                                                 kvm_mmu_cache_min_pages(kvm));
                if (ret)
@@ -1342,7 +1343,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * backed by a THP and thus use block mapping if possible.
         */
        if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
-               if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE)
+               if (fault_status ==  ESR_ELx_FSC_PERM &&
+                   fault_granule > PAGE_SIZE)
                        vma_pagesize = fault_granule;
                else
                        vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
@@ -1350,7 +1352,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                                                                   &fault_ipa);
        }
 
-       if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
+       if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
                /* Check the VMM hasn't introduced a new disallowed VMA */
                if (kvm_vma_mte_allowed(vma)) {
                        sanitise_mte_tags(kvm, pfn, vma_pagesize);
@@ -1376,7 +1378,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * permissions only if vma_pagesize equals fault_granule. Otherwise,
         * kvm_pgtable_stage2_map() should be called to change block size.
         */
-       if (fault_status == FSC_PERM && vma_pagesize == fault_granule)
+       if (fault_status == ESR_ELx_FSC_PERM && vma_pagesize == fault_granule)
                ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
        else
                ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
@@ -1441,7 +1443,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
        is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
-       if (fault_status == FSC_FAULT) {
+       if (fault_status == ESR_ELx_FSC_FAULT) {
                /* Beyond sanitised PARange (which is the IPA limit) */
                if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
                        kvm_inject_size_fault(vcpu);
@@ -1476,8 +1478,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
                              kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
        /* Check the stage-2 fault is trans. fault or write fault */
-       if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
-           fault_status != FSC_ACCESS) {
+       if (fault_status != ESR_ELx_FSC_FAULT &&
+           fault_status != ESR_ELx_FSC_PERM &&
+           fault_status != ESR_ELx_FSC_ACCESS) {
                kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu),
                        (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1539,7 +1542,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
        /* Userspace should not be able to register out-of-bounds IPAs */
        VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
 
-       if (fault_status == FSC_ACCESS) {
+       if (fault_status == ESR_ELx_FSC_ACCESS) {
                handle_access_fault(vcpu, fault_ipa);
                ret = 1;
                goto out_unlock;
index d5ee52d..c6cbfe6 100644 (file)
@@ -646,7 +646,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
                return;
 
        /* Only preserve PMCR_EL0.N, and reset the rest to 0 */
-       pmcr = read_sysreg(pmcr_el0) & ARMV8_PMU_PMCR_N_MASK;
+       pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
        if (!kvm_supports_32bit_el0())
                pmcr |= ARMV8_PMU_PMCR_LC;
 
index 826ff6f..2074521 100644 (file)
@@ -616,6 +616,8 @@ static const struct midr_range broken_seis[] = {
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
+       MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
+       MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
        {},
 };
 
index 35e9a46..95364e8 100644 (file)
@@ -559,3 +559,24 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
 {
        return __hugetlb_valid_size(size);
 }
+
+pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+       if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
+           cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
+               /*
+                * Break-before-make (BBM) is required for all user space mappings
+                * when the permission changes from executable to non-executable
+                * in cases where cpu is affected with errata #2645198.
+                */
+               if (pte_user_exec(READ_ONCE(*ptep)))
+                       return huge_ptep_clear_flush(vma, addr, ptep);
+       }
+       return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+                                 pte_t old_pte, pte_t pte)
+{
+       set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+}
index 14c87e8..d77c9f5 100644 (file)
@@ -1630,3 +1630,24 @@ static int __init prevent_bootmem_remove_init(void)
 }
 early_initcall(prevent_bootmem_remove_init);
 #endif
+
+pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+       if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
+           cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
+               /*
+                * Break-before-make (BBM) is required for all user space mappings
+                * when the permission changes from executable to non-executable
+                * in cases where cpu is affected with errata #2645198.
+                */
+               if (pte_user_exec(READ_ONCE(*ptep)))
+                       return ptep_clear_flush(vma, addr, ptep);
+       }
+       return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+                            pte_t old_pte, pte_t pte)
+{
+       set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
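
For context, a hedged and simplified restatement of how the generic
mprotect() path (mm/mprotect.c) consumes the start/commit pair, which is what
lets the arm64 override above slot the break-before-make sequence in
transparently:

    /* Inside the pte-walking loop of change_pte_range() (simplified): */
    oldpte = ptep_modify_prot_start(vma, addr, pte);
    ptent = pte_modify(oldpte, newprot);
    /* ... soft-dirty/uffd bit adjustments elided ... */
    ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
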
index a86ee37..dfeb2c5 100644 (file)
@@ -71,6 +71,7 @@ WORKAROUND_2038923
 WORKAROUND_2064142
 WORKAROUND_2077057
 WORKAROUND_2457168
+WORKAROUND_2645198
 WORKAROUND_2658417
 WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 WORKAROUND_TSB_FLUSH_FAILURE
index 9468052..8895df1 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/elf.h>
 
 
-Elf64_Half elf_core_extra_phdrs(void)
+Elf64_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
        return GATE_EHDR->e_phnum;
 }
@@ -60,7 +60,7 @@ int elf_core_write_extra_data(struct coredump_params *cprm)
        return 1;
 }
 
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
        const struct elf_phdr *const gate_phdrs =
                (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
index af04cea..352d7de 100755 (executable)
@@ -210,6 +210,10 @@ ld_version()
        gsub(".*version ", "");
        gsub("-.*", "");
        split($1,a, ".");
+       if( length(a[3]) == "8" )
+               # a[3] is probably a date of format yyyymmdd used for release snapshots. We
+               # can assume it to be zero as it does not signify a new version as such.
+               a[3] = 0;
        print a[1]*100000000 + a[2]*1000000 + a[3]*10000;
        exit
     }'
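
Worked example: a hypothetical snapshot linker reporting version 2.39.20230105 splits into a[1]=2, a[2]=39, a[3]=20230105. Without the guard the result is 2*100000000 + 39*1000000 + 20230105*10000, absurdly larger than any release number; with the 8-digit a[3] treated as 0 it evaluates to 239000000, the same as release 2.39.0.
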
index 4f89799..699a885 100644 (file)
@@ -137,7 +137,7 @@ struct imc_pmu {
  * are inited.
  */
 struct imc_pmu_ref {
-       struct mutex lock;
+       spinlock_t lock;
        unsigned int id;
        int refc;
 };
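
Unlike a mutex, a spinlock never sleeps, which is what makes this conversion more than stylistic: these reference counts are also manipulated from contexts that must not sleep, such as PMU add/del callbacks that run with interrupts disabled. A minimal sketch of the constraint (function and variable names hypothetical):

        static int example_event_add(struct perf_event *event, int flags)
        {
                /* pmu->add() callbacks execute with IRQs disabled */
                spin_lock(&ref->lock);          /* ok: never sleeps    */
                /* mutex_lock(&ref->lock);         bug: may sleep here */
                ++ref->refc;
                spin_unlock(&ref->lock);
                return 0;
        }
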
index 80a148c..44a35ed 100644 (file)
@@ -1012,7 +1012,7 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
 
 void hpt_clear_stress(void);
 static struct timer_list stress_hpt_timer;
-void stress_hpt_timer_fn(struct timer_list *timer)
+static void stress_hpt_timer_fn(struct timer_list *timer)
 {
        int next_cpu;
 
index d517aba..100e97d 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/cputhreads.h>
 #include <asm/smp.h>
 #include <linux/string.h>
+#include <linux/spinlock.h>
 
 /* Nest IMC data structures and variables */
 
@@ -21,7 +22,7 @@
  * Used to avoid races in counting the nest-pmu units during hotplug
  * register and unregister
  */
-static DEFINE_MUTEX(nest_init_lock);
+static DEFINE_SPINLOCK(nest_init_lock);
 static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
 static struct imc_pmu **per_nest_pmu_arr;
 static cpumask_t nest_imc_cpumask;
@@ -50,7 +51,7 @@ static int trace_imc_mem_size;
  * core and trace-imc
  */
 static struct imc_pmu_ref imc_global_refc = {
-       .lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
+       .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
        .id = 0,
        .refc = 0,
 };
@@ -400,7 +401,7 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
                                       get_hard_smp_processor_id(cpu));
                /*
                 * If this is the last cpu in this chip, then skip the reference
-                * count mutex lock and make the reference count on this chip zero.
+                * count lock and make the reference count on this chip zero.
                 */
                ref = get_nest_pmu_ref(cpu);
                if (!ref)
@@ -462,15 +463,15 @@ static void nest_imc_counters_release(struct perf_event *event)
        /*
         * See if we need to disable the nest PMU.
         * If no events are currently in use, then we have to take a
-        * mutex to ensure that we don't race with another task doing
+        * lock to ensure that we don't race with another task enabling
         * or disabling the nest counters.
         */
        ref = get_nest_pmu_ref(event->cpu);
        if (!ref)
                return;
 
-       /* Take the mutex lock for this node and then decrement the reference count */
-       mutex_lock(&ref->lock);
+       /* Take the lock for this node and then decrement the reference count */
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                /*
                 * The scenario where this is true is, when perf session is
@@ -482,7 +483,7 @@ static void nest_imc_counters_release(struct perf_event *event)
                 * an OPAL call to disable the engine in that node.
                 *
                 */
-               mutex_unlock(&ref->lock);
+               spin_unlock(&ref->lock);
                return;
        }
        ref->refc--;
@@ -490,7 +491,7 @@ static void nest_imc_counters_release(struct perf_event *event)
                rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
                                            get_hard_smp_processor_id(event->cpu));
                if (rc) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
                        return;
                }
@@ -498,7 +499,7 @@ static void nest_imc_counters_release(struct perf_event *event)
                WARN(1, "nest-imc: Invalid event reference count\n");
                ref->refc = 0;
        }
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 }
 
 static int nest_imc_event_init(struct perf_event *event)
@@ -557,26 +558,25 @@ static int nest_imc_event_init(struct perf_event *event)
 
        /*
         * Get the imc_pmu_ref struct for this node.
-        * Take the mutex lock and then increment the count of nest pmu events
-        * inited.
+        * Take the lock and then increment the count of nest pmu events inited.
         */
        ref = get_nest_pmu_ref(event->cpu);
        if (!ref)
                return -EINVAL;
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
                                             get_hard_smp_processor_id(event->cpu));
                if (rc) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("nest-imc: Unable to start the counters for node %d\n",
                                                                        node_id);
                        return rc;
                }
        }
        ++ref->refc;
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        event->destroy = nest_imc_counters_release;
        return 0;
@@ -612,9 +612,8 @@ static int core_imc_mem_init(int cpu, int size)
                return -ENOMEM;
        mem_info->vbase = page_address(page);
 
-       /* Init the mutex */
        core_imc_refc[core_id].id = core_id;
-       mutex_init(&core_imc_refc[core_id].lock);
+       spin_lock_init(&core_imc_refc[core_id].lock);
 
        rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
                                __pa((void *)mem_info->vbase),
@@ -703,9 +702,8 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
                perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
        } else {
                /*
-                * If this is the last cpu in this core then, skip taking refernce
-                * count mutex lock for this core and directly zero "refc" for
-                * this core.
+                * If this is the last cpu in this core then skip taking reference
+                * count lock for this core and directly zero "refc" for this core.
                 */
                opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                                       get_hard_smp_processor_id(cpu));
@@ -720,11 +718,11 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
                 * last cpu in this core and core-imc event running
                 * in this cpu.
                 */
-               mutex_lock(&imc_global_refc.lock);
+               spin_lock(&imc_global_refc.lock);
                if (imc_global_refc.id == IMC_DOMAIN_CORE)
                        imc_global_refc.refc--;
 
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
        }
        return 0;
 }
@@ -739,7 +737,7 @@ static int core_imc_pmu_cpumask_init(void)
 
 static void reset_global_refc(struct perf_event *event)
 {
-               mutex_lock(&imc_global_refc.lock);
+               spin_lock(&imc_global_refc.lock);
                imc_global_refc.refc--;
 
                /*
@@ -751,7 +749,7 @@ static void reset_global_refc(struct perf_event *event)
                        imc_global_refc.refc = 0;
                        imc_global_refc.id = 0;
                }
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
 }
 
 static void core_imc_counters_release(struct perf_event *event)
@@ -764,17 +762,17 @@ static void core_imc_counters_release(struct perf_event *event)
        /*
         * See if we need to disable the IMC PMU.
         * If no events are currently in use, then we have to take a
-        * mutex to ensure that we don't race with another task doing
+        * lock to ensure that we don't race with another task enabling
         * or disabling the core counters.
         */
        core_id = event->cpu / threads_per_core;
 
-       /* Take the mutex lock and decrement the refernce count for this core */
+       /* Take the lock and decrement the reference count for this core */
        ref = &core_imc_refc[core_id];
        if (!ref)
                return;
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                /*
                 * The scenario where this is true is, when perf session is
@@ -786,7 +784,7 @@ static void core_imc_counters_release(struct perf_event *event)
                 * an OPAL call to disable the engine in that core.
                 *
                 */
-               mutex_unlock(&ref->lock);
+               spin_unlock(&ref->lock);
                return;
        }
        ref->refc--;
@@ -794,7 +792,7 @@ static void core_imc_counters_release(struct perf_event *event)
                rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                                            get_hard_smp_processor_id(event->cpu));
                if (rc) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
                        return;
                }
@@ -802,7 +800,7 @@ static void core_imc_counters_release(struct perf_event *event)
                WARN(1, "core-imc: Invalid event reference count\n");
                ref->refc = 0;
        }
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        reset_global_refc(event);
 }
@@ -840,7 +838,6 @@ static int core_imc_event_init(struct perf_event *event)
        if ((!pcmi->vbase))
                return -ENODEV;
 
-       /* Get the core_imc mutex for this core */
        ref = &core_imc_refc[core_id];
        if (!ref)
                return -EINVAL;
@@ -848,22 +845,22 @@ static int core_imc_event_init(struct perf_event *event)
        /*
         * Core pmu units are enabled only when used.
         * See if this is triggered for the first time.
-        * If yes, take the mutex lock and enable the core counters.
+        * If yes, take the lock and enable the core counters.
         * If not, just increment the count in core_imc_refc struct.
         */
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
                                             get_hard_smp_processor_id(event->cpu));
                if (rc) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("core-imc: Unable to start the counters for core %d\n",
                                                                        core_id);
                        return rc;
                }
        }
        ++ref->refc;
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        /*
         * Since the system can run either in accumulation or trace-mode
@@ -874,7 +871,7 @@ static int core_imc_event_init(struct perf_event *event)
         * to know whether any other trace/thread imc
         * events are running.
         */
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
                /*
                 * No other trace/thread imc events are running in
@@ -883,10 +880,10 @@ static int core_imc_event_init(struct perf_event *event)
                imc_global_refc.id = IMC_DOMAIN_CORE;
                imc_global_refc.refc++;
        } else {
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
                return -EBUSY;
        }
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
        event->destroy = core_imc_counters_release;
@@ -958,10 +955,10 @@ static int ppc_thread_imc_cpu_offline(unsigned int cpu)
        mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
 
        /* Reduce the refc if thread-imc event running on this cpu */
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        if (imc_global_refc.id == IMC_DOMAIN_THREAD)
                imc_global_refc.refc--;
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        return 0;
 }
@@ -1001,7 +998,7 @@ static int thread_imc_event_init(struct perf_event *event)
        if (!target)
                return -EINVAL;
 
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        /*
         * Check if any other trace/core imc events are running in the
         * system, if not set the global id to thread-imc.
@@ -1010,10 +1007,10 @@ static int thread_imc_event_init(struct perf_event *event)
                imc_global_refc.id = IMC_DOMAIN_THREAD;
                imc_global_refc.refc++;
        } else {
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
                return -EBUSY;
        }
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        event->pmu->task_ctx_nr = perf_sw_context;
        event->destroy = reset_global_refc;
@@ -1135,25 +1132,25 @@ static int thread_imc_event_add(struct perf_event *event, int flags)
        /*
         * imc pmus are enabled only when used.
         * See if this is triggered for the first time.
-        * If yes, take the mutex lock and enable the counters.
+        * If yes, take the lock and enable the counters.
         * If not, just increment the count in ref count struct.
         */
        ref = &core_imc_refc[core_id];
        if (!ref)
                return -EINVAL;
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
                    get_hard_smp_processor_id(smp_processor_id()))) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("thread-imc: Unable to start the counter\
                                for core %d\n", core_id);
                        return -EINVAL;
                }
        }
        ++ref->refc;
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
        return 0;
 }
 
@@ -1170,12 +1167,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
                return;
        }
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        ref->refc--;
        if (ref->refc == 0) {
                if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                    get_hard_smp_processor_id(smp_processor_id()))) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("thread-imc: Unable to stop the counters\
                                for core %d\n", core_id);
                        return;
@@ -1183,7 +1180,7 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
        } else if (ref->refc < 0) {
                ref->refc = 0;
        }
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
        mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
@@ -1224,9 +1221,8 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
                }
        }
 
-       /* Init the mutex, if not already */
        trace_imc_refc[core_id].id = core_id;
-       mutex_init(&trace_imc_refc[core_id].lock);
+       spin_lock_init(&trace_imc_refc[core_id].lock);
 
        mtspr(SPRN_LDBAR, 0);
        return 0;
@@ -1246,10 +1242,10 @@ static int ppc_trace_imc_cpu_offline(unsigned int cpu)
         * Reduce the refc if any trace-imc event running
         * on this cpu.
         */
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        if (imc_global_refc.id == IMC_DOMAIN_TRACE)
                imc_global_refc.refc--;
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        return 0;
 }
@@ -1371,17 +1367,17 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
        }
 
        mtspr(SPRN_LDBAR, ldbar_value);
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
                                get_hard_smp_processor_id(smp_processor_id()))) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
                        return -EINVAL;
                }
        }
        ++ref->refc;
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
        return 0;
 }
 
@@ -1414,19 +1410,19 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
                return;
        }
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        ref->refc--;
        if (ref->refc == 0) {
                if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
                                get_hard_smp_processor_id(smp_processor_id()))) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
                        return;
                }
        } else if (ref->refc < 0) {
                ref->refc = 0;
        }
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        trace_imc_event_stop(event, flags);
 }
@@ -1448,7 +1444,7 @@ static int trace_imc_event_init(struct perf_event *event)
         * no other thread is running any core/thread imc
         * events
         */
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
                /*
                 * No core/thread imc events are running in the
@@ -1457,10 +1453,10 @@ static int trace_imc_event_init(struct perf_event *event)
                imc_global_refc.id = IMC_DOMAIN_TRACE;
                imc_global_refc.refc++;
        } else {
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
                return -EBUSY;
        }
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        event->hw.idx = -1;
 
@@ -1533,10 +1529,10 @@ static int init_nest_pmu_ref(void)
        i = 0;
        for_each_node(nid) {
                /*
-                * Mutex lock to avoid races while tracking the number of
+                * Take the lock to avoid races while tracking the number of
                 * sessions using the chip's nest pmu units.
                 */
-               mutex_init(&nest_imc_refc[i].lock);
+               spin_lock_init(&nest_imc_refc[i].lock);
 
                /*
                 * Loop to init the "id" with the node_id. Variable "i" initialized to
@@ -1633,7 +1629,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
 static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 {
        if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
-               mutex_lock(&nest_init_lock);
+               spin_lock(&nest_init_lock);
                if (nest_pmus == 1) {
                        cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
                        kfree(nest_imc_refc);
@@ -1643,7 +1639,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 
                if (nest_pmus > 0)
                        nest_pmus--;
-               mutex_unlock(&nest_init_lock);
+               spin_unlock(&nest_init_lock);
        }
 
        /* Free core_imc memory */
@@ -1800,11 +1796,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
                * rest. To handle the cpuhotplug callback unregister, we track
                * the number of nest pmus in "nest_pmus".
                */
-               mutex_lock(&nest_init_lock);
+               spin_lock(&nest_init_lock);
                if (nest_pmus == 0) {
                        ret = init_nest_pmu_ref();
                        if (ret) {
-                               mutex_unlock(&nest_init_lock);
+                               spin_unlock(&nest_init_lock);
                                kfree(per_nest_pmu_arr);
                                per_nest_pmu_arr = NULL;
                                goto err_free_mem;
@@ -1812,7 +1808,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
                        /* Register for cpu hotplug notification. */
                        ret = nest_pmu_cpumask_init();
                        if (ret) {
-                               mutex_unlock(&nest_init_lock);
+                               spin_unlock(&nest_init_lock);
                                kfree(nest_imc_refc);
                                kfree(per_nest_pmu_arr);
                                per_nest_pmu_arr = NULL;
@@ -1820,7 +1816,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
                        }
                }
                nest_pmus++;
-               mutex_unlock(&nest_init_lock);
+               spin_unlock(&nest_init_lock);
                break;
        case IMC_DOMAIN_CORE:
                ret = core_imc_pmu_cpumask_init();
index e27c214..8dcd7af 100644 (file)
@@ -23,9 +23,9 @@
 #define memmove memmove
 #define memzero(s, n) memset((s), 0, (n))
 
-#ifdef CONFIG_KERNEL_BZIP2
+#if defined(CONFIG_KERNEL_BZIP2)
 #define BOOT_HEAP_SIZE 0x400000
-#elif CONFIG_KERNEL_ZSTD
+#elif defined(CONFIG_KERNEL_ZSTD)
 #define BOOT_HEAP_SIZE 0x30000
 #else
 #define BOOT_HEAP_SIZE 0x10000
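
The #ifdef/#elif mix being replaced was fragile: in a plain #elif an undefined macro silently evaluates to 0 (and trips -Wundef), whereas defined() makes both branches an explicit definedness test. A minimal illustration:

        #if BAR                 /* BAR undefined: evaluates to 0, -Wundef warns  */
        #endif
        #if defined(BAR)        /* also false, but explicitly a definedness test */
        #endif
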
index a7b4e1d..74b35ec 100644 (file)
@@ -190,7 +190,6 @@ CONFIG_NFT_CT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
-CONFIG_NFT_OBJREF=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
@@ -569,6 +568,7 @@ CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 CONFIG_LEGACY_PTY_COUNT=0
+# CONFIG_LEGACY_TIOCSTI is not set
 CONFIG_VIRTIO_CONSOLE=m
 CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_HANGCHECK_TIMER=m
@@ -660,6 +660,7 @@ CONFIG_CONFIGFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
 CONFIG_SQUASHFS_XATTR=y
 CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
@@ -705,6 +706,7 @@ CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
 CONFIG_SECURITY_LANDLOCK=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
+CONFIG_INTEGRITY_PLATFORM_KEYRING=y
 CONFIG_IMA=y
 CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
@@ -781,6 +783,7 @@ CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
 CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_SYSTEM_BLACKLIST_KEYRING=y
 CONFIG_CORDIC=m
 CONFIG_CRYPTO_LIB_CURVE25519=m
 CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
@@ -848,7 +851,6 @@ CONFIG_PREEMPT_TRACER=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_BPF_KPROBE_OVERRIDE=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_FTRACE_STARTUP_TEST=y
 # CONFIG_EVENT_TRACE_STARTUP_TEST is not set
@@ -870,7 +872,6 @@ CONFIG_FAIL_MAKE_REQUEST=y
 CONFIG_FAIL_IO_TIMEOUT=y
 CONFIG_FAIL_FUTEX=y
 CONFIG_FAULT_INJECTION_DEBUG_FS=y
-CONFIG_FAIL_FUNCTION=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_MIN_HEAP=y
index 2bc2d0f..cec7126 100644 (file)
@@ -181,7 +181,6 @@ CONFIG_NFT_CT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
-CONFIG_NFT_OBJREF=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
@@ -559,6 +558,7 @@ CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 CONFIG_LEGACY_PTY_COUNT=0
+# CONFIG_LEGACY_TIOCSTI is not set
 CONFIG_VIRTIO_CONSOLE=m
 CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_HANGCHECK_TIMER=m
@@ -645,6 +645,7 @@ CONFIG_CONFIGFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
 CONFIG_SQUASHFS_XATTR=y
 CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
@@ -688,6 +689,7 @@ CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
 CONFIG_SECURITY_LANDLOCK=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
+CONFIG_INTEGRITY_PLATFORM_KEYRING=y
 CONFIG_IMA=y
 CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
@@ -766,6 +768,7 @@ CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
 CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_SYSTEM_BLACKLIST_KEYRING=y
 CONFIG_CORDIC=m
 CONFIG_PRIME_NUMBERS=m
 CONFIG_CRYPTO_LIB_CURVE25519=m
@@ -798,7 +801,6 @@ CONFIG_STACK_TRACER=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_BPF_KPROBE_OVERRIDE=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_SAMPLES=y
 CONFIG_SAMPLE_TRACE_PRINTK=m
index ae14ab0..a9c0c81 100644 (file)
@@ -13,7 +13,6 @@ CONFIG_TUNE_ZEC12=y
 # CONFIG_COMPAT is not set
 CONFIG_NR_CPUS=2
 CONFIG_HZ_100=y
-# CONFIG_RELOCATABLE is not set
 # CONFIG_CHSC_SCH is not set
 # CONFIG_SCM_BUS is not set
 CONFIG_CRASH_DUMP=y
@@ -50,6 +49,7 @@ CONFIG_ZFCP=y
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
+# CONFIG_LEGACY_TIOCSTI is not set
 # CONFIG_HVC_IUCV is not set
 # CONFIG_HW_RANDOM_S390 is not set
 # CONFIG_HMC_DRV is not set
index feaba12..efa103b 100644 (file)
@@ -131,19 +131,21 @@ struct hws_combined_entry {
        struct hws_diag_entry   diag;   /* Diagnostic-sampling data entry */
 } __packed;
 
-struct hws_trailer_entry {
-       union {
-               struct {
-                       unsigned int f:1;       /* 0 - Block Full Indicator   */
-                       unsigned int a:1;       /* 1 - Alert request control  */
-                       unsigned int t:1;       /* 2 - Timestamp format       */
-                       unsigned int :29;       /* 3 - 31: Reserved           */
-                       unsigned int bsdes:16;  /* 32-47: size of basic SDE   */
-                       unsigned int dsdes:16;  /* 48-63: size of diagnostic SDE */
-               };
-               unsigned long long flags;       /* 0 - 63: All indicators     */
+union hws_trailer_header {
+       struct {
+               unsigned int f:1;       /* 0 - Block Full Indicator   */
+               unsigned int a:1;       /* 1 - Alert request control  */
+               unsigned int t:1;       /* 2 - Timestamp format       */
+               unsigned int :29;       /* 3 - 31: Reserved           */
+               unsigned int bsdes:16;  /* 32-47: size of basic SDE   */
+               unsigned int dsdes:16;  /* 48-63: size of diagnostic SDE */
+               unsigned long long overflow; /* 64 - Overflow Count   */
        };
-       unsigned long long overflow;     /* 64 - sample Overflow count        */
+       __uint128_t val;
+};
+
+struct hws_trailer_entry {
+       union hws_trailer_header header; /* 0 - 15 Flags + Overflow Count     */
        unsigned char timestamp[16];     /* 16 - 31 timestamp                 */
        unsigned long long reserved1;    /* 32 -Reserved                      */
        unsigned long long reserved2;    /*                                   */
@@ -290,14 +292,11 @@ static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
        return USEC_PER_SEC * qsi->cpu_speed / rate;
 }
 
-#define SDB_TE_ALERT_REQ_MASK  0x4000000000000000UL
-#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL
-
 /* Return TOD timestamp contained in a trailer entry */
 static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
 {
        /* TOD in STCKE format */
-       if (te->t)
+       if (te->header.t)
                return *((unsigned long long *) &te->timestamp[1]);
 
        /* TOD in STCK format */
index 77f2426..ac665b9 100644 (file)
@@ -4,8 +4,8 @@
  *
  *    Copyright IBM Corp. 1999, 2020
  */
-#ifndef DEBUG_H
-#define DEBUG_H
+#ifndef _ASM_S390_DEBUG_H
+#define _ASM_S390_DEBUG_H
 
 #include <linux/string.h>
 #include <linux/spinlock.h>
@@ -487,4 +487,4 @@ void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas);
 
 #endif /* MODULE */
 
-#endif /* DEBUG_H */
+#endif /* _ASM_S390_DEBUG_H */
index cb5fc06..081837b 100644 (file)
@@ -31,7 +31,7 @@
        pcp_op_T__ *ptr__;                                              \
        preempt_disable_notrace();                                      \
        ptr__ = raw_cpu_ptr(&(pcp));                                    \
-       prev__ = *ptr__;                                                \
+       prev__ = READ_ONCE(*ptr__);                                     \
        do {                                                            \
                old__ = prev__;                                         \
                new__ = old__ op (val);                                 \
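
The READ_ONCE() matters because this initial load seeds a compare-and-swap retry loop: without it the compiler may refetch or tear the read, so the loop could compare against a value it never actually observed. The distilled pattern (a sketch, not the full arch_this_cpu_* macro):

        prev = READ_ONCE(*ptr);                 /* one coherent snapshot  */
        do {
                old  = prev;
                new  = old + val;               /* or whatever "op" is    */
                prev = cmpxchg(ptr, old, new);  /* returns observed value */
        } while (prev != old);                  /* retry if someone raced */
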
index fc6d5f5..2df94d3 100644 (file)
@@ -187,8 +187,6 @@ static int kexec_file_add_ipl_report(struct kimage *image,
 
        data->memsz = ALIGN(data->memsz, PAGE_SIZE);
        buf.mem = data->memsz;
-       if (image->type == KEXEC_TYPE_CRASH)
-               buf.mem += crashk_res.start;
 
        ptr = (void *)ipl_cert_list_addr;
        end = ptr + ipl_cert_list_size;
@@ -225,6 +223,9 @@ static int kexec_file_add_ipl_report(struct kimage *image,
                data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
        *lc_ipl_parmblock_ptr = (__u32)buf.mem;
 
+       if (image->type == KEXEC_TYPE_CRASH)
+               buf.mem += crashk_res.start;
+
        ret = kexec_add_buffer(&buf);
 out:
        return ret;
index 332a499..ce886a0 100644 (file)
@@ -163,14 +163,15 @@ static void free_sampling_buffer(struct sf_buffer *sfb)
 
 static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags)
 {
-       unsigned long sdb, *trailer;
+       struct hws_trailer_entry *te;
+       unsigned long sdb;
 
        /* Allocate and initialize sample-data-block */
        sdb = get_zeroed_page(gfp_flags);
        if (!sdb)
                return -ENOMEM;
-       trailer = trailer_entry_ptr(sdb);
-       *trailer = SDB_TE_ALERT_REQ_MASK;
+       te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
+       te->header.a = 1;
 
        /* Link SDB into the sample-data-block-table */
        *sdbt = sdb;
@@ -1206,7 +1207,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
                                            "%s: Found unknown"
                                            " sampling data entry: te->f %i"
                                            " basic.def %#4x (%p)\n", __func__,
-                                           te->f, sample->def, sample);
+                                           te->header.f, sample->def, sample);
                        /* Sample slot is not yet written or other record.
                         *
                         * This condition can occur if the buffer was reused
@@ -1217,7 +1218,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
                         * that are not full.  Stop processing if the first
                         * invalid format was detected.
                         */
-                       if (!te->f)
+                       if (!te->header.f)
                                break;
                }
 
@@ -1227,6 +1228,16 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
        }
 }
 
+static inline __uint128_t __cdsg(__uint128_t *ptr, __uint128_t old, __uint128_t new)
+{
+       asm volatile(
+               "       cdsg    %[old],%[new],%[ptr]\n"
+               : [old] "+d" (old), [ptr] "+QS" (*ptr)
+               : [new] "d" (new)
+               : "memory", "cc");
+       return old;
+}
+
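
Besides the swap loops below, callers use __cdsg() as an atomic 16-byte load by passing old == new == 0: if the header happens to be 0 the instruction swaps 0 for 0 (a no-op), otherwise the compare fails and CDSG deposits the current memory contents in the old operand, which __cdsg() returns. Either way the caller gets an untorn 128-bit snapshot (helper name hypothetical):

        static inline __uint128_t cdsg_read(__uint128_t *ptr)
        {
                return __cdsg(ptr, 0, 0);       /* no-op swap, atomic read */
        }
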
 /* hw_perf_event_update() - Process sampling buffer
  * @event:     The perf event
  * @flush_all: Flag to also flush partially filled sample-data-blocks
@@ -1243,10 +1254,11 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
  */
 static void hw_perf_event_update(struct perf_event *event, int flush_all)
 {
+       unsigned long long event_overflow, sampl_overflow, num_sdb;
+       union hws_trailer_header old, prev, new;
        struct hw_perf_event *hwc = &event->hw;
        struct hws_trailer_entry *te;
        unsigned long *sdbt;
-       unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags;
        int done;
 
        /*
@@ -1266,25 +1278,25 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
                te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
 
                /* Leave loop if no more work to do (block full indicator) */
-               if (!te->f) {
+               if (!te->header.f) {
                        done = 1;
                        if (!flush_all)
                                break;
                }
 
                /* Check the sample overflow count */
-               if (te->overflow)
+               if (te->header.overflow)
                        /* Account sample overflows and, if a particular limit
                         * is reached, extend the sampling buffer.
                         * For details, see sfb_account_overflows().
                         */
-                       sampl_overflow += te->overflow;
+                       sampl_overflow += te->header.overflow;
 
                /* Timestamps are valid for full sample-data-blocks only */
                debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx "
                                    "overflow %llu timestamp %#llx\n",
-                                   __func__, (unsigned long)sdbt, te->overflow,
-                                   (te->f) ? trailer_timestamp(te) : 0ULL);
+                                   __func__, (unsigned long)sdbt, te->header.overflow,
+                                   (te->header.f) ? trailer_timestamp(te) : 0ULL);
 
                /* Collect all samples from a single sample-data-block and
                 * flag if an (perf) event overflow happened.  If so, the PMU
@@ -1294,12 +1306,16 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
                num_sdb++;
 
                /* Reset trailer (using compare-double-and-swap) */
+               /* READ_ONCE() 16 byte header */
+               prev.val = __cdsg(&te->header.val, 0, 0);
                do {
-                       te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
-                       te_flags |= SDB_TE_ALERT_REQ_MASK;
-               } while (!cmpxchg_double(&te->flags, &te->overflow,
-                                        te->flags, te->overflow,
-                                        te_flags, 0ULL));
+                       old.val = prev.val;
+                       new.val = prev.val;
+                       new.f = 0;
+                       new.a = 1;
+                       new.overflow = 0;
+                       prev.val = __cdsg(&te->header.val, old.val, new.val);
+               } while (prev.val != old.val);
 
                /* Advance to next sample-data-block */
                sdbt++;
@@ -1384,7 +1400,7 @@ static void aux_output_end(struct perf_output_handle *handle)
        range_scan = AUX_SDB_NUM_ALERT(aux);
        for (i = 0, idx = aux->head; i < range_scan; i++, idx++) {
                te = aux_sdb_trailer(aux, idx);
-               if (!(te->flags & SDB_TE_BUFFER_FULL_MASK))
+               if (!te->header.f)
                        break;
        }
        /* i is num of SDBs which are full */
@@ -1392,7 +1408,7 @@ static void aux_output_end(struct perf_output_handle *handle)
 
        /* Remove alert indicators in the buffer */
        te = aux_sdb_trailer(aux, aux->alert_mark);
-       te->flags &= ~SDB_TE_ALERT_REQ_MASK;
+       te->header.a = 0;
 
        debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n",
                            __func__, i, range_scan, aux->head);
@@ -1437,9 +1453,9 @@ static int aux_output_begin(struct perf_output_handle *handle,
                idx = aux->empty_mark + 1;
                for (i = 0; i < range_scan; i++, idx++) {
                        te = aux_sdb_trailer(aux, idx);
-                       te->flags &= ~(SDB_TE_BUFFER_FULL_MASK |
-                                      SDB_TE_ALERT_REQ_MASK);
-                       te->overflow = 0;
+                       te->header.f = 0;
+                       te->header.a = 0;
+                       te->header.overflow = 0;
                }
                /* Save the position of empty SDBs */
                aux->empty_mark = aux->head + range - 1;
@@ -1448,7 +1464,7 @@ static int aux_output_begin(struct perf_output_handle *handle,
        /* Set alert indicator */
        aux->alert_mark = aux->head + range/2 - 1;
        te = aux_sdb_trailer(aux, aux->alert_mark);
-       te->flags = te->flags | SDB_TE_ALERT_REQ_MASK;
+       te->header.a = 1;
 
        /* Reset hardware buffer head */
        head = AUX_SDB_INDEX(aux, aux->head);
@@ -1475,14 +1491,17 @@ static int aux_output_begin(struct perf_output_handle *handle,
 static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
                          unsigned long long *overflow)
 {
-       unsigned long long orig_overflow, orig_flags, new_flags;
+       union hws_trailer_header old, prev, new;
        struct hws_trailer_entry *te;
 
        te = aux_sdb_trailer(aux, alert_index);
+       /* READ_ONCE() 16 byte header */
+       prev.val = __cdsg(&te->header.val, 0, 0);
        do {
-               orig_flags = te->flags;
-               *overflow = orig_overflow = te->overflow;
-               if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
+               old.val = prev.val;
+               new.val = prev.val;
+               *overflow = old.overflow;
+               if (old.f) {
                        /*
                         * SDB is already set by hardware.
                         * Abort and try to set somewhere
@@ -1490,10 +1509,10 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
                         */
                        return false;
                }
-               new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK;
-       } while (!cmpxchg_double(&te->flags, &te->overflow,
-                                orig_flags, orig_overflow,
-                                new_flags, 0ULL));
+               new.a = 1;
+               new.overflow = 0;
+               prev.val = __cdsg(&te->header.val, old.val, new.val);
+       } while (prev.val != old.val);
        return true;
 }
 
@@ -1522,8 +1541,9 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
 static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
                             unsigned long long *overflow)
 {
-       unsigned long long orig_overflow, orig_flags, new_flags;
        unsigned long i, range_scan, idx, idx_old;
+       union hws_trailer_header old, prev, new;
+       unsigned long long orig_overflow;
        struct hws_trailer_entry *te;
 
        debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld "
@@ -1554,17 +1574,20 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
        idx_old = idx = aux->empty_mark + 1;
        for (i = 0; i < range_scan; i++, idx++) {
                te = aux_sdb_trailer(aux, idx);
+               /* READ_ONCE() 16 byte header */
+               prev.val = __cdsg(&te->header.val, 0, 0);
                do {
-                       orig_flags = te->flags;
-                       orig_overflow = te->overflow;
-                       new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK;
+                       old.val = prev.val;
+                       new.val = prev.val;
+                       orig_overflow = old.overflow;
+                       new.f = 0;
+                       new.overflow = 0;
                        if (idx == aux->alert_mark)
-                               new_flags |= SDB_TE_ALERT_REQ_MASK;
+                               new.a = 1;
                        else
-                               new_flags &= ~SDB_TE_ALERT_REQ_MASK;
-               } while (!cmpxchg_double(&te->flags, &te->overflow,
-                                        orig_flags, orig_overflow,
-                                        new_flags, 0ULL));
+                               new.a = 0;
+                       prev.val = __cdsg(&te->header.val, old.val, new.val);
+               } while (prev.val != old.val);
                *overflow += orig_overflow;
        }
 
index 5ea3830..cbf9c1b 100644 (file)
@@ -17,6 +17,8 @@
 /* Handle ro_after_init data on our own. */
 #define RO_AFTER_INIT_DATA
 
+#define RUNTIME_DISCARD_EXIT
+
 #define EMITS_PT_NOTE
 
 #include <asm-generic/vmlinux.lds.h>
@@ -79,6 +81,7 @@ SECTIONS
                _end_amode31_refs = .;
        }
 
+       . = ALIGN(PAGE_SIZE);
        _edata = .;             /* End of data section */
 
        /* will be freed after init */
@@ -193,6 +196,7 @@ SECTIONS
 
        BSS_SECTION(PAGE_SIZE, 4 * PAGE_SIZE, PAGE_SIZE)
 
+       . = ALIGN(PAGE_SIZE);
        _end = . ;
 
        /*
index 1dae78d..ab26aa5 100644 (file)
@@ -83,8 +83,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
-               union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
+               union esca_sigp_ctrl new_val = {0}, old_val;
 
+               old_val = READ_ONCE(*sigp_ctrl);
                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;
@@ -95,8 +96,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
-               union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
+               union bsca_sigp_ctrl new_val = {0}, old_val;
 
+               old_val = READ_ONCE(*sigp_ctrl);
                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;
@@ -126,16 +128,18 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
-               union esca_sigp_ctrl old = *sigp_ctrl;
+               union esca_sigp_ctrl old;
 
+               old = READ_ONCE(*sigp_ctrl);
                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
-               union bsca_sigp_ctrl old = *sigp_ctrl;
+               union bsca_sigp_ctrl old;
 
+               old = READ_ONCE(*sigp_ctrl);
                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        }
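
The READ_ONCE() guarantees that expect and the cmpxchg() compare value come from a single snapshot: without it the compiler may legally reload *sigp_ctrl in between, letting the exchange succeed against a value the code never validated. Distilled:

        old = READ_ONCE(*sigp_ctrl);            /* one coherent snapshot */
        expect = old.value;
        rc = cmpxchg(&sigp_ctrl->value, old.value, 0); /* fails if it changed */
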
index a889a3a..d1ce73f 100644 (file)
@@ -28,7 +28,7 @@
 #define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
 
-typedef struct {
+typedef union {
        struct {
                unsigned long pmd_low;
                unsigned long pmd_high;
index 5521ea1..aa9b964 100644 (file)
@@ -32,7 +32,7 @@ intcall:
        movw    %dx, %si
        movw    %sp, %di
        movw    $11, %cx
-       rep; movsd
+       rep; movsl
 
        /* Pop full state from the stack */
        popal
@@ -67,7 +67,7 @@ intcall:
        jz      4f
        movw    %sp, %si
        movw    $11, %cx
-       rep; movsd
+       rep; movsl
 4:     addw    $44, %sp
 
        /* Restore state and return */
index a2834bc..3019fb1 100644 (file)
@@ -41,6 +41,7 @@
  *     MSR_CORE_C1_RES: CORE C1 Residency Counter
  *                      perf code: 0x00
  *                      Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL
+ *                                       MTL
  *                      Scope: Core (each processor core has a MSR)
  *     MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
  *                            perf code: 0x01
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
  *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- *                                             TGL,TNT,RKL,ADL,RPL,SPR
+ *                                             TGL,TNT,RKL,ADL,RPL,SPR,MTL
  *                            Scope: Core
  *     MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
  *                            perf code: 0x03
  *                            Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
- *                                             ICL,TGL,RKL,ADL,RPL
+ *                                             ICL,TGL,RKL,ADL,RPL,MTL
  *                            Scope: Core
  *     MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
  *                            perf code: 0x00
  *                            Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
  *                                             KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
- *                                             RPL,SPR
+ *                                             RPL,SPR,MTL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
  *                            perf code: 0x01
  *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
  *                                             GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
- *                                             ADL,RPL
+ *                                             ADL,RPL,MTL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
  *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- *                                             TGL,TNT,RKL,ADL,RPL,SPR
+ *                                             TGL,TNT,RKL,ADL,RPL,SPR,MTL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
  *                            perf code: 0x03
  *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
- *                                             KBL,CML,ICL,TGL,RKL,ADL,RPL
+ *                                             KBL,CML,ICL,TGL,RKL,ADL,RPL,MTL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
  *                            perf code: 0x04
  *                            Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
- *                                             ADL,RPL
+ *                                             ADL,RPL,MTL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
  *                            perf code: 0x05
  *                            Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
- *                                             ADL,RPL
+ *                                             ADL,RPL,MTL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
  *                            perf code: 0x06
  *                            Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
- *                                             TNT,RKL,ADL,RPL
+ *                                             TNT,RKL,ADL,RPL,MTL
  *                            Scope: Package (physical package)
  *
  */
@@ -686,6 +687,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,          &adl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,        &adl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,        &adl_cstates),
+       X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE,          &adl_cstates),
+       X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,        &adl_cstates),
        { },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
index 6f1ccc5..459b1aa 100644 (file)
@@ -1833,6 +1833,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,        &adl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,        &adl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &spr_uncore_init),
+       X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,     &spr_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,      &snr_uncore_init),
        {},
 };
index ecced3a..c65d890 100644 (file)
@@ -69,6 +69,7 @@ static bool test_intel(int idx, void *data)
        case INTEL_FAM6_BROADWELL_G:
        case INTEL_FAM6_BROADWELL_X:
        case INTEL_FAM6_SAPPHIRERAPIDS_X:
+       case INTEL_FAM6_EMERALDRAPIDS_X:
 
        case INTEL_FAM6_ATOM_SILVERMONT:
        case INTEL_FAM6_ATOM_SILVERMONT_D:
@@ -107,6 +108,8 @@ static bool test_intel(int idx, void *data)
        case INTEL_FAM6_RAPTORLAKE:
        case INTEL_FAM6_RAPTORLAKE_P:
        case INTEL_FAM6_RAPTORLAKE_S:
+       case INTEL_FAM6_METEORLAKE:
+       case INTEL_FAM6_METEORLAKE_L:
                if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
                        return true;
                break;
index f35f1ff..6aaae18 100644 (file)
@@ -1111,6 +1111,7 @@ struct msr_bitmap_range {
 
 /* Xen emulation context */
 struct kvm_xen {
+       struct mutex xen_lock;
        u32 xen_version;
        bool long_mode;
        bool runstate_update_flag;
index a868b76..1f83b05 100644 (file)
@@ -2364,9 +2364,8 @@ static int mp_irqdomain_create(int ioapic)
                return -ENODEV;
        }
 
-       ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops,
-                                                (void *)(long)ioapic);
-
+       ip->irqdomain = irq_domain_create_hierarchy(parent, 0, hwirqs, fn, cfg->ops,
+                                                   (void *)(long)ioapic);
        if (!ip->irqdomain) {
                /* Release fw handle if it was allocated above */
                if (!cfg->dev)
@@ -2374,8 +2373,6 @@ static int mp_irqdomain_create(int ioapic)
                return -ENOMEM;
        }
 
-       ip->irqdomain->parent = parent;
-
        if (cfg->type == IOAPIC_DOMAIN_LEGACY ||
            cfg->type == IOAPIC_DOMAIN_STRICT)
                ioapic_dynirq_base = max(ioapic_dynirq_base,
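
irq_domain_create_hierarchy() wires the parent up as part of creating the domain, instead of publishing the domain and patching ->parent in afterwards, so no other CPU can observe a half-initialized domain while drivers probe in parallel. The shape of the change, condensed (the flags argument 0 means no extra hierarchy flags):

        /* before: published first, linked afterwards */
        d = irq_domain_create_linear(fn, hwirqs, ops, host_data);
        d->parent = parent;             /* window where d has no parent */

        /* after: linked at creation time */
        d = irq_domain_create_hierarchy(parent, 0, hwirqs, fn, ops, host_data);
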
index efe0c30..77538ab 100644 (file)
@@ -146,6 +146,30 @@ static inline struct rmid_entry *__rmid_entry(u32 rmid)
        return entry;
 }
 
+static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
+{
+       u64 msr_val;
+
+       /*
+        * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
+        * with a valid event code for supported resource type and the bits
+        * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
+        * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
+        * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
+        * are error bits.
+        */
+       wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
+       rdmsrl(MSR_IA32_QM_CTR, msr_val);
+
+       if (msr_val & RMID_VAL_ERROR)
+               return -EIO;
+       if (msr_val & RMID_VAL_UNAVAIL)
+               return -EINVAL;
+
+       *val = msr_val;
+       return 0;
+}
+
 static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
                                                 u32 rmid,
                                                 enum resctrl_event_id eventid)
@@ -172,8 +196,12 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
        struct arch_mbm_state *am;
 
        am = get_arch_mbm_state(hw_dom, rmid, eventid);
-       if (am)
+       if (am) {
                memset(am, 0, sizeof(*am));
+
+               /* Record any initial, non-zero count value. */
+               __rmid_read(rmid, eventid, &am->prev_msr);
+       }
 }
 
 static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
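
Seeding am->prev_msr with whatever the hardware has already counted means the next read reports only growth since the reset, rather than charging pre-existing residue to the new user of the RMID. Roughly what the read side then computes (simplified; the full read path continues past this excerpt):

        /* delta since reset, modulo the hardware counter width */
        chunks = mbm_overflow_count(am->prev_msr, msr_val, width);
        am->prev_msr = msr_val;         /* baseline for the next read */
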
@@ -191,25 +219,14 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
        struct arch_mbm_state *am;
        u64 msr_val, chunks;
+       int ret;
 
        if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
                return -EINVAL;
 
-       /*
-        * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
-        * with a valid event code for supported resource type and the bits
-        * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
-        * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
-        * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
-        * are error bits.
-        */
-       wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
-       rdmsrl(MSR_IA32_QM_CTR, msr_val);
-
-       if (msr_val & RMID_VAL_ERROR)
-               return -EIO;
-       if (msr_val & RMID_VAL_UNAVAIL)
-               return -EINVAL;
+       ret = __rmid_read(rmid, eventid, &msr_val);
+       if (ret)
+               return ret;
 
        am = get_arch_mbm_state(hw_dom, rmid, eventid);
        if (am) {
index e5a48f0..5993da2 100644 (file)
@@ -580,8 +580,10 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
        /*
         * Ensure the task's closid and rmid are written before determining
         * whether the task is current, which decides if it must be interrupted.
+        * This pairs with the full barrier between the rq->curr update and
+        * resctrl_sched_in() during context switch.
         */
-       barrier();
+       smp_mb();
 
        /*
         * By now, the task's closid and rmid are set. If the task is current
@@ -2402,6 +2404,14 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
                        WRITE_ONCE(t->rmid, to->mon.rmid);
 
                        /*
+                        * Order the closid/rmid stores above before the loads
+                        * in task_curr(). This pairs with the full barrier
+                        * between the rq->curr update and resctrl_sched_in()
+                        * during context switch.
+                        */
+                       smp_mb();
+
+                       /*
                         * If the task is on a CPU, set the CPU in the mask.
                         * The detection is inaccurate as tasks might move or
                         * schedule before the smp function call takes place.
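
The pairing both hunks describe is the classic two-sided ordering: each side stores, issues a full barrier, then loads what the other side stores, so at least one of them is guaranteed to see the other's update. As a litmus-style sketch:

        /*
         * CPU 0: move task                 CPU 1: context switch
         * -----------------                ---------------------
         * WRITE_ONCE(t->closid, c);        rq->curr = t;
         * WRITE_ONCE(t->rmid, r);          smp_mb();  (implied at switch)
         * smp_mb();                        resctrl_sched_in():
         * if (task_curr(t)) send IPI;          reads t->closid, t->rmid
         *
         * Outcome: either CPU 0 sees t running and interrupts it, or
         * CPU 1 already sees the new closid/rmid -- never neither.
         */
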
index b14653b..596061c 100644 (file)
@@ -770,16 +770,22 @@ struct kvm_cpuid_array {
        int nent;
 };
 
+static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
+{
+       if (array->nent >= array->maxnent)
+               return NULL;
+
+       return &array->entries[array->nent++];
+}
+
 static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
                                              u32 function, u32 index)
 {
-       struct kvm_cpuid_entry2 *entry;
+       struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);
 
-       if (array->nent >= array->maxnent)
+       if (!entry)
                return NULL;
 
-       entry = &array->entries[array->nent++];
-
        memset(entry, 0, sizeof(*entry));
        entry->function = function;
        entry->index = index;
@@ -956,22 +962,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                entry->edx = edx.full;
                break;
        }
-       /*
-        * Per Intel's SDM, the 0x1f is a superset of 0xb,
-        * thus they can be handled by common code.
-        */
        case 0x1f:
        case 0xb:
                /*
-                * Populate entries until the level type (ECX[15:8]) of the
-                * previous entry is zero.  Note, CPUID EAX.{0x1f,0xb}.0 is
-                * the starting entry, filled by the primary do_host_cpuid().
+                * No topology; a valid topology is indicated by the presence
+                * of subleaf 1.
                 */
-               for (i = 1; entry->ecx & 0xff00; ++i) {
-                       entry = do_host_cpuid(array, function, i);
-                       if (!entry)
-                               goto out;
-               }
+               entry->eax = entry->ebx = entry->ecx = 0;
                break;
        case 0xd: {
                u64 permitted_xcr0 = kvm_caps.supported_xcr0 & xstate_get_guest_group_perm();
@@ -1202,6 +1199,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                entry->ebx = entry->ecx = entry->edx = 0;
                break;
        case 0x8000001e:
+               /* Do not return host topology information.  */
+               entry->eax = entry->ebx = entry->ecx = 0;
+               entry->edx = 0; /* reserved */
                break;
        case 0x8000001F:
                if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
index bc9cd70..add65dd 100644 (file)
@@ -138,15 +138,13 @@ void recalc_intercepts(struct vcpu_svm *svm)
                c->intercepts[i] = h->intercepts[i];
 
        if (g->int_ctl & V_INTR_MASKING_MASK) {
-               /* We only want the cr8 intercept bits of L1 */
-               vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
-               vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
-
                /*
-                * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
-                * affect any interrupt we may want to inject; therefore,
-                * interrupt window vmexits are irrelevant to L0.
+                * Once running L2 with HF_VINTR_MASK, EFLAGS.IF and CR8
+                * do not affect any interrupt we may want to inject;
+                * therefore, writes to CR8 are irrelevant to L0, as are
+                * interrupt window vmexits.
                 */
+               vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
                vmcb_clr_intercept(c, INTERCEPT_VINTR);
        }
 
index 2e29bdc..8fd41f5 100644 (file)
@@ -271,7 +271,15 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
         * Attempt to obtain the GPC lock on *both* (if there are two)
         * gfn_to_pfn caches that cover the region.
         */
-       read_lock_irqsave(&gpc1->lock, flags);
+       if (atomic) {
+               local_irq_save(flags);
+               if (!read_trylock(&gpc1->lock)) {
+                       local_irq_restore(flags);
+                       return;
+               }
+       } else {
+               read_lock_irqsave(&gpc1->lock, flags);
+       }
        while (!kvm_gpc_check(gpc1, user_len1)) {
                read_unlock_irqrestore(&gpc1->lock, flags);
 
@@ -304,9 +312,18 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
                 * The guest's runstate_info is split across two pages and we
                 * need to hold and validate both GPCs simultaneously. We can
                 * declare a lock ordering GPC1 > GPC2 because nothing else
-                * takes them more than one at a time.
+                * takes them more than one at a time. Set a subclass on the
+                * gpc1 lock to make lockdep shut up about it.
                 */
-               read_lock(&gpc2->lock);
+               lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_);
+               if (atomic) {
+                       if (!read_trylock(&gpc2->lock)) {
+                               read_unlock_irqrestore(&gpc1->lock, flags);
+                               return;
+                       }
+               } else {
+                       read_lock(&gpc2->lock);
+               }
 
                if (!kvm_gpc_check(gpc2, user_len2)) {
                        read_unlock(&gpc2->lock);
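
The atomic path follows the usual rule that atomic context may not sleep on a lock: try it, and if it cannot be taken immediately, back out and let a later non-atomic update catch up. A condensed sketch of the shape introduced here, pulled out as a hypothetical helper (the patch open-codes this inline; names and signature are illustrative):

static bool runstate_lock_pair(rwlock_t *l1, rwlock_t *l2, bool atomic,
			       unsigned long *flags)
{
	if (atomic) {
		/* Never spin-wait here; bail and defer the update. */
		local_irq_save(*flags);
		if (!read_trylock(l1)) {
			local_irq_restore(*flags);
			return false;
		}
		if (l2 && !read_trylock(l2)) {
			read_unlock(l1);
			local_irq_restore(*flags);
			return false;
		}
		return true;
	}

	read_lock_irqsave(l1, *flags);
	if (l2)
		read_lock(l2);
	return true;
}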
@@ -590,26 +607,26 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
                        r = -EINVAL;
                } else {
-                       mutex_lock(&kvm->lock);
+                       mutex_lock(&kvm->arch.xen.xen_lock);
                        kvm->arch.xen.long_mode = !!data->u.long_mode;
-                       mutex_unlock(&kvm->lock);
+                       mutex_unlock(&kvm->arch.xen.xen_lock);
                        r = 0;
                }
                break;
 
        case KVM_XEN_ATTR_TYPE_SHARED_INFO:
-               mutex_lock(&kvm->lock);
+               mutex_lock(&kvm->arch.xen.xen_lock);
                r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
-               mutex_unlock(&kvm->lock);
+               mutex_unlock(&kvm->arch.xen.xen_lock);
                break;
 
        case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
                if (data->u.vector && data->u.vector < 0x10)
                        r = -EINVAL;
                else {
-                       mutex_lock(&kvm->lock);
+                       mutex_lock(&kvm->arch.xen.xen_lock);
                        kvm->arch.xen.upcall_vector = data->u.vector;
-                       mutex_unlock(&kvm->lock);
+                       mutex_unlock(&kvm->arch.xen.xen_lock);
                        r = 0;
                }
                break;
@@ -619,9 +636,9 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                break;
 
        case KVM_XEN_ATTR_TYPE_XEN_VERSION:
-               mutex_lock(&kvm->lock);
+               mutex_lock(&kvm->arch.xen.xen_lock);
                kvm->arch.xen.xen_version = data->u.xen_version;
-               mutex_unlock(&kvm->lock);
+               mutex_unlock(&kvm->arch.xen.xen_lock);
                r = 0;
                break;
 
@@ -630,9 +647,9 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                        r = -EOPNOTSUPP;
                        break;
                }
-               mutex_lock(&kvm->lock);
+               mutex_lock(&kvm->arch.xen.xen_lock);
                kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag;
-               mutex_unlock(&kvm->lock);
+               mutex_unlock(&kvm->arch.xen.xen_lock);
                r = 0;
                break;
 
@@ -647,7 +664,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 {
        int r = -ENOENT;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
 
        switch (data->type) {
        case KVM_XEN_ATTR_TYPE_LONG_MODE:
@@ -686,7 +703,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                break;
        }
 
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
        return r;
 }
 
@@ -694,7 +711,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 {
        int idx, r = -ENOENT;
 
-       mutex_lock(&vcpu->kvm->lock);
+       mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
        idx = srcu_read_lock(&vcpu->kvm->srcu);
 
        switch (data->type) {
@@ -922,7 +939,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
        }
 
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
-       mutex_unlock(&vcpu->kvm->lock);
+       mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
        return r;
 }
 
@@ -930,7 +947,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 {
        int r = -ENOENT;
 
-       mutex_lock(&vcpu->kvm->lock);
+       mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
 
        switch (data->type) {
        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
@@ -1013,7 +1030,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                break;
        }
 
-       mutex_unlock(&vcpu->kvm->lock);
+       mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
        return r;
 }
 
@@ -1106,7 +1123,7 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
             xhc->blob_size_32 || xhc->blob_size_64))
                return -EINVAL;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
 
        if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
                static_branch_inc(&kvm_xen_enabled.key);
@@ -1115,7 +1132,7 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
 
        memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));
 
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
        return 0;
 }
 
@@ -1658,15 +1675,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
                mm_borrowed = true;
        }
 
-       /*
-        * For the irqfd workqueue, using the main kvm->lock mutex is
-        * fine since this function is invoked from kvm_set_irq() with
-        * no other lock held, no srcu. In future if it will be called
-        * directly from a vCPU thread (e.g. on hypercall for an IPI)
-        * then it may need to switch to using a leaf-node mutex for
-        * serializing the shared_info mapping.
-        */
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
 
        /*
         * It is theoretically possible for the page to be unmapped
@@ -1695,7 +1704,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
                srcu_read_unlock(&kvm->srcu, idx);
        } while(!rc);
 
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
 
        if (mm_borrowed)
                kthread_unuse_mm(kvm->mm);
@@ -1811,7 +1820,7 @@ static int kvm_xen_eventfd_update(struct kvm *kvm,
        int ret;
 
        /* Protect writes to evtchnfd as well as the idr lookup.  */
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
        evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
 
        ret = -ENOENT;
@@ -1842,7 +1851,7 @@ static int kvm_xen_eventfd_update(struct kvm *kvm,
        }
        ret = 0;
 out_unlock:
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
        return ret;
 }
 
@@ -1905,10 +1914,10 @@ static int kvm_xen_eventfd_assign(struct kvm *kvm,
                evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
        }
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
        ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
                        GFP_KERNEL);
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
        if (ret >= 0)
                return 0;
 
@@ -1926,9 +1935,9 @@ static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
 {
        struct evtchnfd *evtchnfd;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
        evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
 
        if (!evtchnfd)
                return -ENOENT;
@@ -1946,7 +1955,7 @@ static int kvm_xen_eventfd_reset(struct kvm *kvm)
        int i;
        int n = 0;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
 
        /*
         * Because synchronize_srcu() cannot be called inside the
@@ -1958,7 +1967,7 @@ static int kvm_xen_eventfd_reset(struct kvm *kvm)
 
        all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
        if (!all_evtchnfds) {
-               mutex_unlock(&kvm->lock);
+               mutex_unlock(&kvm->arch.xen.xen_lock);
                return -ENOMEM;
        }
 
@@ -1967,7 +1976,7 @@ static int kvm_xen_eventfd_reset(struct kvm *kvm)
                all_evtchnfds[n++] = evtchnfd;
                idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
        }
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
 
        synchronize_srcu(&kvm->srcu);
 
@@ -2069,6 +2078,7 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
 
 void kvm_xen_init_vm(struct kvm *kvm)
 {
+       mutex_init(&kvm->arch.xen.xen_lock);
        idr_init(&kvm->arch.xen.evtchn_ports);
        kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
 }
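
All of the above converts the Xen attribute, event-channel and shared-info paths from the global kvm->lock to a dedicated leaf-node mutex, so they can be entered from vCPU context without lock inversion. Presumably the series adds the field roughly like this (a sketch; the real struct kvm_xen carries more state than shown):

struct kvm_xen {
	struct mutex xen_lock;		/* leaf node: innermost, nothing
					 * else is taken while held */
	u32 xen_version;
	bool long_mode;
	u8 upcall_vector;
	struct gfn_to_pfn_cache shinfo_cache;
	struct idr evtchn_ports;
	/* ... */
};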
index d398735..cb258f5 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/pti.h>
 #include <asm/text-patching.h>
 #include <asm/memtype.h>
+#include <asm/paravirt.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
@@ -804,6 +805,9 @@ void __init poking_init(void)
        poking_mm = mm_alloc();
        BUG_ON(!poking_mm);
 
+       /* Xen PV guests need the PGD to be pinned. */
+       paravirt_arch_dup_mmap(NULL, poking_mm);
+
        /*
         * Randomize the poking address, but make sure that the following page
         * will be mapped at the same PMD. We need 2 pages, so find space for 3,
index 46de9cf..fb4b1b5 100644 (file)
@@ -387,7 +387,8 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end,
                u8 mtrr_type, uniform;
 
                mtrr_type = mtrr_type_lookup(start, end, &uniform);
-               if (mtrr_type != MTRR_TYPE_WRBACK)
+               if (mtrr_type != MTRR_TYPE_WRBACK &&
+                   mtrr_type != MTRR_TYPE_INVALID)
                        return _PAGE_CACHE_MODE_UC_MINUS;
 
                return _PAGE_CACHE_MODE_WB;
index 758cbfe..4b3efaa 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/efi.h>
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/bitmap.h>
@@ -442,17 +443,42 @@ static bool is_acpi_reserved(u64 start, u64 end, enum e820_type not_used)
        return mcfg_res.flags;
 }
 
+static bool is_efi_mmio(u64 start, u64 end, enum e820_type not_used)
+{
+#ifdef CONFIG_EFI
+       efi_memory_desc_t *md;
+       u64 size, mmio_start, mmio_end;
+
+       for_each_efi_memory_desc(md) {
+               if (md->type == EFI_MEMORY_MAPPED_IO) {
+                       size = md->num_pages << EFI_PAGE_SHIFT;
+                       mmio_start = md->phys_addr;
+                       mmio_end = mmio_start + size;
+
+                       /*
+                        * N.B. Caller supplies (start, start + size),
+                        * so to match, mmio_end is the first address
+                        * *past* the EFI_MEMORY_MAPPED_IO area.
+                        */
+                       if (mmio_start <= start && end <= mmio_end)
+                               return true;
+               }
+       }
+#endif
+
+       return false;
+}
+
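Since both the caller's range and the descriptor's range are half-open, containment is just two comparisons. A worked illustration with made-up numbers:

	/* Hypothetical descriptor: 16 pages of MMIO at 0x80000000. */
	u64 mmio_start = 0x80000000ULL;
	u64 mmio_end   = mmio_start + (16ULL << EFI_PAGE_SHIFT); /* 0x80010000 */

	/* Caller asks about [0x80004000, 0x80010000): end may equal
	 * mmio_end because both ends are exclusive. */
	bool contained = (mmio_start <= 0x80004000ULL) &&
			 (0x80010000ULL <= mmio_end);	/* true */
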
 typedef bool (*check_reserved_t)(u64 start, u64 end, enum e820_type type);
 
 static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
                                     struct pci_mmcfg_region *cfg,
-                                    struct device *dev, int with_e820)
+                                    struct device *dev, const char *method)
 {
        u64 addr = cfg->res.start;
        u64 size = resource_size(&cfg->res);
        u64 old_size = size;
        int num_buses;
-       char *method = with_e820 ? "E820" : "ACPI motherboard resources";
 
        while (!is_reserved(addr, addr + size, E820_TYPE_RESERVED)) {
                size >>= 1;
@@ -464,10 +490,10 @@ static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
                return false;
 
        if (dev)
-               dev_info(dev, "MMCONFIG at %pR reserved in %s\n",
+               dev_info(dev, "MMCONFIG at %pR reserved as %s\n",
                         &cfg->res, method);
        else
-               pr_info(PREFIX "MMCONFIG at %pR reserved in %s\n",
+               pr_info(PREFIX "MMCONFIG at %pR reserved as %s\n",
                       &cfg->res, method);
 
        if (old_size != size) {
@@ -500,7 +526,8 @@ static bool __ref
 pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int early)
 {
        if (!early && !acpi_disabled) {
-               if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0))
+               if (is_mmconf_reserved(is_acpi_reserved, cfg, dev,
+                                      "ACPI motherboard resource"))
                        return true;
 
                if (dev)
@@ -513,6 +540,10 @@ pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int e
                               "MMCONFIG at %pR not reserved in "
                               "ACPI motherboard resources\n",
                               &cfg->res);
+
+               if (is_mmconf_reserved(is_efi_mmio, cfg, dev,
+                                      "EfiMemoryMappedIO"))
+                       return true;
        }
 
        /*
@@ -527,7 +558,8 @@ pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int e
        /* Don't try to do this check unless configuration
           type 1 is available. How about type 2? */
        if (raw_pci_ops)
-               return is_mmconf_reserved(e820__mapped_all, cfg, dev, 1);
+               return is_mmconf_reserved(e820__mapped_all, cfg, dev,
+                                         "E820 entry");
 
        return false;
 }
index 1a536a1..ee21d6a 100644 (file)
@@ -166,10 +166,9 @@ static struct irq_domain *uv_get_irq_domain(void)
        if (!fn)
                goto out;
 
-       uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL);
-       if (uv_domain)
-               uv_domain->parent = x86_vector_domain;
-       else
+       uv_domain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0, fn,
+                                               &uv_domain_ops, NULL);
+       if (!uv_domain)
                irq_domain_free_fwnode(fn);
 out:
        mutex_unlock(&uv_lock);
index 48a3eb0..650cdbb 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/elf.h>
 
 
-Elf32_Half elf_core_extra_phdrs(void)
+Elf32_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
        return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
 }
@@ -60,7 +60,7 @@ int elf_core_write_extra_data(struct coredump_params *cprm)
        return 1;
 }
 
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
        if ( vsyscall_ehdr ) {
                const struct elfhdr *const ehdrp =
index 58db86f..9bdc3b6 100644 (file)
@@ -134,11 +134,6 @@ static inline unsigned p2m_mid_index(unsigned long pfn)
        return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
 }
 
-static inline unsigned p2m_index(unsigned long pfn)
-{
-       return pfn % P2M_PER_PAGE;
-}
-
 static void p2m_top_mfn_init(unsigned long *top)
 {
        unsigned i;
index 228e4df..a6d09fe 100644 (file)
@@ -154,11 +154,6 @@ struct thread_struct {
        unsigned long ra; /* kernel's a0: return address and window call size */
        unsigned long sp; /* kernel's a1: stack pointer */
 
-       /* struct xtensa_cpuinfo info; */
-
-       unsigned long bad_vaddr; /* last user fault */
-       unsigned long bad_uaddr; /* last kernel fault accessing user space */
-       unsigned long error_code;
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
        struct perf_event *ptrace_bp[XCHAL_NUM_IBREAK];
        struct perf_event *ptrace_wp[XCHAL_NUM_DBREAK];
@@ -176,10 +171,6 @@ struct thread_struct {
 {                                                                      \
        ra:             0,                                              \
        sp:             sizeof(init_stack) + (long) &init_stack,        \
-       /*info:         {0}, */                                         \
-       bad_vaddr:      0,                                              \
-       bad_uaddr:      0,                                              \
-       error_code:     0,                                              \
 }
 
 
index 0c25e03..cd98366 100644 (file)
@@ -362,8 +362,6 @@ static void do_unaligned_user(struct pt_regs *regs)
        __die_if_kernel("Unhandled unaligned exception in kernel",
                        regs, SIGKILL);
 
-       current->thread.bad_vaddr = regs->excvaddr;
-       current->thread.error_code = -3;
        pr_info_ratelimited("Unaligned memory access to %08lx in '%s' "
                            "(pid = %d, pc = %#010lx)\n",
                            regs->excvaddr, current->comm,
index 8c781b0..faf7cf3 100644 (file)
@@ -206,8 +206,6 @@ good_area:
 bad_area:
        mmap_read_unlock(mm);
        if (user_mode(regs)) {
-               current->thread.bad_vaddr = address;
-               current->thread.error_code = is_write;
                force_sig_fault(SIGSEGV, code, (void *) address);
                return;
        }
@@ -232,7 +230,6 @@ do_sigbus:
        /* Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
-       current->thread.bad_vaddr = address;
        force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);
 
        /* Kernel mode? Handle exceptions or die */
@@ -252,7 +249,6 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
        if ((entry = search_exception_tables(regs->pc)) != NULL) {
                pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
                         current->comm, regs->pc, entry->fixup);
-               current->thread.bad_uaddr = address;
                regs->pc = entry->fixup;
                return;
        }
index 9321767..b509835 100644 (file)
@@ -283,12 +283,9 @@ static void blk_free_queue(struct request_queue *q)
  *
  * Decrements the refcount of the request_queue and free it when the refcount
  * reaches 0.
- *
- * Context: Can sleep.
  */
 void blk_put_queue(struct request_queue *q)
 {
-       might_sleep();
        if (refcount_dec_and_test(&q->refs))
                blk_free_queue(q);
 }
index 204fe94..a194f30 100644 (file)
@@ -75,7 +75,8 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
 }
 
 #define FIND_CHILD_MIN_SCORE   1
-#define FIND_CHILD_MAX_SCORE   2
+#define FIND_CHILD_MID_SCORE   2
+#define FIND_CHILD_MAX_SCORE   3
 
 static int match_any(struct acpi_device *adev, void *not_used)
 {
@@ -96,8 +97,17 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
                return -ENODEV;
 
        status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
-       if (status == AE_NOT_FOUND)
+       if (status == AE_NOT_FOUND) {
+               /*
+                * Special case: backlight device objects without _STA are
+                * preferred to other objects with the same _ADR value, because
+                * it is more likely that they are actually useful.
+                */
+               if (adev->pnp.type.backlight)
+                       return FIND_CHILD_MID_SCORE;
+
                return FIND_CHILD_MIN_SCORE;
+       }
 
        if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
                return -ENODEV;
index 16dcd31..192d178 100644 (file)
@@ -433,6 +433,13 @@ static const struct dmi_system_id asus_laptop[] = {
                },
        },
        {
+               .ident = "Asus ExpertBook B2402CBA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
+               },
+       },
+       {
                .ident = "Asus ExpertBook B2502",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
index 2743444..0c6f06a 100644 (file)
@@ -1370,9 +1370,12 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
                 * Some devices don't reliably have _HIDs & _CIDs, so add
                 * synthetic HIDs to make sure drivers can find them.
                 */
-               if (acpi_is_video_device(handle))
+               if (acpi_is_video_device(handle)) {
                        acpi_add_id(pnp, ACPI_VIDEO_HID);
-               else if (acpi_bay_match(handle))
+                       pnp->type.backlight = 1;
+                       break;
+               }
+               if (acpi_bay_match(handle))
                        acpi_add_id(pnp, ACPI_BAY_HID);
                else if (acpi_dock_match(handle))
                        acpi_add_id(pnp, ACPI_DOCK_HID);
index 1b78c74..8a541ef 100644 (file)
@@ -50,6 +50,10 @@ static void acpi_video_parse_cmdline(void)
                acpi_backlight_cmdline = acpi_backlight_video;
        if (!strcmp("native", acpi_video_backlight_string))
                acpi_backlight_cmdline = acpi_backlight_native;
+       if (!strcmp("nvidia_wmi_ec", acpi_video_backlight_string))
+               acpi_backlight_cmdline = acpi_backlight_nvidia_wmi_ec;
+       if (!strcmp("apple_gmux", acpi_video_backlight_string))
+               acpi_backlight_cmdline = acpi_backlight_apple_gmux;
        if (!strcmp("none", acpi_video_backlight_string))
                acpi_backlight_cmdline = acpi_backlight_none;
 }
index eceaec3..9695c44 100644 (file)
@@ -640,6 +640,7 @@ config PATA_CS5530
 config PATA_CS5535
        tristate "CS5535 PATA support (Experimental)"
        depends on PCI && (X86_32 || (X86_64 && COMPILE_TEST))
+       depends on !UML
        help
          This option enables support for the NatSemi/AMD CS5535
          companion chip used with the Geode processor family.
index c0227df..4807af1 100644 (file)
@@ -524,7 +524,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
        return 0;
 }
 
-static int xen_blkbk_remove(struct xenbus_device *dev)
+static void xen_blkbk_remove(struct xenbus_device *dev)
 {
        struct backend_info *be = dev_get_drvdata(&dev->dev);
 
@@ -547,8 +547,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
                /* Put the reference we set in xen_blkif_alloc(). */
                xen_blkif_put(be->blkif);
        }
-
-       return 0;
 }
 
 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
index b284892..23ed258 100644 (file)
@@ -2467,7 +2467,7 @@ static void blkback_changed(struct xenbus_device *dev,
        }
 }
 
-static int blkfront_remove(struct xenbus_device *xbdev)
+static void blkfront_remove(struct xenbus_device *xbdev)
 {
        struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
 
@@ -2488,7 +2488,6 @@ static int blkfront_remove(struct xenbus_device *xbdev)
        }
 
        kfree(info);
-       return 0;
 }
 
 static int blkfront_is_ready(struct xenbus_device *dev)
index 3792918..80cca3b 100644 (file)
@@ -360,14 +360,13 @@ static int tpmfront_probe(struct xenbus_device *dev,
        return tpm_chip_register(priv->chip);
 }
 
-static int tpmfront_remove(struct xenbus_device *dev)
+static void tpmfront_remove(struct xenbus_device *dev)
 {
        struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
        struct tpm_private *priv = dev_get_drvdata(&chip->dev);
        tpm_chip_unregister(chip);
        ring_free(priv);
        dev_set_drvdata(&chip->dev, NULL);
-       return 0;
 }
 
 static int tpmfront_resume(struct xenbus_device *dev)
index 204e390..c17bd84 100644 (file)
@@ -307,6 +307,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
                max_perf = min_perf;
 
        amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
+       cpufreq_cpu_put(policy);
 }
 
 static int amd_get_min_freq(struct amd_cpudata *cpudata)
index d180128..c11d22f 100644 (file)
@@ -280,6 +280,7 @@ static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
        policy->cpuinfo.transition_latency = transition_latency;
        policy->dvfs_possible_from_any_cpu = true;
        policy->fast_switch_possible = true;
+       policy->suspend_freq = freq_table[0].frequency;
 
        if (policy_has_boost_freq(policy)) {
                ret = cpufreq_enable_boost_support();
@@ -321,7 +322,6 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
        .flags          = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
                          CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV,
        .verify         = cpufreq_generic_frequency_table_verify,
-       .attr           = cpufreq_generic_attr,
        .get            = apple_soc_cpufreq_get_rate,
        .init           = apple_soc_cpufreq_init,
        .exit           = apple_soc_cpufreq_exit,
@@ -329,6 +329,7 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
        .fast_switch    = apple_soc_cpufreq_fast_switch,
        .register_em    = cpufreq_register_em_with_opp,
        .attr           = apple_soc_cpufreq_hw_attr,
+       .suspend        = cpufreq_generic_suspend,
 };
 
 static int __init apple_soc_cpufreq_module_init(void)
index c10fc33..b74289a 100644 (file)
@@ -445,7 +445,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
                return -ENODEV;
        }
 
-       clk = clk_get(cpu_dev, 0);
+       clk = clk_get(cpu_dev, NULL);
        if (IS_ERR(clk)) {
                dev_err(cpu_dev, "Cannot get clock for CPU0\n");
                return PTR_ERR(clk);
index 432dfb4..022e355 100644 (file)
@@ -487,7 +487,8 @@ static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
        cpu_data = policy->driver_data;
        perf_caps = &cpu_data->perf_caps;
        max_cap = arch_scale_cpu_capacity(cpu);
-       min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
+       min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
+                         perf_caps->highest_perf);
        if ((min_cap == 0) || (max_cap < min_cap))
                return 0;
        return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
@@ -519,10 +520,10 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
        cpu_data = policy->driver_data;
        perf_caps = &cpu_data->perf_caps;
        max_cap = arch_scale_cpu_capacity(cpu_dev->id);
-       min_cap = div_u64(max_cap * perf_caps->lowest_perf,
-                       perf_caps->highest_perf);
-
-       perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
+       min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
+                         perf_caps->highest_perf);
+       perf_step = div_u64((u64)CPPC_EM_CAP_STEP * perf_caps->highest_perf,
+                           max_cap);
        min_step = min_cap / CPPC_EM_CAP_STEP;
        max_step = max_cap / CPPC_EM_CAP_STEP;
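
The (u64) casts matter because the multiplication was previously done in the operands' native width before div_u64() ever saw it. A worked example with illustrative CPPC numbers, assuming 32-bit intermediate arithmetic:

	u32 max_cap = 1024;		/* arch_scale_cpu_capacity() */
	u32 lowest_perf = 0x500000;	/* 5,242,880; firmware-defined */
	u32 highest_perf = 0xa00000;	/* 10,485,760 */

	/* 32-bit: 1024 * 5242880 = 5,368,709,120 wraps to 1,073,741,824,
	 * giving min_cap = 102.  In 64 bits the true result is 512. */
	u64 bad  = div_u64(max_cap * lowest_perf, highest_perf);       /* 102 */
	u64 good = div_u64((u64)max_cap * lowest_perf, highest_perf);  /* 512 */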
 
index 8ab6728..e857036 100644 (file)
@@ -137,6 +137,7 @@ static const struct of_device_id blocklist[] __initconst = {
        { .compatible = "nvidia,tegra30", },
        { .compatible = "nvidia,tegra124", },
        { .compatible = "nvidia,tegra210", },
+       { .compatible = "nvidia,tegra234", },
 
        { .compatible = "qcom,apq8096", },
        { .compatible = "qcom,msm8996", },
@@ -150,6 +151,7 @@ static const struct of_device_id blocklist[] __initconst = {
        { .compatible = "qcom,sdm845", },
        { .compatible = "qcom,sm6115", },
        { .compatible = "qcom,sm6350", },
+       { .compatible = "qcom,sm6375", },
        { .compatible = "qcom,sm8150", },
        { .compatible = "qcom,sm8250", },
        { .compatible = "qcom,sm8350", },
index 340fed3..9505a81 100644 (file)
@@ -649,9 +649,10 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
 {
        struct clk_hw_onecell_data *clk_data;
        struct device *dev = &pdev->dev;
+       struct device_node *soc_node;
        struct device *cpu_dev;
        struct clk *clk;
-       int ret, i, num_domains;
+       int ret, i, num_domains, reg_sz;
 
        clk = clk_get(dev, "xo");
        if (IS_ERR(clk))
@@ -679,7 +680,21 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
                return ret;
 
        /* Allocate qcom_cpufreq_data based on the available frequency domains in DT */
-       num_domains = of_property_count_elems_of_size(dev->of_node, "reg", sizeof(u32) * 4);
+       soc_node = of_get_parent(dev->of_node);
+       if (!soc_node)
+               return -EINVAL;
+
+       ret = of_property_read_u32(soc_node, "#address-cells", &reg_sz);
+       if (ret)
+               goto of_exit;
+
+       ret = of_property_read_u32(soc_node, "#size-cells", &i);
+       if (ret)
+               goto of_exit;
+
+       reg_sz += i;
+
+       num_domains = of_property_count_elems_of_size(dev->of_node, "reg", sizeof(u32) * reg_sz);
        if (num_domains <= 0)
                return num_domains;
 
@@ -743,6 +758,9 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
        else
                dev_dbg(dev, "QCOM CPUFreq HW driver initialized\n");
 
+of_exit:
+       of_node_put(soc_node);
+
        return ret;
 }
 
index 53100fb..12205e2 100644 (file)
@@ -3,7 +3,7 @@
  * Microchip / Atmel ECC (I2C) driver.
  *
  * Copyright (c) 2017, Microchip Technology Inc.
- * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ * Author: Tudor Ambarus
  */
 
 #include <linux/delay.h>
@@ -411,6 +411,6 @@ static void __exit atmel_ecc_exit(void)
 module_init(atmel_ecc_init);
 module_exit(atmel_ecc_exit);
 
-MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
+MODULE_AUTHOR("Tudor Ambarus");
 MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
 MODULE_LICENSE("GPL v2");
index 81ce09b..55bff1e 100644 (file)
@@ -3,7 +3,7 @@
  * Microchip / Atmel ECC (I2C) driver.
  *
  * Copyright (c) 2017, Microchip Technology Inc.
- * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ * Author: Tudor Ambarus
  */
 
 #include <linux/bitrev.h>
@@ -390,6 +390,6 @@ static void __exit atmel_i2c_exit(void)
 module_init(atmel_i2c_init);
 module_exit(atmel_i2c_exit);
 
-MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
+MODULE_AUTHOR("Tudor Ambarus");
 MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
 MODULE_LICENSE("GPL v2");
index 48929ef..35f7857 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2017, Microchip Technology Inc.
- * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ * Author: Tudor Ambarus
  */
 
 #ifndef __ATMEL_I2C_H__
index 19522c5..878deb4 100644 (file)
@@ -394,17 +394,16 @@ static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
  *     Then restart the workq on the new delay
  */
 void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
-                                       unsigned long value)
+                                   unsigned long msec)
 {
-       unsigned long jiffs = msecs_to_jiffies(value);
-
-       if (value == 1000)
-               jiffs = round_jiffies_relative(value);
-
-       edac_dev->poll_msec = value;
-       edac_dev->delay     = jiffs;
+       edac_dev->poll_msec = msec;
+       edac_dev->delay     = msecs_to_jiffies(msec);
 
-       edac_mod_work(&edac_dev->work, jiffs);
+       /* See comment in edac_device_workq_setup() above */
+       if (edac_dev->poll_msec == 1000)
+               edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+       else
+               edac_mod_work(&edac_dev->work, edac_dev->delay);
 }
 
 int edac_device_alloc_index(void)
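
The bug being fixed: the old code passed the raw millisecond count to round_jiffies_relative(), which expects jiffies. A sketch of the difference, assuming HZ=250:

	unsigned long msec = 1000;

	/* Old: rounds a bogus 1000-jiffy (4 s @ HZ=250) relative delay. */
	unsigned long wrong = round_jiffies_relative(msec);

	/* New: convert first, then align the 1 s poll to whole-second
	 * boundaries so periodic EDAC polls across devices batch up. */
	unsigned long right = round_jiffies_relative(msecs_to_jiffies(msec));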
index 763c076..47593af 100644 (file)
@@ -53,7 +53,7 @@ bool edac_stop_work(struct delayed_work *work);
 bool edac_mod_work(struct delayed_work *work, unsigned long delay);
 
 extern void edac_device_reset_delay_period(struct edac_device_ctl_info
-                                          *edac_dev, unsigned long value);
+                                          *edac_dev, unsigned long msec);
 extern void edac_mc_reset_delay_period(unsigned long value);
 
 /*
index 61b76ec..19fba25 100644 (file)
@@ -174,8 +174,10 @@ static int highbank_mc_probe(struct platform_device *pdev)
        drvdata = mci->pvt_info;
        platform_set_drvdata(pdev, mci);
 
-       if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
-               return -ENOMEM;
+       if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+               res = -ENOMEM;
+               goto free;
+       }
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
@@ -243,6 +245,7 @@ err2:
        edac_mc_del_mc(&pdev->dev);
 err:
        devres_release_group(&pdev->dev, NULL);
+free:
        edac_mc_free(mci);
        return res;
 }
index 09716ee..a2b0cbc 100644 (file)
@@ -394,8 +394,8 @@ static int __init efisubsys_init(void)
        efi_kobj = kobject_create_and_add("efi", firmware_kobj);
        if (!efi_kobj) {
                pr_err("efi: Firmware registration failed.\n");
-               destroy_workqueue(efi_rts_wq);
-               return -ENOMEM;
+               error = -ENOMEM;
+               goto err_destroy_wq;
        }
 
        if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
@@ -443,7 +443,10 @@ err_unregister:
 err_put:
        kobject_put(efi_kobj);
        efi_kobj = NULL;
-       destroy_workqueue(efi_rts_wq);
+err_destroy_wq:
+       if (efi_rts_wq)
+               destroy_workqueue(efi_rts_wq);
+
        return error;
 }
 
index 7feee3d..1fba4e0 100644 (file)
@@ -62,6 +62,7 @@ struct efi_runtime_work efi_rts_work;
                                                                        \
        if (!efi_enabled(EFI_RUNTIME_SERVICES)) {                       \
                pr_warn_once("EFI Runtime Services are disabled!\n");   \
+               efi_rts_work.status = EFI_DEVICE_ERROR;                 \
                goto exit;                                              \
        }                                                               \
                                                                        \
index 2652c39..33ae947 100644 (file)
@@ -93,14 +93,19 @@ static int coreboot_table_populate(struct device *dev, void *ptr)
        for (i = 0; i < header->table_entries; i++) {
                entry = ptr_entry;
 
-               device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL);
+               if (entry->size < sizeof(*entry)) {
+                       dev_warn(dev, "coreboot table entry too small!\n");
+                       return -EINVAL;
+               }
+
+               device = kzalloc(sizeof(device->dev) + entry->size, GFP_KERNEL);
                if (!device)
                        return -ENOMEM;
 
                device->dev.parent = dev;
                device->dev.bus = &coreboot_bus_type;
                device->dev.release = coreboot_device_release;
-               memcpy(&device->entry, ptr_entry, entry->size);
+               memcpy(device->raw, ptr_entry, entry->size);
 
                switch (device->entry.tag) {
                case LB_TAG_CBMEM_ENTRY:
index 37f4d33..d814dca 100644 (file)
@@ -79,6 +79,7 @@ struct coreboot_device {
                struct lb_cbmem_ref cbmem_ref;
                struct lb_cbmem_entry cbmem_entry;
                struct lb_framebuffer framebuffer;
+               DECLARE_FLEX_ARRAY(u8, raw);
        };
 };
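
The DECLARE_FLEX_ARRAY() member gives the union a byte-addressable view, so the size check plus memcpy() above copies entry->size bytes without being treated as an overflow of one fixed-size union member. The shape, reduced to its essentials (types illustrative):

struct example_device {
	struct device dev;
	union {
		struct fixed_view_a a;		/* decoded views ... */
		struct fixed_view_b b;
		DECLARE_FLEX_ARRAY(u8, raw);	/* raw byte view for memcpy() */
	};
};

/* memcpy(device->raw, src, entry->size) is now bounded by the
 * allocation, kzalloc(sizeof(device->dev) + entry->size), rather than
 * by whichever union member happens to be named in the copy. */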
 
index e7bcfca..447ee4e 100644 (file)
@@ -440,6 +440,9 @@ static const struct file_operations psci_debugfs_ops = {
 
 static int __init psci_debugfs_init(void)
 {
+       if (!invoke_psci_fn || !psci_ops.get_version)
+               return 0;
+
        return PTR_ERR_OR_ZERO(debugfs_create_file("psci", 0444, NULL, NULL,
                                                   &psci_debugfs_ops));
 }
index b15091d..3b5c537 100644 (file)
@@ -2099,7 +2099,7 @@ int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_b
        }
 
        amdgpu_amdkfd_remove_eviction_fence(
-               bo, bo->kfd_bo->process_info->eviction_fence);
+               bo, bo->vm_bo->vm->process_info->eviction_fence);
 
        amdgpu_bo_unreserve(bo);
 
index 8516c81..7b5ce00 100644 (file)
@@ -61,6 +61,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
                amdgpu_ctx_put(p->ctx);
                return -ECANCELED;
        }
+
+       amdgpu_sync_create(&p->sync);
        return 0;
 }
 
@@ -452,18 +454,6 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
        }
 
        r = amdgpu_sync_fence(&p->sync, fence);
-       if (r)
-               goto error;
-
-       /*
-        * When we have an explicit dependency it might be necessary to insert a
-        * pipeline sync to make sure that all caches etc are flushed and the
-        * next job actually sees the results from the previous one.
-        */
-       if (fence->context == p->gang_leader->base.entity->fence_context)
-               r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
-
-error:
        dma_fence_put(fence);
        return r;
 }
@@ -1188,10 +1178,19 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 {
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+       struct drm_gpu_scheduler *sched;
        struct amdgpu_bo_list_entry *e;
+       struct dma_fence *fence;
        unsigned int i;
        int r;
 
+       r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
+       if (r) {
+               if (r != -ERESTARTSYS)
+                       DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+               return r;
+       }
+
        list_for_each_entry(e, &p->validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                struct dma_resv *resv = bo->tbo.base.resv;
@@ -1211,10 +1210,24 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
                        return r;
        }
 
-       r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
-       if (r && r != -ERESTARTSYS)
-               DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
-       return r;
+       sched = p->gang_leader->base.entity->rq->sched;
+       while ((fence = amdgpu_sync_get_fence(&p->sync))) {
+               struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+
+               /*
+                * When we have a dependency it might be necessary to insert a
+                * pipeline sync to make sure that all caches etc are flushed and the
+                * next job actually sees the results from the previous one
+                * before we start executing on the same scheduler ring.
+                */
+               if (!s_fence || s_fence->sched != sched)
+                       continue;
+
+               r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+               if (r)
+                       return r;
+       }
+       return 0;
 }
 
 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
@@ -1254,9 +1267,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                        continue;
 
                fence = &p->jobs[i]->base.s_fence->scheduled;
+               dma_fence_get(fence);
                r = drm_sched_job_add_dependency(&leader->base, fence);
-               if (r)
+               if (r) {
+                       dma_fence_put(fence);
                        goto error_cleanup;
+               }
        }
 
        if (p->gang_size > 1) {
@@ -1344,6 +1360,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
 {
        unsigned i;
 
+       amdgpu_sync_free(&parser->sync);
        for (i = 0; i < parser->num_post_deps; i++) {
                drm_syncobj_put(parser->post_deps[i].syncobj);
                kfree(parser->post_deps[i].chain);
index afe6af9..2f28a8c 100644 (file)
@@ -36,6 +36,7 @@
 #include <generated/utsrelease.h>
 #include <linux/pci-p2pdma.h>
 
+#include <drm/drm_aperture.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
@@ -90,6 +91,8 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
 #define AMDGPU_MAX_RETRY_LIMIT         2
 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
 
+static const struct drm_driver amdgpu_kms_driver;
+
 const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
@@ -3687,6 +3690,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if (r)
                return r;
 
+       /* Get rid of things like offb */
+       r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
+       if (r)
+               return r;
+
        /* Enable TMZ based on IP_VERSION */
        amdgpu_gmc_tmz_set(adev);
 
index 1353ffd..cd4caaa 100644 (file)
@@ -23,7 +23,6 @@
  */
 
 #include <drm/amdgpu_drm.h>
-#include <drm/drm_aperture.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_fbdev_generic.h>
 #include <drm/drm_gem.h>
@@ -2122,11 +2121,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
        }
 #endif
 
-       /* Get rid of things like offb */
-       ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver);
-       if (ret)
-               return ret;
-
        adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
        if (IS_ERR(adev))
                return PTR_ERR(adev);
index 4e684c2..25a68d8 100644 (file)
@@ -470,8 +470,9 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
        return true;
 
 fail:
-       DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
-                 man->size);
+       if (man)
+               DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
+                         man->size);
        return false;
 }
 
index bac7976..dcd8c06 100644 (file)
@@ -391,8 +391,10 @@ int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
 
                dma_fence_get(f);
                r = drm_sched_job_add_dependency(&job->base, f);
-               if (r)
+               if (r) {
+                       dma_fence_put(f);
                        return r;
+               }
        }
        return 0;
 }
index faa1214..9fa1d81 100644 (file)
@@ -882,7 +882,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
                kfree(rsv);
 
        list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
-               drm_buddy_free_list(&mgr->mm, &rsv->blocks);
+               drm_buddy_free_list(&mgr->mm, &rsv->allocated);
                kfree(rsv);
        }
        drm_buddy_fini(&mgr->mm);
index ecb4c3a..c06ada0 100644 (file)
@@ -200,7 +200,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
        queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
 
        if (q->wptr_bo) {
-               wptr_addr_off = (uint64_t)q->properties.write_ptr - (uint64_t)q->wptr_bo->kfd_bo->va;
+               wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
                queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
        }
 
index 814f998..b94d2c1 100644 (file)
@@ -570,6 +570,15 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
                goto reserve_bo_failed;
        }
 
+       if (clear) {
+               r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
+               if (r) {
+                       pr_debug("failed %d to sync bo\n", r);
+                       amdgpu_bo_unreserve(bo);
+                       goto reserve_bo_failed;
+               }
+       }
+
        r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
        if (r) {
                pr_debug("failed %d to reserve bo\n", r);
index e54b760..b4373b6 100644 (file)
@@ -1261,7 +1261,8 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
                                uint32_t speed)
 {
        struct amdgpu_device *adev = smu->adev;
-       uint32_t tach_period, crystal_clock_freq;
+       uint32_t crystal_clock_freq = 2500;
+       uint32_t tach_period;
        int ret;
 
        if (!speed)
@@ -1271,7 +1272,6 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
        if (ret)
                return ret;
 
-       crystal_clock_freq = amdgpu_asic_get_xclk(adev);
        tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
        WREG32_SOC15(THM, 0, regCG_TACH_CTRL,
                     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL),
@@ -2298,6 +2298,10 @@ bool smu_v13_0_baco_is_support(struct smu_context *smu)
            !smu_baco->platform_support)
                return false;
 
+       /* return true if ASIC is in BACO state already */
+       if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
+               return true;
+
        if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
            !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
                return false;
index 9643b21..4c20d17 100644 (file)
@@ -213,6 +213,7 @@ static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] =
        FEA_MAP(SOC_PCC),
        [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
        [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+       [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
 };
 
 static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
index 5c6c6ad..e87db7e 100644 (file)
@@ -192,6 +192,7 @@ static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] =
        FEA_MAP(SOC_PCC),
        [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
        [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+       [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
index 11bb593..3d1f50f 100644 (file)
@@ -38,6 +38,25 @@ static void drm_block_free(struct drm_buddy *mm,
        kmem_cache_free(slab_blocks, block);
 }
 
+static void list_insert_sorted(struct drm_buddy *mm,
+                              struct drm_buddy_block *block)
+{
+       struct drm_buddy_block *node;
+       struct list_head *head;
+
+       head = &mm->free_list[drm_buddy_block_order(block)];
+       if (list_empty(head)) {
+               list_add(&block->link, head);
+               return;
+       }
+
+       list_for_each_entry(node, head, link)
+               if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
+                       break;
+
+       __list_add(&block->link, node->link.prev, &node->link);
+}
+
 static void mark_allocated(struct drm_buddy_block *block)
 {
        block->header &= ~DRM_BUDDY_HEADER_STATE;
@@ -52,8 +71,7 @@ static void mark_free(struct drm_buddy *mm,
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_FREE;
 
-       list_add(&block->link,
-                &mm->free_list[drm_buddy_block_order(block)]);
+       list_insert_sorted(mm, block);
 }
 
 static void mark_split(struct drm_buddy_block *block)
@@ -387,20 +405,26 @@ err_undo:
 }
 
 static struct drm_buddy_block *
-get_maxblock(struct list_head *head)
+get_maxblock(struct drm_buddy *mm, unsigned int order)
 {
        struct drm_buddy_block *max_block = NULL, *node;
+       unsigned int i;
 
-       max_block = list_first_entry_or_null(head,
-                                            struct drm_buddy_block,
-                                            link);
-       if (!max_block)
-               return NULL;
+       for (i = order; i <= mm->max_order; ++i) {
+               if (!list_empty(&mm->free_list[i])) {
+                       node = list_last_entry(&mm->free_list[i],
+                                              struct drm_buddy_block,
+                                              link);
+                       if (!max_block) {
+                               max_block = node;
+                               continue;
+                       }
 
-       list_for_each_entry(node, head, link) {
-               if (drm_buddy_block_offset(node) >
-                   drm_buddy_block_offset(max_block))
-                       max_block = node;
+                       if (drm_buddy_block_offset(node) >
+                           drm_buddy_block_offset(max_block)) {
+                               max_block = node;
+                       }
+               }
        }
 
        return max_block;
@@ -412,20 +436,23 @@ alloc_from_freelist(struct drm_buddy *mm,
                    unsigned long flags)
 {
        struct drm_buddy_block *block = NULL;
-       unsigned int i;
+       unsigned int tmp;
        int err;
 
-       for (i = order; i <= mm->max_order; ++i) {
-               if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
-                       block = get_maxblock(&mm->free_list[i]);
-                       if (block)
-                               break;
-               } else {
-                       block = list_first_entry_or_null(&mm->free_list[i],
-                                                        struct drm_buddy_block,
-                                                        link);
-                       if (block)
-                               break;
+       if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
+               block = get_maxblock(mm, order);
+               if (block)
+                       /* Store the obtained block order */
+                       tmp = drm_buddy_block_order(block);
+       } else {
+               for (tmp = order; tmp <= mm->max_order; ++tmp) {
+                       if (!list_empty(&mm->free_list[tmp])) {
+                               block = list_last_entry(&mm->free_list[tmp],
+                                                       struct drm_buddy_block,
+                                                       link);
+                               if (block)
+                                       break;
+                       }
                }
        }
 
@@ -434,18 +461,18 @@ alloc_from_freelist(struct drm_buddy *mm,
 
        BUG_ON(!drm_buddy_block_is_free(block));
 
-       while (i != order) {
+       while (tmp != order) {
                err = split_block(mm, block);
                if (unlikely(err))
                        goto err_undo;
 
                block = block->right;
-               i--;
+               tmp--;
        }
        return block;
 
 err_undo:
-       if (i != order)
+       if (tmp != order)
                __drm_buddy_free(mm, block);
        return ERR_PTR(err);
 }
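
Keeping each free_list[order] sorted ascending by block offset is what lets the rewritten get_maxblock() and alloc_from_freelist() above peek at a list end instead of scanning every block. A sketch of the resulting invariant (helper names illustrative):

static struct drm_buddy_block *
lowest_free(struct drm_buddy *mm, unsigned int order)
{
	/* Ascending offset order: head = lowest block. */
	return list_first_entry_or_null(&mm->free_list[order],
					struct drm_buddy_block, link);
}

static struct drm_buddy_block *
highest_free(struct drm_buddy *mm, unsigned int order)
{
	/* ... and tail = highest, which is why the TOPDOWN path can use
	 * list_last_entry() rather than walking the whole list. */
	if (list_empty(&mm->free_list[order]))
		return NULL;
	return list_last_entry(&mm->free_list[order],
			       struct drm_buddy_block, link);
}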
index 52d8800..3659f04 100644 (file)
@@ -304,6 +304,12 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
                },
                .driver_data = (void *)&lcd1200x1920_rightside_up,
+       }, {    /* Lenovo Ideapad D330-10IGL (HD) */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
+               },
+               .driver_data = (void *)&lcd800x1280_rightside_up,
        }, {    /* Lenovo Yoga Book X90F / X91F / X91L */
                .matches = {
                  /* Non exact match to match all versions */
index 7f2831e..6250de9 100644 (file)
@@ -1688,6 +1688,10 @@ void i915_gem_init__contexts(struct drm_i915_private *i915)
        init_contexts(&i915->gem.contexts);
 }
 
+/*
+ * Note that this implicitly consumes the ctx reference, by placing
+ * the ctx in the context_xa.
+ */
 static void gem_context_register(struct i915_gem_context *ctx,
                                 struct drm_i915_file_private *fpriv,
                                 u32 id)
@@ -1703,10 +1707,6 @@ static void gem_context_register(struct i915_gem_context *ctx,
        snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
                 current->comm, pid_nr(ctx->pid));
 
-       /* And finally expose ourselves to userspace via the idr */
-       old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
-       WARN_ON(old);
-
        spin_lock(&ctx->client->ctx_lock);
        list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
        spin_unlock(&ctx->client->ctx_lock);
@@ -1714,6 +1714,10 @@ static void gem_context_register(struct i915_gem_context *ctx,
        spin_lock(&i915->gem.contexts.lock);
        list_add_tail(&ctx->link, &i915->gem.contexts.list);
        spin_unlock(&i915->gem.contexts.lock);
+
+       /* And finally expose ourselves to userspace via the idr */
+       old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
+       WARN_ON(old);
 }
 
 int i915_gem_context_open(struct drm_i915_private *i915,
@@ -2199,14 +2203,22 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv,
        if (IS_ERR(ctx))
                return ctx;
 
+       /*
+        * One for the xarray and one for the caller.  We need to grab
+        * the reference *prior* to making the ctx visble to userspace
+        * the reference *prior* to making the ctx visible to userspace
+        * userspace can try to race us with another thread destroying
+        * the context under our feet.
+        */
+       i915_gem_context_get(ctx);
+
        gem_context_register(ctx, file_priv, id);
 
        old = xa_erase(&file_priv->proto_context_xa, id);
        GEM_BUG_ON(old != pc);
        proto_context_close(file_priv->dev_priv, pc);
 
-       /* One for the xarray and one for the caller */
-       return i915_gem_context_get(ctx);
+       return ctx;
 }
 
 struct i915_gem_context *
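
The reordering in gem_context_register() follows a general rule: take every reference you need and finish initialisation *before* publishing the pointer into a lookup structure, since publication makes the object reachable by other threads. A compilable userspace analogue, with invented names rather than the i915 types:

/* Userspace analogue of "grab the reference before publishing". */
#include <stdatomic.h>
#include <stdlib.h>

struct ctx {
	atomic_int refcount;
};

static _Atomic(struct ctx *) registry;	/* stand-in for the context xarray */

static struct ctx *ctx_create(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	/*
	 * Two references: one the registry will own and one for the
	 * caller, both taken before publication. After the store below,
	 * another thread may look the object up and drop the registry's
	 * reference at any time.
	 */
	atomic_store(&c->refcount, 2);
	atomic_store(&registry, c);	/* publication is the very last step */
	return c;
}

int main(void)
{
	return ctx_create() ? 0 : 1;
}
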
index c3cd926..4a14f87 100644
 #define GEN9_WM_CHICKEN3                       _MMIO(0x5588)
 #define   GEN9_FACTOR_IN_CLR_VAL_HIZ           (1 << 9)
 
-#define CHICKEN_RASTER_1                       _MMIO(0x6204)
+#define CHICKEN_RASTER_1                       MCR_REG(0x6204)
 #define   DIS_SF_ROUND_NEAREST_EVEN            REG_BIT(8)
 
-#define CHICKEN_RASTER_2                       _MMIO(0x6208)
+#define CHICKEN_RASTER_2                       MCR_REG(0x6208)
 #define   TBIMR_FAST_CLIP                      REG_BIT(5)
 
 #define VFLSKPD                                        MCR_REG(0x62a8)
index 24736eb..78dc5e4 100644
@@ -278,6 +278,7 @@ out:
 static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
 {
        struct intel_uncore *uncore = gt->uncore;
+       int loops = 2;
        int err;
 
        /*
@@ -285,18 +286,39 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
         * for fifo space for the write or forcewake the chip for
         * the read
         */
-       intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
+       do {
+               intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
 
-       /* Wait for the device to ack the reset requests */
-       err = __intel_wait_for_register_fw(uncore,
-                                          GEN6_GDRST, hw_domain_mask, 0,
-                                          500, 0,
-                                          NULL);
+               /*
+                * Wait for the device to ack the reset requests.
+                *
+                * On some platforms, e.g. Jasperlake, we see that the
+                * engine register state is not cleared until shortly after
+                * GDRST reports completion, causing a failure as we try
+                * to immediately resume while the internal state is still
+                * in flux. If we immediately repeat the reset, the second
+                * reset appears to serialise with the first, and since
+                * it is a no-op, the registers should retain their reset
+                * value. However, there is still a concern that upon
+                * leaving the second reset, the internal engine state
+                * is still in flux and not ready for resuming.
+                */
+               err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
+                                                  hw_domain_mask, 0,
+                                                  2000, 0,
+                                                  NULL);
+       } while (err == 0 && --loops);
        if (err)
                GT_TRACE(gt,
                         "Wait for 0x%08x engines reset failed\n",
                         hw_domain_mask);
 
+       /*
+        * As we have observed that the engine state is still volatile
+        * after GDRST is acked, impose a small delay to let everything settle.
+        */
+       udelay(50);
+
        return err;
 }
 
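
The retry loop bounds the reset to two attempts and, unusually, repeats on *success* so that the second no-op reset serialises with the first. A compilable sketch of the same shape, using fake helpers in place of the register accessors:

/* Compilable sketch with fake helpers; not the real register interface. */
#include <stdbool.h>

static void issue_reset(void) { }		/* pretend: write GDRST */
static bool ack_seen(void)    { return true; }	/* pretend: poll for the ack */

int main(void)
{
	int loops = 2;	/* mirror the two attempts in the hunk above */
	int err;

	do {
		issue_reset();
		err = ack_seen() ? 0 : -1;
		/*
		 * Note the unusual condition: we loop again on success so
		 * the second, no-op reset serialises with the first and
		 * gives the engine state time to stabilise.
		 */
	} while (err == 0 && --loops);

	return err ? 1 : 0;
}
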
index 2afb4f8..5be2f91 100644
@@ -645,7 +645,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
 static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
                                   struct i915_wa_list *wal)
 {
-       wa_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
+       wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
        wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
                             REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
        wa_mcr_add(wal,
@@ -775,7 +775,7 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
                wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
 
        /* Wa_15010599737:dg2 */
-       wa_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
+       wa_mcr_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
 }
 
 static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
index 3a33be5..135390d 100644
@@ -2116,7 +2116,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
        if (!obj->mm.rsgt)
                return -EBUSY;
 
-       err = dma_resv_reserve_fences(obj->base.resv, 1);
+       err = dma_resv_reserve_fences(obj->base.resv, 2);
        if (err)
                return -EBUSY;
 
index 6484b97..f3c9600 100644
@@ -876,7 +876,8 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
 #define GBIF_CLIENT_HALT_MASK             BIT(0)
 #define GBIF_ARB_HALT_MASK                BIT(1)
 
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu,
+               bool gx_off)
 {
        struct msm_gpu *gpu = &adreno_gpu->base;
 
@@ -889,9 +890,11 @@ static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
                return;
        }
 
-       /* Halt the gx side of GBIF */
-       gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
-       spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+       if (gx_off) {
+               /* Halt the gx side of GBIF */
+               gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
+               spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+       }
 
        /* Halt new client requests on GBIF */
        gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
@@ -929,7 +932,7 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
        /* Halt the gmu cm3 core */
        gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
 
-       a6xx_bus_clear_pending_transactions(adreno_gpu);
+       a6xx_bus_clear_pending_transactions(adreno_gpu, true);
 
        /* Reset GPU core blocks */
        gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
@@ -1083,7 +1086,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
                        return;
                }
 
-               a6xx_bus_clear_pending_transactions(adreno_gpu);
+               a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
 
                /* tell the GMU we want to slumber */
                ret = a6xx_gmu_notify_slumber(gmu);
index 36c8fb6..3be0f29 100644
@@ -1270,6 +1270,12 @@ static void a6xx_recover(struct msm_gpu *gpu)
        if (hang_debug)
                a6xx_dump(gpu);
 
+       /*
+        * Set the hung flag so that recovery-specific sequences run during
+        * the runtime PM suspend we are about to trigger.
+        */
+       a6xx_gpu->hung = true;
+
        /* Halt SQE first */
        gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
 
@@ -1312,6 +1318,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
        mutex_unlock(&gpu->active_lock);
 
        msm_gpu_hw_init(gpu);
+       a6xx_gpu->hung = false;
 }
 
 static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
index ab853f6..eea2e60 100644
@@ -32,6 +32,7 @@ struct a6xx_gpu {
        void *llc_slice;
        void *htw_llc_slice;
        bool have_mmu500;
+       bool hung;
 };
 
 #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
index 5d4b1c9..b4f9b13 100644
@@ -29,11 +29,9 @@ enum {
        ADRENO_FW_MAX,
 };
 
-enum adreno_quirks {
-       ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
-       ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
-       ADRENO_QUIRK_LMLOADKILL_DISABLE = 3,
-};
+#define ADRENO_QUIRK_TWO_PASS_USE_WFI          BIT(0)
+#define ADRENO_QUIRK_FAULT_DETECT_MASK         BIT(1)
+#define ADRENO_QUIRK_LMLOADKILL_DISABLE                BIT(2)
 
 struct adreno_rev {
        uint8_t  core;
@@ -65,7 +63,7 @@ struct adreno_info {
        const char *name;
        const char *fw[ADRENO_FW_MAX];
        uint32_t gmem;
-       enum adreno_quirks quirks;
+       u64 quirks;
        struct msm_gpu *(*init)(struct drm_device *dev);
        const char *zapfw;
        u32 inactive_period;
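
The switch from an enum to BIT() values is a functional fix, not a style change: the old enumerators 1, 2 and 3 are not disjoint bit patterns (3 == 1 | 2), so combining the first two quirks in a mask was indistinguishable from the third. A compilable demonstration of the collision:

/* Demonstration of why the old enum values collide as flags. */
#include <assert.h>

#define OLD_TWO_PASS_USE_WFI	1
#define OLD_FAULT_DETECT_MASK	2
#define OLD_LMLOADKILL_DISABLE	3	/* == 1 | 2, not a distinct bit */

#define NEW_TWO_PASS_USE_WFI	(1u << 0)
#define NEW_FAULT_DETECT_MASK	(1u << 1)
#define NEW_LMLOADKILL_DISABLE	(1u << 2)	/* disjoint bit */

int main(void)
{
	assert((OLD_TWO_PASS_USE_WFI | OLD_FAULT_DETECT_MASK) ==
	       OLD_LMLOADKILL_DISABLE);
	assert((NEW_TWO_PASS_USE_WFI | NEW_FAULT_DETECT_MASK) !=
	       NEW_LMLOADKILL_DISABLE);
	return 0;
}
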
index 7cbcef6..62f6ff6 100644
@@ -132,7 +132,6 @@ static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
  * dpu_encoder_phys_wb_setup_fb - setup output framebuffer
  * @phys_enc:  Pointer to physical encoder
  * @fb:                Pointer to output framebuffer
- * @wb_roi:    Pointer to output region of interest
  */
 static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
                struct drm_framebuffer *fb)
@@ -692,7 +691,7 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
 
 /**
  * dpu_encoder_phys_wb_init - initialize writeback encoder
- * @init:      Pointer to init info structure with initialization params
+ * @p: Pointer to init info structure with initialization params
  */
 struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
                struct dpu_enc_phys_init_params *p)
index d030a93..cc3efed 100644
@@ -423,6 +423,10 @@ void dp_aux_isr(struct drm_dp_aux *dp_aux)
 
        isr = dp_catalog_aux_get_irq(aux->catalog);
 
+       /* no interrupts pending, return immediately */
+       if (!isr)
+               return;
+
        if (!aux->cmd_busy)
                return;
 
index 4d3fdc8..97372bb 100644
@@ -532,11 +532,19 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
 
        ret = devm_pm_runtime_enable(&pdev->dev);
        if (ret)
-               return ret;
+               goto err_put_phy;
 
        platform_set_drvdata(pdev, hdmi);
 
-       return component_add(&pdev->dev, &msm_hdmi_ops);
+       ret = component_add(&pdev->dev, &msm_hdmi_ops);
+       if (ret)
+               goto err_put_phy;
+
+       return 0;
+
+err_put_phy:
+       msm_hdmi_put_phy(hdmi);
+       return ret;
 }
 
 static int msm_hdmi_dev_remove(struct platform_device *pdev)
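
The probe fix applies the usual error-unwind idiom: once a resource is held, every later failure jumps to a label that releases it, keeping acquisition and release in mirror order. A generic sketch with invented helpers, not the msm code:

/* Generic goto-unwind sketch; invented helpers only. */
#include <stdio.h>

static int  get_phy(void)    { return 0; }
static void put_phy(void)    { puts("phy released"); }
static int  enable_rpm(void) { return -1; }	/* pretend this step fails */

static int probe(void)
{
	int ret = get_phy();

	if (ret)
		return ret;	/* nothing acquired yet, plain return */

	ret = enable_rpm();
	if (ret)
		goto err_put_phy;	/* undo the phy we already hold */

	return 0;

err_put_phy:
	put_phy();
	return ret;
}

int main(void)
{
	probe();
	return 0;
}
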
index 8b0b0ac..45e81eb 100644
@@ -1278,7 +1278,7 @@ void msm_drv_shutdown(struct platform_device *pdev)
         * msm_drm_init, drm_dev->registered is used as an indicator that the
         * shutdown will be successful.
         */
-       if (drm && drm->registered)
+       if (drm && drm->registered && priv->kms)
                drm_atomic_helper_shutdown(drm);
 }
 
index 86b28ad..2527afe 100644
@@ -47,15 +47,17 @@ struct msm_mdss {
 static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
                                            struct msm_mdss *msm_mdss)
 {
-       struct icc_path *path0 = of_icc_get(dev, "mdp0-mem");
-       struct icc_path *path1 = of_icc_get(dev, "mdp1-mem");
+       struct icc_path *path0;
+       struct icc_path *path1;
 
+       path0 = of_icc_get(dev, "mdp0-mem");
        if (IS_ERR_OR_NULL(path0))
                return PTR_ERR_OR_ZERO(path0);
 
        msm_mdss->path[0] = path0;
        msm_mdss->num_paths = 1;
 
+       path1 = of_icc_get(dev, "mdp1-mem");
        if (!IS_ERR_OR_NULL(path1)) {
                msm_mdss->path[1] = path1;
                msm_mdss->num_paths++;
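
Deferring the second of_icc_get() until the first path has been validated matters because the old code could return early with path1 already acquired and never released. A sketch of the acquire-validate-acquire ordering, with invented helpers:

/* Ordered-acquisition sketch; invented helpers, not the icc API. */
#include <stdlib.h>

static void *acquire(const char *name) { (void)name; return malloc(1); }
static void  release(void *res)        { free(res); }

static int parse_paths(void)
{
	void *path0, *path1;

	/* Validate the mandatory resource before acquiring the next one,
	 * so a failure here cannot leak a second, already-held resource. */
	path0 = acquire("mdp0-mem");
	if (!path0)
		return -1;

	path1 = acquire("mdp1-mem");	/* optional second path */

	release(path1);			/* demo only: free(NULL) is a no-op */
	release(path0);
	return 0;
}

int main(void)
{
	return parse_paths();
}
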
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
deleted file mode 100644
index e87de79..0000000
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * Copyright © 2007 David Airlie
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *     David Airlie
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/tty.h>
-#include <linux/sysrq.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/screen_info.h>
-#include <linux/vga_switcheroo.h>
-#include <linux/console.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_atomic.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_gem.h"
-#include "nouveau_bo.h"
-#include "nouveau_fbcon.h"
-#include "nouveau_chan.h"
-#include "nouveau_vmm.h"
-
-#include "nouveau_crtc.h"
-
-MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
-int nouveau_nofbaccel = 0;
-module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
-
-MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)");
-static int nouveau_fbcon_bpp;
-module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400);
-
-static void
-nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       struct nvif_device *device = &drm->client.device;
-       int ret;
-
-       if (info->state != FBINFO_STATE_RUNNING)
-               return;
-
-       ret = -ENODEV;
-       if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-           mutex_trylock(&drm->client.mutex)) {
-               if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
-                       ret = nv04_fbcon_fillrect(info, rect);
-               else
-               if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
-                       ret = nv50_fbcon_fillrect(info, rect);
-               else
-                       ret = nvc0_fbcon_fillrect(info, rect);
-               mutex_unlock(&drm->client.mutex);
-       }
-
-       if (ret == 0)
-               return;
-
-       if (ret != -ENODEV)
-               nouveau_fbcon_gpu_lockup(info);
-       drm_fb_helper_cfb_fillrect(info, rect);
-}
-
-static void
-nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       struct nvif_device *device = &drm->client.device;
-       int ret;
-
-       if (info->state != FBINFO_STATE_RUNNING)
-               return;
-
-       ret = -ENODEV;
-       if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-           mutex_trylock(&drm->client.mutex)) {
-               if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
-                       ret = nv04_fbcon_copyarea(info, image);
-               else
-               if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
-                       ret = nv50_fbcon_copyarea(info, image);
-               else
-                       ret = nvc0_fbcon_copyarea(info, image);
-               mutex_unlock(&drm->client.mutex);
-       }
-
-       if (ret == 0)
-               return;
-
-       if (ret != -ENODEV)
-               nouveau_fbcon_gpu_lockup(info);
-       drm_fb_helper_cfb_copyarea(info, image);
-}
-
-static void
-nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       struct nvif_device *device = &drm->client.device;
-       int ret;
-
-       if (info->state != FBINFO_STATE_RUNNING)
-               return;
-
-       ret = -ENODEV;
-       if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-           mutex_trylock(&drm->client.mutex)) {
-               if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
-                       ret = nv04_fbcon_imageblit(info, image);
-               else
-               if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
-                       ret = nv50_fbcon_imageblit(info, image);
-               else
-                       ret = nvc0_fbcon_imageblit(info, image);
-               mutex_unlock(&drm->client.mutex);
-       }
-
-       if (ret == 0)
-               return;
-
-       if (ret != -ENODEV)
-               nouveau_fbcon_gpu_lockup(info);
-       drm_fb_helper_cfb_imageblit(info, image);
-}
-
-static int
-nouveau_fbcon_sync(struct fb_info *info)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       struct nouveau_channel *chan = drm->channel;
-       int ret;
-
-       if (!chan || !chan->accel_done || in_interrupt() ||
-           info->state != FBINFO_STATE_RUNNING ||
-           info->flags & FBINFO_HWACCEL_DISABLED)
-               return 0;
-
-       if (!mutex_trylock(&drm->client.mutex))
-               return 0;
-
-       ret = nouveau_channel_idle(chan);
-       mutex_unlock(&drm->client.mutex);
-       if (ret) {
-               nouveau_fbcon_gpu_lockup(info);
-               return 0;
-       }
-
-       chan->accel_done = false;
-       return 0;
-}
-
-static int
-nouveau_fbcon_open(struct fb_info *info, int user)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       int ret = pm_runtime_get_sync(drm->dev->dev);
-       if (ret < 0 && ret != -EACCES) {
-               pm_runtime_put(drm->dev->dev);
-               return ret;
-       }
-       return 0;
-}
-
-static int
-nouveau_fbcon_release(struct fb_info *info, int user)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       pm_runtime_put(drm->dev->dev);
-       return 0;
-}
-
-static const struct fb_ops nouveau_fbcon_ops = {
-       .owner = THIS_MODULE,
-       DRM_FB_HELPER_DEFAULT_OPS,
-       .fb_open = nouveau_fbcon_open,
-       .fb_release = nouveau_fbcon_release,
-       .fb_fillrect = nouveau_fbcon_fillrect,
-       .fb_copyarea = nouveau_fbcon_copyarea,
-       .fb_imageblit = nouveau_fbcon_imageblit,
-       .fb_sync = nouveau_fbcon_sync,
-};
-
-static const struct fb_ops nouveau_fbcon_sw_ops = {
-       .owner = THIS_MODULE,
-       DRM_FB_HELPER_DEFAULT_OPS,
-       .fb_open = nouveau_fbcon_open,
-       .fb_release = nouveau_fbcon_release,
-       .fb_fillrect = drm_fb_helper_cfb_fillrect,
-       .fb_copyarea = drm_fb_helper_cfb_copyarea,
-       .fb_imageblit = drm_fb_helper_cfb_imageblit,
-};
-
-void
-nouveau_fbcon_accel_save_disable(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon && drm->fbcon->helper.info) {
-               drm->fbcon->saved_flags = drm->fbcon->helper.info->flags;
-               drm->fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED;
-       }
-}
-
-void
-nouveau_fbcon_accel_restore(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon && drm->fbcon->helper.info)
-               drm->fbcon->helper.info->flags = drm->fbcon->saved_flags;
-}
-
-static void
-nouveau_fbcon_accel_fini(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_fbdev *fbcon = drm->fbcon;
-       if (fbcon && drm->channel) {
-               console_lock();
-               if (fbcon->helper.info)
-                       fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED;
-               console_unlock();
-               nouveau_channel_idle(drm->channel);
-               nvif_object_dtor(&fbcon->twod);
-               nvif_object_dtor(&fbcon->blit);
-               nvif_object_dtor(&fbcon->gdi);
-               nvif_object_dtor(&fbcon->patt);
-               nvif_object_dtor(&fbcon->rop);
-               nvif_object_dtor(&fbcon->clip);
-               nvif_object_dtor(&fbcon->surf2d);
-       }
-}
-
-static void
-nouveau_fbcon_accel_init(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_fbdev *fbcon = drm->fbcon;
-       struct fb_info *info = fbcon->helper.info;
-       int ret;
-
-       if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
-               ret = nv04_fbcon_accel_init(info);
-       else
-       if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
-               ret = nv50_fbcon_accel_init(info);
-       else
-               ret = nvc0_fbcon_accel_init(info);
-
-       if (ret == 0)
-               info->fbops = &nouveau_fbcon_ops;
-}
-
-static void
-nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
-{
-       struct fb_info *info = fbcon->helper.info;
-       struct fb_fillrect rect;
-
-       /* Clear the entire fbcon.  The drm will program every connector
-        * with its preferred mode.  If the sizes differ, one display will
-        * quite likely have garbage around the console.
-        */
-       rect.dx = rect.dy = 0;
-       rect.width = info->var.xres_virtual;
-       rect.height = info->var.yres_virtual;
-       rect.color = 0;
-       rect.rop = ROP_COPY;
-       info->fbops->fb_fillrect(info, &rect);
-}
-
-static int
-nouveau_fbcon_create(struct drm_fb_helper *helper,
-                    struct drm_fb_helper_surface_size *sizes)
-{
-       struct nouveau_fbdev *fbcon =
-               container_of(helper, struct nouveau_fbdev, helper);
-       struct drm_device *dev = fbcon->helper.dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvif_device *device = &drm->client.device;
-       struct fb_info *info;
-       struct drm_framebuffer *fb;
-       struct nouveau_channel *chan;
-       struct nouveau_bo *nvbo;
-       struct drm_mode_fb_cmd2 mode_cmd = {};
-       int ret;
-
-       mode_cmd.width = sizes->surface_width;
-       mode_cmd.height = sizes->surface_height;
-
-       mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
-       mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
-
-       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
-                                                         sizes->surface_depth);
-
-       ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] *
-                             mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM,
-                             0, 0x0000, &nvbo);
-       if (ret) {
-               NV_ERROR(drm, "failed to allocate framebuffer\n");
-               goto out;
-       }
-
-       ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb);
-       if (ret)
-               goto out_unref;
-
-       ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
-       if (ret) {
-               NV_ERROR(drm, "failed to pin fb: %d\n", ret);
-               goto out_unref;
-       }
-
-       ret = nouveau_bo_map(nvbo);
-       if (ret) {
-               NV_ERROR(drm, "failed to map fb: %d\n", ret);
-               goto out_unpin;
-       }
-
-       chan = nouveau_nofbaccel ? NULL : drm->channel;
-       if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-               ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma);
-               if (ret) {
-                       NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
-                       chan = NULL;
-               }
-       }
-
-       info = drm_fb_helper_alloc_info(helper);
-       if (IS_ERR(info)) {
-               ret = PTR_ERR(info);
-               goto out_unlock;
-       }
-
-       /* setup helper */
-       fbcon->helper.fb = fb;
-
-       if (!chan)
-               info->flags = FBINFO_HWACCEL_DISABLED;
-       else
-               info->flags = FBINFO_HWACCEL_COPYAREA |
-                             FBINFO_HWACCEL_FILLRECT |
-                             FBINFO_HWACCEL_IMAGEBLIT;
-       info->fbops = &nouveau_fbcon_sw_ops;
-       info->fix.smem_start = nvbo->bo.resource->bus.offset;
-       info->fix.smem_len = nvbo->bo.base.size;
-
-       info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
-       info->screen_size = nvbo->bo.base.size;
-
-       drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
-
-       /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
-       if (chan)
-               nouveau_fbcon_accel_init(dev);
-       nouveau_fbcon_zfill(dev, fbcon);
-
-       /* To allow resizing without swapping buffers */
-       NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
-               fb->width, fb->height, nvbo->offset, nvbo);
-
-       if (dev_is_pci(dev->dev))
-               vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info);
-
-       return 0;
-
-out_unlock:
-       if (chan)
-               nouveau_vma_del(&fbcon->vma);
-       nouveau_bo_unmap(nvbo);
-out_unpin:
-       nouveau_bo_unpin(nvbo);
-out_unref:
-       nouveau_bo_ref(NULL, &nvbo);
-out:
-       return ret;
-}
-
-static int
-nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
-{
-       struct drm_framebuffer *fb = fbcon->helper.fb;
-       struct nouveau_bo *nvbo;
-
-       drm_fb_helper_unregister_info(&fbcon->helper);
-       drm_fb_helper_fini(&fbcon->helper);
-
-       if (fb && fb->obj[0]) {
-               nvbo = nouveau_gem_object(fb->obj[0]);
-               nouveau_vma_del(&fbcon->vma);
-               nouveau_bo_unmap(nvbo);
-               nouveau_bo_unpin(nvbo);
-               drm_framebuffer_put(fb);
-       }
-
-       return 0;
-}
-
-void nouveau_fbcon_gpu_lockup(struct fb_info *info)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-
-       NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
-       info->flags |= FBINFO_HWACCEL_DISABLED;
-}
-
-static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
-       .fb_probe = nouveau_fbcon_create,
-};
-
-static void
-nouveau_fbcon_set_suspend_work(struct work_struct *work)
-{
-       struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
-       int state = READ_ONCE(drm->fbcon_new_state);
-
-       if (state == FBINFO_STATE_RUNNING)
-               pm_runtime_get_sync(drm->dev->dev);
-
-       console_lock();
-       if (state == FBINFO_STATE_RUNNING)
-               nouveau_fbcon_accel_restore(drm->dev);
-       drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
-       if (state != FBINFO_STATE_RUNNING)
-               nouveau_fbcon_accel_save_disable(drm->dev);
-       console_unlock();
-
-       if (state == FBINFO_STATE_RUNNING) {
-               nouveau_fbcon_hotplug_resume(drm->fbcon);
-               pm_runtime_mark_last_busy(drm->dev->dev);
-               pm_runtime_put_autosuspend(drm->dev->dev);
-       }
-}
-
-void
-nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-
-       if (!drm->fbcon)
-               return;
-
-       drm->fbcon_new_state = state;
-       /* Since runtime resume can happen as a result of a sysfs operation,
-        * it's possible we already have the console locked. So handle fbcon
-        * init/deinit from a separate work thread
-        */
-       schedule_work(&drm->fbcon_work);
-}
-
-void
-nouveau_fbcon_output_poll_changed(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_fbdev *fbcon = drm->fbcon;
-       int ret;
-
-       if (!fbcon)
-               return;
-
-       mutex_lock(&fbcon->hotplug_lock);
-
-       ret = pm_runtime_get(dev->dev);
-       if (ret == 1 || ret == -EACCES) {
-               drm_fb_helper_hotplug_event(&fbcon->helper);
-
-               pm_runtime_mark_last_busy(dev->dev);
-               pm_runtime_put_autosuspend(dev->dev);
-       } else if (ret == 0) {
-               /* If the GPU was already in the process of suspending before
-                * this event happened, then we can't block here as we'll
-                * deadlock the runtime pmops since they wait for us to
-                * finish. So, just defer this event for when we runtime
-                * resume again. It will be handled by fbcon_work.
-                */
-               NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
-               fbcon->hotplug_waiting = true;
-               pm_runtime_put_noidle(drm->dev->dev);
-       } else {
-               DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
-                        ret);
-       }
-
-       mutex_unlock(&fbcon->hotplug_lock);
-}
-
-void
-nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
-{
-       struct nouveau_drm *drm;
-
-       if (!fbcon)
-               return;
-       drm = nouveau_drm(fbcon->helper.dev);
-
-       mutex_lock(&fbcon->hotplug_lock);
-       if (fbcon->hotplug_waiting) {
-               fbcon->hotplug_waiting = false;
-
-               NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
-               drm_fb_helper_hotplug_event(&fbcon->helper);
-       }
-       mutex_unlock(&fbcon->hotplug_lock);
-}
-
-int
-nouveau_fbcon_init(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_fbdev *fbcon;
-       int preferred_bpp = nouveau_fbcon_bpp;
-       int ret;
-
-       if (!dev->mode_config.num_crtc ||
-           (to_pci_dev(dev->dev)->class >> 8) != PCI_CLASS_DISPLAY_VGA)
-               return 0;
-
-       fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
-       if (!fbcon)
-               return -ENOMEM;
-
-       drm->fbcon = fbcon;
-       INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
-       mutex_init(&fbcon->hotplug_lock);
-
-       drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
-
-       ret = drm_fb_helper_init(dev, &fbcon->helper);
-       if (ret)
-               goto free;
-
-       if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
-               if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
-                       preferred_bpp = 8;
-               else
-               if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
-                       preferred_bpp = 16;
-               else
-                       preferred_bpp = 32;
-       }
-
-       /* disable all the possible outputs/crtcs before entering KMS mode */
-       if (!drm_drv_uses_atomic_modeset(dev))
-               drm_helper_disable_unused_functions(dev);
-
-       ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
-       if (ret)
-               goto fini;
-
-       if (fbcon->helper.info)
-               fbcon->helper.info->pixmap.buf_align = 4;
-       return 0;
-
-fini:
-       drm_fb_helper_fini(&fbcon->helper);
-free:
-       kfree(fbcon);
-       drm->fbcon = NULL;
-       return ret;
-}
-
-void
-nouveau_fbcon_fini(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-
-       if (!drm->fbcon)
-               return;
-
-       drm_kms_helper_poll_fini(dev);
-       nouveau_fbcon_accel_fini(dev);
-       nouveau_fbcon_destroy(dev, drm->fbcon);
-       kfree(drm->fbcon);
-       drm->fbcon = NULL;
-}
index ba3aa0a..da5493f 100644
@@ -173,7 +173,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 
        clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
        if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
-               ttm_move_memcpy(clear, ttm->num_pages, dst_iter, src_iter);
+               ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);
 
        if (!src_iter->ops->maps_tt)
                ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
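
Sizing the copy from dst_mem->size avoids dereferencing ttm, which the preceding check allows to be NULL. PFN_UP() converts a byte count into a page count, rounding up; roughly (userspace rendition, 4 KiB pages assumed):

/* Userspace rendition of the kernel's PFN_UP(); 4 KiB pages assumed. */
#include <assert.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)	/* bytes -> pages */

int main(void)
{
	assert(PFN_UP(1) == 1);
	assert(PFN_UP(PAGE_SIZE) == 1);
	assert(PFN_UP(PAGE_SIZE + 1) == 2);
	return 0;
}
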
index 5d05093..9f4a904 100644
@@ -358,10 +358,18 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
                drm_gem_object_release(obj);
                return ret;
        }
-       drm_gem_object_put(obj);
 
        rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
        rc->bo_handle = handle;
+
+       /*
+        * The handle owns the reference now.  But we must drop our
+        * remaining reference *after* we no longer need to dereference
+        * the obj.  Otherwise userspace could guess the handle and
+        * race closing it from another thread.
+        */
+       drm_gem_object_put(obj);
+
        return 0;
 }
 
@@ -723,11 +731,18 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
                drm_gem_object_release(obj);
                return ret;
        }
-       drm_gem_object_put(obj);
 
        rc_blob->res_handle = bo->hw_res_handle;
        rc_blob->bo_handle = handle;
 
+       /*
+        * The handle owns the reference now.  But we must drop our
+        * remaining reference *after* we no longer need to dereference
+        * the obj.  Otherwise userspace could guess the handle and
+        * race closing it from another thread.
+        */
+       drm_gem_object_put(obj);
+
        return 0;
 }
 
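
Both hunks apply the rule spelled out in the new comments: once a handle is installed, userspace may close it and drop that reference at any moment, so the creator's own reference must outlive the last dereference of the object. A compilable analogue with invented types, not the virtio-gpu code:

/* Userspace analogue of "drop the reference after the last use". */
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int ref;
	int hw_res_handle;
};

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		free(o);
}

static int install_handle(struct obj *o)
{
	(void)o;
	return 42;	/* pretend: the handle table now owns one reference */
}

static int create(struct obj *o, int *out_handle, int *out_res)
{
	int handle = install_handle(o);

	/*
	 * Read everything we need from the object *before* dropping our
	 * reference. Dropping first would let another thread close the
	 * handle and free the object while we still dereference it.
	 */
	*out_res = o->hw_res_handle;
	*out_handle = handle;
	obj_put(o);
	return 0;
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	int h, r;

	if (!o)
		return 1;
	atomic_store(&o->ref, 2);	/* one for the handle, one for us */
	return create(o, &h, &r);
}
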
index 932b125..ddf8373 100644
@@ -254,40 +254,6 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
        kref_put(&base->refcount, ttm_release_base);
 }
 
-/**
- * ttm_base_object_noref_lookup - look up a base object without reference
- * @tfile: The struct ttm_object_file the object is registered with.
- * @key: The object handle.
- *
- * This function looks up a ttm base object and returns a pointer to it
- * without refcounting the pointer. The returned pointer is only valid
- * until ttm_base_object_noref_release() is called, and the object
- * pointed to by the returned pointer may be doomed. Any persistent usage
- * of the object requires a refcount to be taken using kref_get_unless_zero().
- * Iff this function returns successfully it needs to be paired with
- * ttm_base_object_noref_release() and no sleeping- or scheduling functions
- * may be called in between these function calls.
- *
- * Return: A pointer to the object if successful or NULL otherwise.
- */
-struct ttm_base_object *
-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key)
-{
-       struct vmwgfx_hash_item *hash;
-       int ret;
-
-       rcu_read_lock();
-       ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
-       if (ret) {
-               rcu_read_unlock();
-               return NULL;
-       }
-
-       __release(RCU);
-       return hlist_entry(hash, struct ttm_ref_object, hash)->obj;
-}
-EXPORT_SYMBOL(ttm_base_object_noref_lookup);
-
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint64_t key)
 {
@@ -295,15 +261,16 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
        struct vmwgfx_hash_item *hash;
        int ret;
 
-       rcu_read_lock();
-       ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
+       spin_lock(&tfile->lock);
+       ret = ttm_tfile_find_ref(tfile, key, &hash);
 
        if (likely(ret == 0)) {
                base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
                if (!kref_get_unless_zero(&base->refcount))
                        base = NULL;
        }
-       rcu_read_unlock();
+       spin_unlock(&tfile->lock);
 
        return base;
 }
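
The lookup now takes the tfile spinlock and uses kref_get_unless_zero(), which refuses to resurrect an object whose last reference is already gone. A userspace analogue of the get-unless-zero step, with invented types (the kernel helper lives in linux/kref.h):

/* Userspace analogue of kref_get_unless_zero(); invented types. */
#include <stdatomic.h>
#include <stdbool.h>

struct base {
	atomic_int refcount;
};

static bool get_unless_zero(struct base *b)
{
	int old = atomic_load(&b->refcount);

	/* Take a reference only while the count is still non-zero. */
	while (old != 0) {
		if (atomic_compare_exchange_weak(&b->refcount, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;	/* object already dying; caller must not touch it */
}

int main(void)
{
	struct base b = { 1 };

	return get_unless_zero(&b) ? 0 : 1;
}
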
index f0ebbe3..8098a38 100644
@@ -307,18 +307,4 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
 #define ttm_prime_object_kfree(__obj, __prime)         \
        kfree_rcu(__obj, __prime.base.rhead)
 
-struct ttm_base_object *
-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key);
-
-/**
- * ttm_base_object_noref_release - release a base object pointer looked up
- * without reference
- *
- * Releases a base object pointer looked up with ttm_base_object_noref_lookup().
- */
-static inline void ttm_base_object_noref_release(void)
-{
-       __acquire(RCU);
-       rcu_read_unlock();
-}
 #endif
index 321c551..aa1cd51 100644
@@ -716,44 +716,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
 }
 
 /**
- * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
- * @filp: The TTM object file the handle is registered with.
- * @handle: The user buffer object handle.
- *
- * This function looks up a struct vmw_bo and returns a pointer to the
- * struct vmw_buffer_object it derives from without refcounting the pointer.
- * The returned pointer is only valid until vmw_user_bo_noref_release() is
- * called, and the object pointed to by the returned pointer may be doomed.
- * Any persistent usage of the object requires a refcount to be taken using
- * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
- * needs to be paired with vmw_user_bo_noref_release() and no sleeping-
- * or scheduling functions may be called in between these function calls.
- *
- * Return: A struct vmw_buffer_object pointer if successful or negative
- * error pointer on failure.
- */
-struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
-{
-       struct vmw_buffer_object *vmw_bo;
-       struct ttm_buffer_object *bo;
-       struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);
-
-       if (!gobj) {
-               DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
-                         (unsigned long)handle);
-               return ERR_PTR(-ESRCH);
-       }
-       vmw_bo = gem_to_vmw_bo(gobj);
-       bo = ttm_bo_get_unless_zero(&vmw_bo->base);
-       vmw_bo = vmw_buffer_object(bo);
-       drm_gem_object_put(gobj);
-
-       return vmw_bo;
-}
-
-
-/**
  * vmw_bo_fence_single - Utility function to fence a single TTM buffer
  *                       object without unreserving it.
  *
index b062b02..5acbf58 100644
@@ -830,12 +830,7 @@ extern int vmw_user_resource_lookup_handle(
        uint32_t handle,
        const struct vmw_user_resource_conv *converter,
        struct vmw_resource **p_res);
-extern struct vmw_resource *
-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
-                                     struct ttm_object_file *tfile,
-                                     uint32_t handle,
-                                     const struct vmw_user_resource_conv *
-                                     converter);
+
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -875,15 +870,6 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
 }
 
 /**
- * vmw_user_resource_noref_release - release a user resource pointer looked up
- * without reference
- */
-static inline void vmw_user_resource_noref_release(void)
-{
-       ttm_base_object_noref_release();
-}
-
-/**
  * Buffer object helper functions - vmwgfx_bo.c
  */
 extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
@@ -934,8 +920,6 @@ extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
 extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                               struct ttm_resource *mem);
 extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-extern struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle);
 
 /**
  * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
index a5379f6..a44d53e 100644
@@ -290,20 +290,26 @@ static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
        rcache->valid_handle = 0;
 }
 
+enum vmw_val_add_flags {
+       vmw_val_add_flag_none  =      0,
+       vmw_val_add_flag_noctx = 1 << 0,
+};
+
 /**
- * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
- * rcu-protected pointer to the validation list.
+ * vmw_execbuf_res_val_add - Add a resource to the validation list.
  *
  * @sw_context: Pointer to the software context.
- * @res: Unreferenced rcu-protected pointer to the resource.
+ * @res: Pointer to the resource.
  * @dirty: Whether to change dirty status.
+ * @flags: specifies whether to use the context or not
  *
  * Returns: 0 on success. Negative error code on failure. Typical error codes
  * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
  */
-static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
-                                        struct vmw_resource *res,
-                                        u32 dirty)
+static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
+                                  struct vmw_resource *res,
+                                  u32 dirty,
+                                  u32 flags)
 {
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
@@ -318,24 +324,30 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
                if (dirty)
                        vmw_validation_res_set_dirty(sw_context->ctx,
                                                     rcache->private, dirty);
-               vmw_user_resource_noref_release();
                return 0;
        }
 
-       priv_size = vmw_execbuf_res_size(dev_priv, res_type);
-       ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
-                                         dirty, (void **)&ctx_info,
-                                         &first_usage);
-       vmw_user_resource_noref_release();
-       if (ret)
-               return ret;
+       if ((flags & vmw_val_add_flag_noctx) != 0) {
+               ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
+                                                 (void **)&ctx_info, NULL);
+               if (ret)
+                       return ret;
 
-       if (priv_size && first_usage) {
-               ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
-                                             ctx_info);
-               if (ret) {
-                       VMW_DEBUG_USER("Failed first usage context setup.\n");
+       } else {
+               priv_size = vmw_execbuf_res_size(dev_priv, res_type);
+               ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
+                                                 dirty, (void **)&ctx_info,
+                                                 &first_usage);
+               if (ret)
                        return ret;
+
+               if (priv_size && first_usage) {
+                       ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
+                                                     ctx_info);
+                       if (ret) {
+                               VMW_DEBUG_USER("Failed first usage context setup.\n");
+                               return ret;
+                       }
                }
        }
 
@@ -344,43 +356,6 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
 }
 
 /**
- * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
- * validation list if it's not already on it
- *
- * @sw_context: Pointer to the software context.
- * @res: Pointer to the resource.
- * @dirty: Whether to change dirty status.
- *
- * Returns: Zero on success. Negative error code on failure.
- */
-static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
-                                        struct vmw_resource *res,
-                                        u32 dirty)
-{
-       struct vmw_res_cache_entry *rcache;
-       enum vmw_res_type res_type = vmw_res_type(res);
-       void *ptr;
-       int ret;
-
-       rcache = &sw_context->res_cache[res_type];
-       if (likely(rcache->valid && rcache->res == res)) {
-               if (dirty)
-                       vmw_validation_res_set_dirty(sw_context->ctx,
-                                                    rcache->private, dirty);
-               return 0;
-       }
-
-       ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
-                                         &ptr, NULL);
-       if (ret)
-               return ret;
-
-       vmw_execbuf_rcache_update(rcache, res, ptr);
-
-       return 0;
-}
-
-/**
  * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
  * validation list
  *
@@ -398,13 +373,13 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
         * First add the resource the view is pointing to, otherwise it may be
         * swapped out when the view is validated.
         */
-       ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
-                                           vmw_view_dirtying(view));
+       ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
+                                     vmw_view_dirtying(view), vmw_val_add_flag_noctx);
        if (ret)
                return ret;
 
-       return vmw_execbuf_res_noctx_val_add(sw_context, view,
-                                            VMW_RES_DIRTY_NONE);
+       return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
+                                      vmw_val_add_flag_noctx);
 }
 
 /**
@@ -475,8 +450,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                        if (IS_ERR(res))
                                continue;
 
-                       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                                           VMW_RES_DIRTY_SET);
+                       ret = vmw_execbuf_res_val_add(sw_context, res,
+                                                     VMW_RES_DIRTY_SET,
+                                                     vmw_val_add_flag_noctx);
                        if (unlikely(ret != 0))
                                return ret;
                }
@@ -490,9 +466,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
-                       ret = vmw_execbuf_res_noctx_val_add
-                               (sw_context, entry->res,
-                                vmw_binding_dirtying(entry->bt));
+                       ret = vmw_execbuf_res_val_add(sw_context, entry->res,
+                                                     vmw_binding_dirtying(entry->bt),
+                                                     vmw_val_add_flag_noctx);
                if (unlikely(ret != 0))
                        break;
        }
@@ -658,7 +634,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 {
        struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
        struct vmw_resource *res;
-       int ret;
+       int ret = 0;
+       bool needs_unref = false;
 
        if (p_res)
                *p_res = NULL;
@@ -683,17 +660,18 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
                if (ret)
                        return ret;
 
-               res = vmw_user_resource_noref_lookup_handle
-                       (dev_priv, sw_context->fp->tfile, *id_loc, converter);
-               if (IS_ERR(res)) {
+               ret = vmw_user_resource_lookup_handle
+                       (dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
+               if (ret != 0) {
                        VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
                                       (unsigned int) *id_loc);
-                       return PTR_ERR(res);
+                       return ret;
                }
+               needs_unref = true;
 
-               ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
+               ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
                if (unlikely(ret != 0))
-                       return ret;
+                       goto res_check_done;
 
                if (rcache->valid && rcache->res == res) {
                        rcache->valid_handle = true;
@@ -708,7 +686,11 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
        if (p_res)
                *p_res = res;
 
-       return 0;
+res_check_done:
+       if (needs_unref)
+               vmw_resource_unreference(&res);
+
+       return ret;
 }
 
 /**
@@ -1171,9 +1153,9 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
        int ret;
 
        vmw_validation_preload_bo(sw_context->ctx);
-       vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
-       if (IS_ERR(vmw_bo)) {
-               VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
+       ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+       if (ret != 0) {
+               drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
-               return PTR_ERR(vmw_bo);
+               return ret;
        }
        ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
@@ -1225,9 +1207,9 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
        int ret;
 
        vmw_validation_preload_bo(sw_context->ctx);
-       vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
-       if (IS_ERR(vmw_bo)) {
-               VMW_DEBUG_USER("Could not find or use GMR region.\n");
+       ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+       if (ret != 0) {
+               drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
-               return PTR_ERR(vmw_bo);
+               return ret;
        }
        ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
@@ -2025,8 +2007,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
                res = vmw_shader_lookup(vmw_context_res_man(ctx),
                                        cmd->body.shid, cmd->body.type);
                if (!IS_ERR(res)) {
-                       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                                           VMW_RES_DIRTY_NONE);
+                       ret = vmw_execbuf_res_val_add(sw_context, res,
+                                                     VMW_RES_DIRTY_NONE,
+                                                     vmw_val_add_flag_noctx);
                        if (unlikely(ret != 0))
                                return ret;
 
@@ -2273,8 +2256,9 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
                        return PTR_ERR(res);
                }
 
-               ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                                   VMW_RES_DIRTY_NONE);
+               ret = vmw_execbuf_res_val_add(sw_context, res,
+                                             VMW_RES_DIRTY_NONE,
+                                             vmw_val_add_flag_noctx);
                if (ret)
                        return ret;
        }
@@ -2777,8 +2761,8 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
                return PTR_ERR(res);
        }
 
-       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                           VMW_RES_DIRTY_NONE);
+       ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
+                                     vmw_val_add_flag_noctx);
        if (ret) {
                VMW_DEBUG_USER("Error creating resource validation node.\n");
                return ret;
@@ -3098,8 +3082,8 @@ static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
 
        vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
 
-       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                           VMW_RES_DIRTY_NONE);
+       ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
+                                     vmw_val_add_flag_noctx);
        if (ret) {
                DRM_ERROR("Error creating resource validation node.\n");
                return ret;
@@ -3148,8 +3132,8 @@ static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
                return 0;
        }
 
-       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                           VMW_RES_DIRTY_NONE);
+       ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
+                                     vmw_val_add_flag_noctx);
        if (ret) {
                DRM_ERROR("Error creating resource validation node.\n");
                return ret;
@@ -4066,22 +4050,26 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
        if (ret)
                return ret;
 
-       res = vmw_user_resource_noref_lookup_handle
+       ret = vmw_user_resource_lookup_handle
                (dev_priv, sw_context->fp->tfile, handle,
-                user_context_converter);
-       if (IS_ERR(res)) {
+                user_context_converter, &res);
+       if (ret != 0) {
                VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
                               (unsigned int) handle);
-               return PTR_ERR(res);
+               return ret;
        }
 
-       ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
-       if (unlikely(ret != 0))
+       ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
+                                     vmw_val_add_flag_none);
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&res);
                return ret;
+       }
 
        sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
        sw_context->man = vmw_context_res_man(res);
 
+       vmw_resource_unreference(&res);
        return 0;
 }
 
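
With the noref variants removed, every lookup in this file now returns a full reference that the caller drops once the validation list holds its own. The generic shape of that pattern, with invented helpers rather than the vmwgfx API:

/* Generic lookup/add/unreference shape; invented helpers only. */
#include <stdatomic.h>
#include <stdlib.h>

struct res {
	atomic_int ref;
};

static int lookup(struct res **out)	/* returns a referenced object */
{
	*out = calloc(1, sizeof(**out));
	if (!*out)
		return -1;
	atomic_store(&(*out)->ref, 1);
	return 0;
}

static int val_add(struct res *r)
{
	(void)r;	/* pretend the validation list takes its own reference */
	return 0;
}

static void res_unref(struct res **r)
{
	if (atomic_fetch_sub(&(*r)->ref, 1) == 1)
		free(*r);
	*r = NULL;
}

int main(void)
{
	struct res *r;
	int ret = lookup(&r);

	if (ret)
		return 1;
	ret = val_add(r);
	res_unref(&r);	/* drop the lookup reference on every path */
	return ret ? 1 : 0;
}
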
index f66caa5..c7d645e 100644
@@ -281,39 +281,6 @@ out_bad_resource:
        return ret;
 }
 
-/**
- * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
- * TTM user-space handle and perform basic type checks
- *
- * @dev_priv:     Pointer to a device private struct
- * @tfile:        Pointer to a struct ttm_object_file identifying the caller
- * @handle:       The TTM user-space handle
- * @converter:    Pointer to an object describing the resource type
- *
- * If the handle can't be found or is associated with an incorrect resource
- * type, -EINVAL will be returned.
- */
-struct vmw_resource *
-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
-                                     struct ttm_object_file *tfile,
-                                     uint32_t handle,
-                                     const struct vmw_user_resource_conv
-                                     *converter)
-{
-       struct ttm_base_object *base;
-
-       base = ttm_base_object_noref_lookup(tfile, handle);
-       if (!base)
-               return ERR_PTR(-ESRCH);
-
-       if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
-               ttm_base_object_noref_release();
-               return ERR_PTR(-EINVAL);
-       }
-
-       return converter->base_obj_to_res(base);
-}
-
 /*
 * Helper function that looks up either a surface or a bo.
  *
index 0d8e6bd..90996c1 100644
@@ -717,7 +717,7 @@ static int xen_drv_probe(struct xenbus_device *xb_dev,
        return xenbus_switch_state(xb_dev, XenbusStateInitialising);
 }
 
-static int xen_drv_remove(struct xenbus_device *dev)
+static void xen_drv_remove(struct xenbus_device *dev)
 {
        struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
        int to = 100;
@@ -751,7 +751,6 @@ static int xen_drv_remove(struct xenbus_device *dev)
 
        xen_drm_drv_fini(front_info);
        xenbus_frontend_closed(dev);
-       return 0;
 }
 
 static const struct xenbus_device_id xen_driver_ids[] = {
index 8d8ebdc..67f1c73 100644
@@ -51,7 +51,7 @@ module_param_array(ptr_size, int, NULL, 0444);
 MODULE_PARM_DESC(ptr_size,
        "Pointing device width, height in pixels (default 800,600)");
 
-static int xenkbd_remove(struct xenbus_device *);
+static void xenkbd_remove(struct xenbus_device *);
 static int xenkbd_connect_backend(struct xenbus_device *, struct xenkbd_info *);
 static void xenkbd_disconnect_backend(struct xenkbd_info *);
 
@@ -404,7 +404,7 @@ static int xenkbd_resume(struct xenbus_device *dev)
        return xenkbd_connect_backend(dev, info);
 }
 
-static int xenkbd_remove(struct xenbus_device *dev)
+static void xenkbd_remove(struct xenbus_device *dev)
 {
        struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
 
@@ -417,7 +417,6 @@ static int xenkbd_remove(struct xenbus_device *dev)
                input_unregister_device(info->mtouch);
        free_page((unsigned long)info->page);
        kfree(info);
-       return 0;
 }
 
 static int xenkbd_connect_backend(struct xenbus_device *dev,
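
These xen frontend hunks (and the netback/netfront ones further down) track the tree-wide change of the xenbus_driver .remove callback from int to void: the bus core never acted on the return value, so drivers simply delete their "return 0;". Sketch of a converted driver, with hypothetical my_front_* names:

        static void my_front_remove(struct xenbus_device *dev)
        {
                struct my_front_info *info = dev_get_drvdata(&dev->dev);

                my_front_teardown(info);        /* failures can only be logged */
                kfree(info);
        }
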
index ab16019..f2425b0 100644 (file)
@@ -3858,7 +3858,9 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 
 static void arm_smmu_device_shutdown(struct platform_device *pdev)
 {
-       arm_smmu_device_remove(pdev);
+       struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+
+       arm_smmu_device_disable(smmu);
 }
 
 static const struct of_device_id arm_smmu_of_match[] = {
index 719fbca..2ff7a72 100644 (file)
@@ -1316,8 +1316,14 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
 
        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
-               /* Assume that a coherent TCU implies coherent TBUs */
-               return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
+               /*
+                * It's overwhelmingly the case in practice that when the pagetable
+                * walk interface is connected to a coherent interconnect, all the
+                * translation interfaces are too. Furthermore if the device is
+                * natively coherent, then its translation interface must also be.
+                */
+               return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
+                       device_get_dma_attr(dev) == DEV_DMA_COHERENT;
        case IOMMU_CAP_NOEXEC:
                return true;
        default:
@@ -2185,19 +2191,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int arm_smmu_device_remove(struct platform_device *pdev)
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
 {
        struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
 
        if (!smmu)
-               return -ENODEV;
+               return;
 
        if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
                dev_notice(&pdev->dev, "disabling translation\n");
 
-       iommu_device_unregister(&smmu->iommu);
-       iommu_device_sysfs_remove(&smmu->iommu);
-
        arm_smmu_rpm_get(smmu);
        /* Turn the thing off */
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
@@ -2209,12 +2212,21 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
                clk_bulk_disable(smmu->num_clks, smmu->clks);
 
        clk_bulk_unprepare(smmu->num_clks, smmu->clks);
-       return 0;
 }
 
-static void arm_smmu_device_shutdown(struct platform_device *pdev)
+static int arm_smmu_device_remove(struct platform_device *pdev)
 {
-       arm_smmu_device_remove(pdev);
+       struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+
+       if (!smmu)
+               return -ENODEV;
+
+       iommu_device_unregister(&smmu->iommu);
+       iommu_device_sysfs_remove(&smmu->iommu);
+
+       arm_smmu_device_shutdown(pdev);
+
+       return 0;
 }
 
 static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
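
Taken together, the two arm-smmu hunks split teardown into two layers: arm_smmu_device_shutdown() now only quiesces the hardware (all that the reboot/kexec path needs), while arm_smmu_device_remove() first unregisters the IOMMU from the driver model and then reuses shutdown for the hardware part. The generic shape, with hypothetical my_* names:

        static void my_shutdown(struct platform_device *pdev)
        {
                struct my_smmu *s = platform_get_drvdata(pdev);

                if (s)
                        my_hw_disable(s);               /* hardware off only */
        }

        static int my_remove(struct platform_device *pdev)
        {
                struct my_smmu *s = platform_get_drvdata(pdev);

                if (!s)
                        return -ENODEV;

                iommu_device_unregister(&s->iommu);     /* driver model first */
                iommu_device_sysfs_remove(&s->iommu);
                my_shutdown(pdev);                      /* then the hardware */
                return 0;
        }
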
index de91dd8..5f6a85a 100644 (file)
@@ -3185,14 +3185,16 @@ EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
  */
 int iommu_device_claim_dma_owner(struct device *dev, void *owner)
 {
-       struct iommu_group *group = iommu_group_get(dev);
+       struct iommu_group *group;
        int ret = 0;
 
-       if (!group)
-               return -ENODEV;
        if (WARN_ON(!owner))
                return -EINVAL;
 
+       group = iommu_group_get(dev);
+       if (!group)
+               return -ENODEV;
+
        mutex_lock(&group->mutex);
        if (group->owner_cnt) {
                if (group->owner != owner) {
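
Before the reorder, iommu_group_get() ran ahead of the WARN_ON(!owner) check, so the -EINVAL return leaked the group reference. The fix applies the usual validate-before-acquire rule; for contrast, the buggy shape was:

        group = iommu_group_get(dev);   /* reference taken... */
        if (!group)
                return -ENODEV;
        if (WARN_ON(!owner))
                return -EINVAL;         /* ...and never dropped: leak */
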
index a44ad92..fe452ce 100644 (file)
@@ -197,7 +197,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 
        curr = __get_cached_rbnode(iovad, limit_pfn);
        curr_iova = to_iova(curr);
-       retry_pfn = curr_iova->pfn_hi + 1;
+       retry_pfn = curr_iova->pfn_hi;
 
 retry:
        do {
@@ -211,7 +211,7 @@ retry:
        if (high_pfn < size || new_pfn < low_pfn) {
                if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
                        high_pfn = limit_pfn;
-                       low_pfn = retry_pfn;
+                       low_pfn = retry_pfn + 1;
                        curr = iova_find_limit(iovad, limit_pfn);
                        curr_iova = to_iova(curr);
                        goto retry;
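
The two one-line iova changes are an overflow fix: for the rbtree anchor node curr_iova->pfn_hi is ULONG_MAX, so the cached "retry_pfn = pfn_hi + 1" wrapped to 0 and made "retry_pfn < limit_pfn" pass spuriously, restarting the search from pfn 0. Caching the unincremented value keeps the comparison overflow-safe, and the +1 happens only once a retry is actually taken:

        unsigned long pfn_hi = ULONG_MAX;       /* anchor node */

        /* old: (pfn_hi + 1) wraps to 0, so 0 < limit_pfn always retries */
        /* new: ULONG_MAX < limit_pfn is never true, so no bogus retry   */
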
index 69682ee..ca581ff 100644 (file)
@@ -683,7 +683,7 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
        ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
                                     dev_name(&pdev->dev));
        if (ret)
-               return ret;
+               goto out_clk_unprepare;
 
        ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
        if (ret)
@@ -698,6 +698,8 @@ out_dev_unreg:
        iommu_device_unregister(&data->iommu);
 out_sysfs_remove:
        iommu_device_sysfs_remove(&data->iommu);
+out_clk_unprepare:
+       clk_disable_unprepare(data->bclk);
        return ret;
 }
 
index caa952c..7abdb98 100644 (file)
@@ -389,7 +389,7 @@ config LS_EXTIRQ
 
 config LS_SCFG_MSI
        def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
-       depends on PCI && PCI_MSI
+       depends on PCI_MSI
 
 config PARTITION_PERCPU
        bool
@@ -658,6 +658,7 @@ config APPLE_AIC
        bool "Apple Interrupt Controller (AIC)"
        depends on ARM64
        depends on ARCH_APPLE || COMPILE_TEST
+       select GENERIC_IRQ_IPI_MUX
        help
          Support for the Apple Interrupt Controller found on Apple Silicon SoCs,
          such as the M1.
index 5ddb8e5..9c8b134 100644 (file)
@@ -199,21 +199,20 @@ static int alpine_msix_init_domains(struct alpine_msix_data *priv,
        }
 
        gic_domain = irq_find_host(gic_node);
+       of_node_put(gic_node);
        if (!gic_domain) {
                pr_err("Failed to find the GIC domain\n");
                return -ENXIO;
        }
 
-       middle_domain = irq_domain_add_tree(NULL,
-                                           &alpine_msix_middle_domain_ops,
-                                           priv);
+       middle_domain = irq_domain_add_hierarchy(gic_domain, 0, 0, NULL,
+                                                &alpine_msix_middle_domain_ops,
+                                                priv);
        if (!middle_domain) {
                pr_err("Failed to create the MSIX middle domain\n");
                return -ENOMEM;
        }
 
-       middle_domain->parent = gic_domain;
-
        msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
                                               &alpine_msix_domain_info,
                                               middle_domain);
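
This is the first of several conversions from a two-step "create domain, then poke domain->parent" sequence to irq_domain_create_hierarchy(), which links the parent before the domain becomes visible; the same pattern recurs in the GICv2m, ITS, MBI, pch-msi and ODMI hunks below. With parallel driver probing, the old sequence briefly exposed an un-parented domain. Sketch, with hypothetical names:

        /* before: racy two-step publication */
        d = irq_domain_create_tree(fwnode, &my_domain_ops, priv);
        d->parent = parent;

        /* after: parented atomically at creation time */
        d = irq_domain_create_hierarchy(parent, 0, 0, fwnode,
                                        &my_domain_ops, priv);
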
index cf513b6..eabb3b9 100644 (file)
@@ -292,7 +292,6 @@ struct aic_irq_chip {
        void __iomem *base;
        void __iomem *event;
        struct irq_domain *hw_domain;
-       struct irq_domain *ipi_domain;
        struct {
                cpumask_t aff;
        } *fiq_aff[AIC_NR_FIQ];
@@ -307,9 +306,6 @@ struct aic_irq_chip {
 
 static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
 
-static DEFINE_PER_CPU(atomic_t, aic_vipi_flag);
-static DEFINE_PER_CPU(atomic_t, aic_vipi_enable);
-
 static struct aic_irq_chip *aic_irqc;
 
 static void aic_handle_ipi(struct pt_regs *regs);
@@ -751,98 +747,8 @@ static void aic_ipi_send_fast(int cpu)
        isb();
 }
 
-static void aic_ipi_mask(struct irq_data *d)
-{
-       u32 irq_bit = BIT(irqd_to_hwirq(d));
-
-       /* No specific ordering requirements needed here. */
-       atomic_andnot(irq_bit, this_cpu_ptr(&aic_vipi_enable));
-}
-
-static void aic_ipi_unmask(struct irq_data *d)
-{
-       struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
-       u32 irq_bit = BIT(irqd_to_hwirq(d));
-
-       atomic_or(irq_bit, this_cpu_ptr(&aic_vipi_enable));
-
-       /*
-        * The atomic_or() above must complete before the atomic_read()
-        * below to avoid racing aic_ipi_send_mask().
-        */
-       smp_mb__after_atomic();
-
-       /*
-        * If a pending vIPI was unmasked, raise a HW IPI to ourselves.
-        * No barriers needed here since this is a self-IPI.
-        */
-       if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
-               if (static_branch_likely(&use_fast_ipi))
-                       aic_ipi_send_fast(smp_processor_id());
-               else
-                       aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
-       }
-}
-
-static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
-{
-       struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
-       u32 irq_bit = BIT(irqd_to_hwirq(d));
-       u32 send = 0;
-       int cpu;
-       unsigned long pending;
-
-       for_each_cpu(cpu, mask) {
-               /*
-                * This sequence is the mirror of the one in aic_ipi_unmask();
-                * see the comment there. Additionally, release semantics
-                * ensure that the vIPI flag set is ordered after any shared
-                * memory accesses that precede it. This therefore also pairs
-                * with the atomic_fetch_andnot in aic_handle_ipi().
-                */
-               pending = atomic_fetch_or_release(irq_bit, per_cpu_ptr(&aic_vipi_flag, cpu));
-
-               /*
-                * The atomic_fetch_or_release() above must complete before the
-                * atomic_read() below to avoid racing aic_ipi_unmask().
-                */
-               smp_mb__after_atomic();
-
-               if (!(pending & irq_bit) &&
-                   (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
-                       if (static_branch_likely(&use_fast_ipi))
-                               aic_ipi_send_fast(cpu);
-                       else
-                               send |= AIC_IPI_SEND_CPU(cpu);
-               }
-       }
-
-       /*
-        * The flag writes must complete before the physical IPI is issued
-        * to another CPU. This is implied by the control dependency on
-        * the result of atomic_read_acquire() above, which is itself
-        * already ordered after the vIPI flag write.
-        */
-       if (send)
-               aic_ic_write(ic, AIC_IPI_SEND, send);
-}
-
-static struct irq_chip ipi_chip = {
-       .name = "AIC-IPI",
-       .irq_mask = aic_ipi_mask,
-       .irq_unmask = aic_ipi_unmask,
-       .ipi_send_mask = aic_ipi_send_mask,
-};
-
-/*
- * IPI IRQ domain
- */
-
 static void aic_handle_ipi(struct pt_regs *regs)
 {
-       int i;
-       unsigned long enabled, firing;
-
        /*
         * Ack the IPI. We need to order this after the AIC event read, but
         * that is enforced by normal MMIO ordering guarantees.
@@ -857,27 +763,7 @@ static void aic_handle_ipi(struct pt_regs *regs)
                aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
        }
 
-       /*
-        * The mask read does not need to be ordered. Only we can change
-        * our own mask anyway, so no races are possible here, as long as
-        * we are properly in the interrupt handler (which is covered by
-        * the barrier that is part of the top-level AIC handler's readl()).
-        */
-       enabled = atomic_read(this_cpu_ptr(&aic_vipi_enable));
-
-       /*
-        * Clear the IPIs we are about to handle. This pairs with the
-        * atomic_fetch_or_release() in aic_ipi_send_mask(), and needs to be
-        * ordered after the aic_ic_write() above (to avoid dropping vIPIs) and
-        * before IPI handling code (to avoid races handling vIPIs before they
-        * are signaled). The former is taken care of by the release semantics
-        * of the write portion, while the latter is taken care of by the
-        * acquire semantics of the read portion.
-        */
-       firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;
-
-       for_each_set_bit(i, &firing, AIC_NR_SWIPI)
-               generic_handle_domain_irq(aic_irqc->ipi_domain, i);
+       ipi_mux_process();
 
        /*
         * No ordering needed here; at worst this just changes the timing of
@@ -887,53 +773,24 @@ static void aic_handle_ipi(struct pt_regs *regs)
                aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
 }
 
-static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
-                        unsigned int nr_irqs, void *args)
+static void aic_ipi_send_single(unsigned int cpu)
 {
-       int i;
-
-       for (i = 0; i < nr_irqs; i++) {
-               irq_set_percpu_devid(virq + i);
-               irq_domain_set_info(d, virq + i, i, &ipi_chip, d->host_data,
-                                   handle_percpu_devid_irq, NULL, NULL);
-       }
-
-       return 0;
-}
-
-static void aic_ipi_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
-{
-       /* Not freeing IPIs */
+       if (static_branch_likely(&use_fast_ipi))
+               aic_ipi_send_fast(cpu);
+       else
+               aic_ic_write(aic_irqc, AIC_IPI_SEND, AIC_IPI_SEND_CPU(cpu));
 }
 
-static const struct irq_domain_ops aic_ipi_domain_ops = {
-       .alloc = aic_ipi_alloc,
-       .free = aic_ipi_free,
-};
-
 static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
 {
-       struct irq_domain *ipi_domain;
        int base_ipi;
 
-       ipi_domain = irq_domain_create_linear(irqc->hw_domain->fwnode, AIC_NR_SWIPI,
-                                             &aic_ipi_domain_ops, irqc);
-       if (WARN_ON(!ipi_domain))
-               return -ENODEV;
-
-       ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
-       irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
-
-       base_ipi = irq_domain_alloc_irqs(ipi_domain, AIC_NR_SWIPI, NUMA_NO_NODE, NULL);
-       if (WARN_ON(!base_ipi)) {
-               irq_domain_remove(ipi_domain);
+       base_ipi = ipi_mux_create(AIC_NR_SWIPI, aic_ipi_send_single);
+       if (WARN_ON(base_ipi <= 0))
                return -ENODEV;
-       }
 
        set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);
 
-       irqc->ipi_domain = ipi_domain;
-
        return 0;
 }
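
The AIC driver drops its private vIPI machinery (the per-CPU flag/enable bitmaps and its own IPI domain) in favour of the generic IPI mux lifted from this code. Per the hunks, a consumer selects GENERIC_IRQ_IPI_MUX, supplies a single-CPU send callback to ipi_mux_create(), and calls ipi_mux_process() from its hardware IPI handler. A condensed sketch (NR_SWIPI and the my_* names are placeholders):

        static void my_ipi_send(unsigned int cpu)
        {
                /* raise the one real hardware IPI targeting @cpu */
        }

        static void my_handle_ipi(struct pt_regs *regs)
        {
                /* ack the hardware IPI, then let the mux demultiplex */
                ipi_mux_process();
        }

        static int __init my_init_smp(void)
        {
                int base_ipi = ipi_mux_create(NR_SWIPI, my_ipi_send);

                if (base_ipi <= 0)
                        return -ENODEV;

                set_smp_ipi_range(base_ipi, NR_SWIPI);
                return 0;
        }
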
 
index 279e92c..94a7223 100644 (file)
@@ -17,8 +17,9 @@
 
 #define ASPEED_SCU_IC_REG              0x018
 #define ASPEED_SCU_IC_SHIFT            0
-#define ASPEED_SCU_IC_ENABLE           GENMASK(6, ASPEED_SCU_IC_SHIFT)
+#define ASPEED_SCU_IC_ENABLE           GENMASK(15, ASPEED_SCU_IC_SHIFT)
 #define ASPEED_SCU_IC_NUM_IRQS         7
+#define ASPEED_SCU_IC_STATUS           GENMASK(28, 16)
 #define ASPEED_SCU_IC_STATUS_SHIFT     16
 
 #define ASPEED_AST2600_SCU_IC0_REG     0x560
@@ -155,6 +156,8 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
                rc = PTR_ERR(scu_ic->scu);
                goto err;
        }
+       regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_STATUS, ASPEED_SCU_IC_STATUS);
+       regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_ENABLE, 0);
 
        irq = irq_of_parse_and_map(node, 0);
        if (!irq) {
index bb6609c..1e9dab6 100644 (file)
@@ -279,7 +279,8 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
                flags |= IRQ_GC_BE_IO;
 
        ret = irq_alloc_domain_generic_chips(data->domain, IRQS_PER_WORD, 1,
-                               dn->full_name, handle_level_irq, clr, 0, flags);
+                               dn->full_name, handle_level_irq, clr,
+                               IRQ_LEVEL, flags);
        if (ret) {
                pr_err("failed to allocate generic irq chip\n");
                goto out_free_domain;
index e4efc08..091b0fe 100644 (file)
@@ -161,6 +161,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
                                          *init_params)
 {
        unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+       unsigned int set = 0;
        struct brcmstb_l2_intc_data *data;
        struct irq_chip_type *ct;
        int ret;
@@ -208,9 +209,12 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
        if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                flags |= IRQ_GC_BE_IO;
 
+       if (init_params->handler == handle_level_irq)
+               set |= IRQ_LEVEL;
+
        /* Allocate a single Generic IRQ chip for this node */
        ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
-                       np->full_name, init_params->handler, clr, 0, flags);
+                       np->full_name, init_params->handler, clr, set, flags);
        if (ret) {
                pr_err("failed to allocate generic irq chip\n");
                goto out_free_domain;
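
Both Broadcom L2 fixes (bcm7120 above and brcmstb here) pass IRQ_LEVEL in the "set" mask of irq_alloc_domain_generic_chips() when the flow handler is handle_level_irq, so the core marks those descriptors level-triggered instead of leaving the default. The parameter positions, for reference:

        /*
         * irq_alloc_domain_generic_chips(domain, irqs_per_chip, num_ct,
         *                                name, flow_handler,
         *                                clr,   irq flags cleared per irq
         *                                set,   irq flags set per irq
         *                                gcflags);
         */
        ret = irq_alloc_domain_generic_chips(domain, 32, 1, name,
                                             handle_level_irq,
                                             IRQ_NOREQUEST | IRQ_NOPROBE,
                                             IRQ_LEVEL, 0);
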
index f4d7eeb..f1e75b3 100644 (file)
@@ -287,15 +287,14 @@ static __init int gicv2m_allocate_domains(struct irq_domain *parent)
        if (!v2m)
                return 0;
 
-       inner_domain = irq_domain_create_tree(v2m->fwnode,
-                                             &gicv2m_domain_ops, v2m);
+       inner_domain = irq_domain_create_hierarchy(parent, 0, 0, v2m->fwnode,
+                                                  &gicv2m_domain_ops, v2m);
        if (!inner_domain) {
                pr_err("Failed to create GICv2m domain\n");
                return -ENOMEM;
        }
 
        irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
-       inner_domain->parent = parent;
        pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
                                               &gicv2m_msi_domain_info,
                                               inner_domain);
index 973ede0..5634d29 100644 (file)
@@ -4909,18 +4909,19 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
        if (!info)
                return -ENOMEM;
 
-       inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
+       info->ops = &its_msi_domain_ops;
+       info->data = its;
+
+       inner_domain = irq_domain_create_hierarchy(its_parent,
+                                                  its->msi_domain_flags, 0,
+                                                  handle, &its_domain_ops,
+                                                  info);
        if (!inner_domain) {
                kfree(info);
                return -ENOMEM;
        }
 
-       inner_domain->parent = its_parent;
        irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
-       inner_domain->flags |= its->msi_domain_flags;
-       info->ops = &its_msi_domain_ops;
-       info->data = its;
-       inner_domain->host_data = info;
 
        return 0;
 }
index e1efdec..dbb8b1e 100644 (file)
@@ -233,13 +233,12 @@ static int mbi_allocate_domains(struct irq_domain *parent)
        struct irq_domain *nexus_domain, *pci_domain, *plat_domain;
        int err;
 
-       nexus_domain = irq_domain_create_tree(parent->fwnode,
-                                             &mbi_domain_ops, NULL);
+       nexus_domain = irq_domain_create_hierarchy(parent, 0, 0, parent->fwnode,
+                                                  &mbi_domain_ops, NULL);
        if (!nexus_domain)
                return -ENOMEM;
 
        irq_domain_update_bus_token(nexus_domain, DOMAIN_BUS_NEXUS);
-       nexus_domain->parent = parent;
 
        err = mbi_allocate_pci_domain(nexus_domain, &pci_domain);
 
index 85b754f..8d00a9a 100644 (file)
@@ -55,6 +55,8 @@ struct liointc_priv {
        struct liointc_handler_data     handler[LIOINTC_NUM_PARENT];
        void __iomem                    *core_isr[LIOINTC_NUM_CORES];
        u8                              map_cache[LIOINTC_CHIP_IRQ];
+       u32                             int_pol;
+       u32                             int_edge;
        bool                            has_lpc_irq_errata;
 };
 
@@ -138,6 +140,14 @@ static int liointc_set_type(struct irq_data *data, unsigned int type)
        return 0;
 }
 
+static void liointc_suspend(struct irq_chip_generic *gc)
+{
+       struct liointc_priv *priv = gc->private;
+
+       priv->int_pol = readl(gc->reg_base + LIOINTC_REG_INTC_POL);
+       priv->int_edge = readl(gc->reg_base + LIOINTC_REG_INTC_EDGE);
+}
+
 static void liointc_resume(struct irq_chip_generic *gc)
 {
        struct liointc_priv *priv = gc->private;
@@ -150,6 +160,8 @@ static void liointc_resume(struct irq_chip_generic *gc)
        /* Restore map cache */
        for (i = 0; i < LIOINTC_CHIP_IRQ; i++)
                writeb(priv->map_cache[i], gc->reg_base + i);
+       writel(priv->int_pol, gc->reg_base + LIOINTC_REG_INTC_POL);
+       writel(priv->int_edge, gc->reg_base + LIOINTC_REG_INTC_EDGE);
        /* Restore mask cache */
        writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE);
        irq_gc_unlock_irqrestore(gc, flags);
@@ -269,6 +281,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
        gc->private = priv;
        gc->reg_base = base;
        gc->domain = domain;
+       gc->suspend = liointc_suspend;
        gc->resume = liointc_resume;
 
        ct = gc->chip_types;
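
liointc_resume() already rewrote the map and mask caches but assumed INTC_POL/INTC_EDGE survive suspend, losing any trigger configuration programmed at runtime; the new liointc_suspend() snapshots both registers so resume can restore them. The generic-chip suspend/resume hooks pair like this (sketch; my_* and REG_* are hypothetical):

        static void my_suspend(struct irq_chip_generic *gc)
        {
                struct my_priv *p = gc->private;

                p->saved_pol  = readl(gc->reg_base + REG_POL);  /* snapshot */
                p->saved_edge = readl(gc->reg_base + REG_EDGE);
        }

        static void my_resume(struct irq_chip_generic *gc)
        {
                struct my_priv *p = gc->private;

                writel(p->saved_pol,  gc->reg_base + REG_POL);  /* restore */
                writel(p->saved_edge, gc->reg_base + REG_EDGE);
        }

        /* wired up at init: gc->suspend = my_suspend; gc->resume = my_resume; */
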
index a72ede9..6e1e1f0 100644 (file)
@@ -163,16 +163,15 @@ static int pch_msi_init_domains(struct pch_msi_data *priv,
 {
        struct irq_domain *middle_domain, *msi_domain;
 
-       middle_domain = irq_domain_create_linear(domain_handle,
-                                               priv->num_irqs,
-                                               &pch_msi_middle_domain_ops,
-                                               priv);
+       middle_domain = irq_domain_create_hierarchy(parent, 0, priv->num_irqs,
+                                                   domain_handle,
+                                                   &pch_msi_middle_domain_ops,
+                                                   priv);
        if (!middle_domain) {
                pr_err("Failed to create the MSI middle domain\n");
                return -ENOMEM;
        }
 
-       middle_domain->parent = parent;
        irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
 
        msi_domain = pci_msi_create_irq_domain(domain_handle,
index fe88a78..c43a345 100644 (file)
@@ -221,6 +221,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
        }
 
        parent_domain = irq_find_host(irq_parent_dn);
+       of_node_put(irq_parent_dn);
        if (!parent_domain) {
                dev_err(&pdev->dev, "failed to find parent IRQ domain\n");
                return -ENODEV;
index dc4145a..1080915 100644 (file)
@@ -161,7 +161,7 @@ static struct msi_domain_info odmi_msi_domain_info = {
 static int __init mvebu_odmi_init(struct device_node *node,
                                  struct device_node *parent)
 {
-       struct irq_domain *inner_domain, *plat_domain;
+       struct irq_domain *parent_domain, *inner_domain, *plat_domain;
        int ret, i;
 
        if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
@@ -197,16 +197,17 @@ static int __init mvebu_odmi_init(struct device_node *node,
                }
        }
 
-       inner_domain = irq_domain_create_linear(of_node_to_fwnode(node),
-                                               odmis_count * NODMIS_PER_FRAME,
-                                               &odmi_domain_ops, NULL);
+       parent_domain = irq_find_host(parent);
+
+       inner_domain = irq_domain_create_hierarchy(parent_domain, 0,
+                                                  odmis_count * NODMIS_PER_FRAME,
+                                                  of_node_to_fwnode(node),
+                                                  &odmi_domain_ops, NULL);
        if (!inner_domain) {
                ret = -ENOMEM;
                goto err_unmap;
        }
 
-       inner_domain->parent = irq_find_host(parent);
-
        plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
                                                     &odmi_msi_domain_info,
                                                     inner_domain);
index fe8fad2..020ddf2 100644 (file)
@@ -236,6 +236,7 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
        }
 
        parent_domain = irq_find_host(parent_node);
+       of_node_put(parent_node);
        if (!parent_domain) {
                dev_err(dev, "Failed to find IRQ parent domain\n");
                return -ENODEV;
index 3570f0a..7899607 100644 (file)
@@ -38,8 +38,10 @@ int platform_irqchip_probe(struct platform_device *pdev)
        struct device_node *par_np = of_irq_find_parent(np);
        of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
 
-       if (!irq_init_cb)
+       if (!irq_init_cb) {
+               of_node_put(par_np);
                return -EINVAL;
+       }
 
        if (par_np == np)
                par_np = NULL;
@@ -52,8 +54,10 @@ int platform_irqchip_probe(struct platform_device *pdev)
         * interrupt controller. The actual initialization callback of this
         * interrupt controller can check for specific domains as necessary.
         */
-       if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY))
+       if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) {
+               of_node_put(par_np);
                return -EPROBE_DEFER;
+       }
 
        return irq_init_cb(np, par_np);
 }
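
of_irq_find_parent() returns a device node with an elevated refcount, so the early returns added here must balance it with of_node_put(); the alpine-msix, mvebu-gicp and ti-sci-intr hunks above fix the same class of leak for nodes fed to irq_find_host(). The general OF idiom (my_setup() is hypothetical):

        struct device_node *parent = of_irq_find_parent(np);   /* +1 ref */

        if (!parent)
                return -ENODEV;

        ret = my_setup(parent);
        of_node_put(parent);            /* drop the ref on every path out */
        return ret;
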
index 02601bb..6e5e11c 100644 (file)
@@ -50,7 +50,7 @@ static int scpart_scan_partmap(struct mtd_info *master, loff_t partmap_offs,
        int cnt = 0;
        int res = 0;
        int res2;
-       loff_t offs;
+       uint32_t offs;
        size_t retlen;
        struct sc_part_desc *pdesc = NULL;
        struct sc_part_desc *tmpdesc;
index f601e7b..1c689da 100644 (file)
@@ -91,7 +91,7 @@ static int mtd_parser_tplink_safeloader_parse(struct mtd_info *mtd,
        buf = mtd_parser_tplink_safeloader_read_table(mtd);
        if (!buf) {
                err = -ENOENT;
-               goto err_out;
+               goto err_free_parts;
        }
 
        for (idx = 0, offset = TPLINK_SAFELOADER_DATA_OFFSET;
@@ -118,6 +118,8 @@ static int mtd_parser_tplink_safeloader_parse(struct mtd_info *mtd,
 err_free:
        for (idx -= 1; idx >= 0; idx--)
                kfree(parts[idx].name);
+err_free_parts:
+       kfree(parts);
 err_out:
        return err;
 };
index d8703d7..d67c926 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/math64.h>
index 16ce7a9..240a7e8 100644 (file)
@@ -993,7 +993,7 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
                             DMA_ATTR_WEAK_ORDERING);
        skb = build_skb(page_address(page), PAGE_SIZE);
        if (!skb) {
-               __free_page(page);
+               page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
        skb_mark_for_recycle(skb);
@@ -1031,7 +1031,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 
        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
-               __free_page(page);
+               page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
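
The bnxt pages on these paths come from a page_pool, so handing them to __free_page() on skb-allocation failure bypassed the pool's inflight accounting; page_pool_recycle_direct() returns them to the pool instead. Rule of thumb, sketched:

        /* allocate and release through the same pool API */
        page = page_pool_dev_alloc_pages(pool);
        if (!page)
                return NULL;

        if (alloc_failed) {                             /* e.g. no skb */
                page_pool_recycle_direct(pool, page);   /* not __free_page() */
                return NULL;
        }
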
 
index 081bd2c..e84e5be 100644 (file)
@@ -3130,7 +3130,7 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
 
        hclgevf_update_rss_size(handle, new_tqps_num);
 
-       hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map,
+       hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map,
                                   tc_offset, tc_valid, tc_size);
        ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
                                         tc_valid, tc_size);
index c4e451e..adc02ad 100644 (file)
@@ -3850,7 +3850,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
                                field_flags |= IAVF_CLOUD_FIELD_IIP;
                        } else {
                                dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
-                                       be32_to_cpu(match.mask->dst));
+                                       be32_to_cpu(match.mask->src));
                                return -EINVAL;
                        }
                }
index b5a7f24..43e199b 100644 (file)
@@ -363,6 +363,7 @@ ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
        /* Send the data out to a hardware port */
        write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
        if (!write_buf) {
+               kfree(cmd_buf);
                err = -ENOMEM;
                goto exit;
        }
@@ -460,6 +461,9 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
        for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
                pf->gnss_tty_port[i] = kzalloc(sizeof(*pf->gnss_tty_port[i]),
                                               GFP_KERNEL);
+               if (!pf->gnss_tty_port[i])
+                       goto err_out;
+
                pf->gnss_serial[i] = NULL;
 
                tty_port_init(pf->gnss_tty_port[i]);
@@ -469,21 +473,23 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
        err = tty_register_driver(tty_driver);
        if (err) {
                dev_err(dev, "Failed to register TTY driver err=%d\n", err);
-
-               for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
-                       tty_port_destroy(pf->gnss_tty_port[i]);
-                       kfree(pf->gnss_tty_port[i]);
-               }
-               kfree(ttydrv_name);
-               tty_driver_kref_put(pf->ice_gnss_tty_driver);
-
-               return NULL;
+               goto err_out;
        }
 
        for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++)
                dev_info(dev, "%s%d registered\n", ttydrv_name, i);
 
        return tty_driver;
+
+err_out:
+       while (i--) {
+               tty_port_destroy(pf->gnss_tty_port[i]);
+               kfree(pf->gnss_tty_port[i]);
+       }
+       kfree(ttydrv_name);
+       tty_driver_kref_put(pf->ice_gnss_tty_driver);
+
+       return NULL;
 }
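
The rewritten GNSS error handling folds the duplicated cleanup into one err_out label: because i holds the index of the first port allocation that failed, "while (i--)" unwinds exactly the ports that were created, in reverse order. The idiom in isolation (names hypothetical):

        for (i = 0; i < N; i++) {
                obj[i] = alloc_obj();
                if (!obj[i])
                        goto err_out;   /* i == number of successes */
        }
        return 0;

err_out:
        while (i--)                     /* frees obj[i-1] ... obj[0] */
                free_obj(obj[i]);
        return -ENOMEM;
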
 
 /**
index a7b2263..e9747ec 100644 (file)
 #define IGC_TSAUXC_EN_TT0      BIT(0)  /* Enable target time 0. */
 #define IGC_TSAUXC_EN_TT1      BIT(1)  /* Enable target time 1. */
 #define IGC_TSAUXC_EN_CLK0     BIT(2)  /* Enable Configurable Frequency Clock 0. */
+#define IGC_TSAUXC_ST0         BIT(4)  /* Start Clock 0 Toggle on Target Time 0. */
 #define IGC_TSAUXC_EN_CLK1     BIT(5)  /* Enable Configurable Frequency Clock 1. */
+#define IGC_TSAUXC_ST1         BIT(7)  /* Start Clock 1 Toggle on Target Time 1. */
 #define IGC_TSAUXC_EN_TS0      BIT(8)  /* Enable hardware timestamp 0. */
 #define IGC_TSAUXC_AUTT0       BIT(9)  /* Auxiliary Timestamp Taken. */
 #define IGC_TSAUXC_EN_TS1      BIT(10) /* Enable hardware timestamp 0. */
index 8dbb9f9..c34734d 100644 (file)
@@ -322,7 +322,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
                ts = ns_to_timespec64(ns);
                if (rq->perout.index == 1) {
                        if (use_freq) {
-                               tsauxc_mask = IGC_TSAUXC_EN_CLK1;
+                               tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1;
                                tsim_mask = 0;
                        } else {
                                tsauxc_mask = IGC_TSAUXC_EN_TT1;
@@ -333,7 +333,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
                        freqout = IGC_FREQOUT1;
                } else {
                        if (use_freq) {
-                               tsauxc_mask = IGC_TSAUXC_EN_CLK0;
+                               tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0;
                                tsim_mask = 0;
                        } else {
                                tsauxc_mask = IGC_TSAUXC_EN_TT0;
@@ -347,10 +347,12 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
                tsauxc = rd32(IGC_TSAUXC);
                tsim = rd32(IGC_TSIM);
                if (rq->perout.index == 1) {
-                       tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1);
+                       tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 |
+                                   IGC_TSAUXC_ST1);
                        tsim &= ~IGC_TSICR_TT1;
                } else {
-                       tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0);
+                       tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 |
+                                   IGC_TSAUXC_ST0);
                        tsim &= ~IGC_TSICR_TT0;
                }
                if (on) {
index 24aa97f..123dca9 100644 (file)
@@ -855,9 +855,11 @@ static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
        rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
        if (rp_pdev && rp_pdev->subordinate) {
                bus = rp_pdev->subordinate->number;
+               pci_dev_put(rp_pdev);
                return pci_get_domain_bus_and_slot(0, bus, 0);
        }
 
+       pci_dev_put(rp_pdev);
        return NULL;
 }
 
@@ -874,6 +876,7 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
        struct ixgbe_adapter *adapter = hw->back;
        struct pci_dev *pdev = adapter->pdev;
        struct pci_dev *func0_pdev;
+       bool has_mii = false;
 
        /* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices
         * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0
@@ -884,15 +887,16 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
        func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
        if (func0_pdev) {
                if (func0_pdev == pdev)
-                       return true;
-               else
-                       return false;
+                       has_mii = true;
+               goto out;
        }
        func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
        if (func0_pdev == pdev)
-               return true;
+               has_mii = true;
 
-       return false;
+out:
+       pci_dev_put(func0_pdev);
+       return has_mii;
 }
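
pci_get_domain_bus_and_slot() returns a pinned struct pci_dev, and the old code leaked that reference both in the root-port lookup and in the pointer-comparison-only use in ixgbe_x550em_a_has_mii(); the fix pairs every successful get with pci_dev_put(). Minimal pattern:

        struct pci_dev *pdev = pci_get_domain_bus_and_slot(0, bus, devfn);

        if (pdev) {
                /* use pdev: compare pointers, read config space, ... */
                pci_dev_put(pdev);      /* always balance the get */
        }

pci_dev_put(NULL) is a no-op, which is why the hunk can also call it unconditionally after the if.
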
 
 /**
index b2b71fe..724df63 100644 (file)
@@ -774,9 +774,9 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
 
        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        if (enable)
-               cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+               cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
        else
-               cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+               cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
        cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return 0;
 }
index fb2d376..5a20d93 100644 (file)
@@ -26,7 +26,6 @@
 #define CMR_P2X_SEL_SHIFT              59ULL
 #define CMR_P2X_SEL_NIX0               1ULL
 #define CMR_P2X_SEL_NIX1               2ULL
-#define CMR_EN                         BIT_ULL(55)
 #define DATA_PKT_TX_EN                 BIT_ULL(53)
 #define DATA_PKT_RX_EN                 BIT_ULL(54)
 #define CGX_LMAC_TYPE_SHIFT            40
index 86653bb..7f8ffbf 100644 (file)
@@ -758,6 +758,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
        if (vf->otx2_wq)
                destroy_workqueue(vf->otx2_wq);
        otx2_ptp_destroy(vf);
+       otx2_mcam_flow_del(vf);
+       otx2_shutdown_tc(vf);
        otx2vf_disable_mbox_intr(vf);
        otx2_detach_resources(&vf->mbox);
        if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
index d3ca745..c837103 100644 (file)
@@ -2176,15 +2176,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
                return -EINVAL;
        }
 
-       cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
-       if (!cmd->stats)
-               return -ENOMEM;
-
        cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
-       if (!cmd->pool) {
-               err = -ENOMEM;
-               goto dma_pool_err;
-       }
+       if (!cmd->pool)
+               return -ENOMEM;
 
        err = alloc_cmd_page(dev, cmd);
        if (err)
@@ -2268,8 +2262,6 @@ err_free_page:
 
 err_free_pool:
        dma_pool_destroy(cmd->pool);
-dma_pool_err:
-       kvfree(cmd->stats);
        return err;
 }
 
@@ -2282,7 +2274,6 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
        destroy_msg_cache(dev);
        free_cmd_page(dev, cmd);
        dma_pool_destroy(cmd->pool);
-       kvfree(cmd->stats);
 }
 
 void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
index 512d431..c4378af 100644 (file)
@@ -34,12 +34,6 @@ static int police_act_validate(const struct flow_action_entry *act,
                return -EOPNOTSUPP;
        }
 
-       if (act->police.rate_pkt_ps) {
-               NL_SET_ERR_MSG_MOD(extack,
-                                  "QoS offload not support packets per second");
-               return -EOPNOTSUPP;
-       }
-
        return 0;
 }
 
index 8d7d761..50b60fd 100644 (file)
@@ -127,6 +127,7 @@ mlx5e_post_meter_add_rule(struct mlx5e_priv *priv,
                attr->counter = act_counter;
 
        attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
+       attr->inner_match_level = MLX5_MATCH_NONE;
        attr->outer_match_level = MLX5_MATCH_NONE;
        attr->chain = 0;
        attr->prio = 0;
index fd07c4c..1f62c70 100644 (file)
@@ -88,6 +88,8 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
        struct udphdr *udp = (struct udphdr *)(buf);
        struct vxlanhdr *vxh;
 
+       if (tun_key->tun_flags & TUNNEL_VXLAN_OPT)
+               return -EOPNOTSUPP;
        vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
        *ip_proto = IPPROTO_UDP;
 
index 9369a58..7f6b940 100644 (file)
@@ -62,6 +62,7 @@ struct mlx5e_macsec_sa {
        u32 enc_key_id;
        u32 next_pn;
        sci_t sci;
+       ssci_t ssci;
        salt_t salt;
 
        struct rhash_head hash;
@@ -358,7 +359,6 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_macsec_obj_attrs obj_attrs;
        union mlx5e_macsec_rule *macsec_rule;
-       struct macsec_key *key;
        int err;
 
        obj_attrs.next_pn = sa->next_pn;
@@ -368,13 +368,9 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
        obj_attrs.aso_pdn = macsec->aso.pdn;
        obj_attrs.epn_state = sa->epn_state;
 
-       key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key;
-
        if (sa->epn_state.epn_enabled) {
-               obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) :
-                                          cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
-
-               memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
+               obj_attrs.ssci = cpu_to_be32((__force u32)sa->ssci);
+               memcpy(&obj_attrs.salt, &sa->salt, sizeof(sa->salt));
        }
 
        obj_attrs.replay_window = ctx->secy->replay_window;
@@ -499,10 +495,11 @@ mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
 }
 
 static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
-                             const pn_t *next_pn_halves)
+                             const pn_t *next_pn_halves, ssci_t ssci)
 {
        struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;
 
+       sa->ssci = ssci;
        sa->salt = key->salt;
        epn_state->epn_enabled = 1;
        epn_state->epn_msb = next_pn_halves->upper;
@@ -550,7 +547,8 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
        tx_sa->assoc_num = assoc_num;
 
        if (secy->xpn)
-               update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves);
+               update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves,
+                                 ctx_tx_sa->ssci);
 
        err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
                                         MLX5_ACCEL_OBJ_MACSEC_KEY,
@@ -945,7 +943,8 @@ static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
        rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
 
        if (ctx->secy->xpn)
-               update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves);
+               update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
+                                 ctx_rx_sa->ssci);
 
        err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
                                         MLX5_ACCEL_OBJ_MACSEC_KEY,
index cff5f2e..abcc614 100644 (file)
@@ -4084,6 +4084,9 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
        struct mlx5e_vlan_table *vlan;
        struct mlx5e_params *params;
 
+       if (!netif_device_present(netdev))
+               return features;
+
        vlan = mlx5e_fs_get_vlan(priv->fs);
        mutex_lock(&priv->state_lock);
        params = &priv->channels.params;
index 75b9e15..7d90e5b 100644 (file)
@@ -191,7 +191,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
        if (err) {
                netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
                            rep->vport, err);
-               return;
+               goto out;
        }
 
        #define MLX5_GET_CTR(p, x) \
@@ -241,6 +241,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
        rep_stats->tx_vport_rdma_multicast_bytes =
                MLX5_GET_CTR(out, received_ib_multicast.octets);
 
+out:
        kvfree(out);
 }
 
index c8820ab..3df455f 100644 (file)
@@ -2419,7 +2419,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 
        priv = mlx5i_epriv(netdev);
        tstamp = &priv->tstamp;
-       stats = rq->stats;
+       stats = &priv->channel_stats[rq->ix]->rq;
 
        flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
        g = (flags_rqpn >> 28) & 3;
index 9af2aa2..dbadaf1 100644 (file)
@@ -1301,7 +1301,6 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
-               mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
                if (err)
                        return err;
        }
@@ -1359,8 +1358,10 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
        }
        mutex_unlock(&tc->t_lock);
 
-       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+               mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
                mlx5e_detach_mod_hdr(priv, flow);
+       }
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
                mlx5_fc_destroy(priv->mdev, attr->counter);
index e455b21..c981fa7 100644 (file)
@@ -143,7 +143,7 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
                if (mlx5_esw_indir_table_decap_vport(attr))
                        vport = mlx5_esw_indir_table_decap_vport(attr);
 
-               if (attr && !attr->chain && esw_attr->int_port)
+               if (!attr->chain && esw_attr && esw_attr->int_port)
                        metadata =
                                mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
                else
@@ -4143,8 +4143,6 @@ int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
        }
 
        hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
-       memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability),
-              MLX5_UN_SZ_BYTES(hca_cap_union));
        MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1);
 
        err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
@@ -4236,8 +4234,6 @@ int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
        }
 
        hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
-       memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability),
-              MLX5_UN_SZ_BYTES(hca_cap_union));
        MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);
 
        err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
index c247cca..eff92dc 100644 (file)
@@ -90,9 +90,21 @@ static void mlx5i_get_ringparam(struct net_device *dev,
 static int mlx5i_set_channels(struct net_device *dev,
                              struct ethtool_channels *ch)
 {
-       struct mlx5e_priv *priv = mlx5i_epriv(dev);
+       struct mlx5i_priv *ipriv = netdev_priv(dev);
+       struct mlx5e_priv *epriv = mlx5i_epriv(dev);
+
+       /* The rtnl lock protects against races between this ethtool op
+        * and sub-interface ndo_init/uninit.
+        */

+       ASSERT_RTNL();
+       if (ipriv->num_sub_interfaces > 0) {
+               mlx5_core_warn(epriv->mdev,
+                              "can't change number of channels for interfaces with sub interfaces (%u)\n",
+                              ipriv->num_sub_interfaces);
+               return -EINVAL;
+       }
 
-       return mlx5e_ethtool_set_channels(priv, ch);
+       return mlx5e_ethtool_set_channels(epriv, ch);
 }
 
 static void mlx5i_get_channels(struct net_device *dev,
index 2c73c84..911cf4d 100644 (file)
@@ -160,6 +160,44 @@ void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
        stats->tx_dropped = sstats->tx_queue_dropped;
 }
 
+struct net_device *mlx5i_parent_get(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+       struct mlx5i_priv *ipriv, *parent_ipriv;
+       struct net_device *parent_dev;
+       int parent_ifindex;
+
+       ipriv = priv->ppriv;
+
+       parent_ifindex = netdev->netdev_ops->ndo_get_iflink(netdev);
+       parent_dev = dev_get_by_index(dev_net(netdev), parent_ifindex);
+       if (!parent_dev)
+               return NULL;
+
+       parent_ipriv = netdev_priv(parent_dev);
+
+       ASSERT_RTNL();
+       parent_ipriv->num_sub_interfaces++;
+
+       ipriv->parent_dev = parent_dev;
+
+       return parent_dev;
+}
+
+void mlx5i_parent_put(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+       struct mlx5i_priv *ipriv, *parent_ipriv;
+
+       ipriv = priv->ppriv;
+       parent_ipriv = netdev_priv(ipriv->parent_dev);
+
+       ASSERT_RTNL();
+       parent_ipriv->num_sub_interfaces--;
+
+       dev_put(ipriv->parent_dev);
+}
+
 int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
index 99d46fd..f3f2af9 100644 (file)
@@ -54,9 +54,11 @@ struct mlx5i_priv {
        struct rdma_netdev rn; /* keep this first */
        u32 qpn;
        bool   sub_interface;
+       u32    num_sub_interfaces;
        u32    qkey;
        u16    pkey_index;
        struct mlx5i_pkey_qpn_ht *qpn_htbl;
+       struct net_device *parent_dev;
        char  *mlx5e_priv[];
 };
 
@@ -117,5 +119,9 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more);
 void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
 
+/* Reference management for child to parent interfaces. */
+struct net_device *mlx5i_parent_get(struct net_device *netdev);
+void mlx5i_parent_put(struct net_device *netdev);
+
 #endif /* CONFIG_MLX5_CORE_IPOIB */
 #endif /* __MLX5E_IPOB_H__ */
index 4d9c9e4..03e6812 100644 (file)
@@ -158,21 +158,28 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
        struct mlx5e_priv *priv = mlx5i_epriv(dev);
        struct mlx5i_priv *ipriv, *parent_ipriv;
        struct net_device *parent_dev;
-       int parent_ifindex;
 
        ipriv = priv->ppriv;
 
-       /* Get QPN to netdevice hash table from parent */
-       parent_ifindex = dev->netdev_ops->ndo_get_iflink(dev);
-       parent_dev = dev_get_by_index(dev_net(dev), parent_ifindex);
+       /* Link to parent */
+       parent_dev = mlx5i_parent_get(dev);
        if (!parent_dev) {
                mlx5_core_warn(priv->mdev, "failed to get parent device\n");
                return -EINVAL;
        }
 
+       if (dev->num_rx_queues < parent_dev->real_num_rx_queues) {
+               mlx5_core_warn(priv->mdev,
+                              "failed to create child device with rx queues [%d] less than parent's [%d]\n",
+                              dev->num_rx_queues,
+                              parent_dev->real_num_rx_queues);
+               mlx5i_parent_put(dev);
+               return -EINVAL;
+       }
+
+       /* Get QPN to netdevice hash table from parent */
        parent_ipriv = netdev_priv(parent_dev);
        ipriv->qpn_htbl = parent_ipriv->qpn_htbl;
-       dev_put(parent_dev);
 
        return mlx5i_dev_init(dev);
 }
@@ -184,6 +191,7 @@ static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
 {
+       mlx5i_parent_put(netdev);
        return mlx5i_dev_cleanup(netdev);
 }
 
index 69cfe60..69318b1 100644 (file)
@@ -681,7 +681,7 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
 static const struct ptp_clock_info mlx5_ptp_clock_info = {
        .owner          = THIS_MODULE,
        .name           = "mlx5_ptp",
-       .max_adj        = 100000000,
+       .max_adj        = 50000000,
        .n_alarm        = 0,
        .n_ext_ts       = 0,
        .n_per_out      = 0,
index 74cbe53..b851141 100644 (file)
@@ -3,7 +3,12 @@
 
 #include "dr_types.h"
 
+#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN < 2048)
+/* don't try to optimize STE allocation if the stack is too constraining */
+#define DR_RULE_MAX_STES_OPTIMIZED 0
+#else
 #define DR_RULE_MAX_STES_OPTIMIZED 5
+#endif
 #define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
 
 static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,
@@ -1218,10 +1223,7 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
 
        mlx5dr_domain_nic_unlock(nic_dmn);
 
-       if (unlikely(!hw_ste_arr_is_opt))
-               kfree(hw_ste_arr);
-
-       return 0;
+       goto out;
 
 free_rule:
        dr_rule_clean_rule_members(rule, nic_rule);
@@ -1238,6 +1240,7 @@ remove_from_nic_tbl:
 free_hw_ste:
        mlx5dr_domain_nic_unlock(nic_dmn);
 
+out:
        if (unlikely(!hw_ste_arr_is_opt))
                kfree(hw_ste_arr);
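
DR_RULE_MAX_STES_OPTIMIZED sizes an on-stack STE array used to avoid a heap allocation on the rule-insertion fast path; with CONFIG_FRAME_WARN below 2048 that array alone can trip the frame-size warning, so the constant is forced to zero and the array shrinks to its action-only minimum. The second hunk merely merges the duplicated "free only if not on stack" tail into a shared out label. Rough shape of the stack/heap switch (names abridged, not the driver's exact identifiers):

        u8 stack_buf[MAX_OPT * STE_SIZE];       /* tiny when MAX_OPT == 0 */
        u8 *buf = stack_buf;
        bool on_stack = n_stes <= MAX_OPT;

        if (!on_stack) {
                buf = kzalloc(n_stes * STE_SIZE, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
        }

        /* ... build STEs in buf ... */

        if (!on_stack)
                kfree(buf);
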
 
index c22c3ac..09e3277 100644 (file)
@@ -2951,7 +2951,7 @@ struct mlxsw_sp_nexthop_group_info {
           gateway:1, /* routes using the group use a gateway */
           is_resilient:1;
        struct list_head list; /* member in nh_res_grp_list */
-       struct mlxsw_sp_nexthop nexthops[0];
+       struct mlxsw_sp_nexthop nexthops[];
 #define nh_rif nexthops[0].rif
 };
 
index f9ebfaa..a834843 100644 (file)
@@ -1073,6 +1073,9 @@ void lan966x_ptp_deinit(struct lan966x *lan966x)
        struct lan966x_port *port;
        int i;
 
+       if (!lan966x->ptp)
+               return;
+
        for (i = 0; i < lan966x->num_phys_ports; i++) {
                port = lan966x->ports[i];
                if (!port)
index d8dc9fb..a54c042 100644 (file)
@@ -95,10 +95,7 @@ lan966x_vcap_is2_get_port_keysets(struct net_device *dev, int lookup,
        bool found = false;
        u32 val;
 
-       /* Check if the port keyset selection is enabled */
        val = lan_rd(lan966x, ANA_VCAP_S2_CFG(port->chip_port));
-       if (!ANA_VCAP_S2_CFG_ENA_GET(val))
-               return -ENOENT;
 
        /* Collect all keysets for the port in a list */
        if (l3_proto == ETH_P_ALL)
index 24592d9..dadd61b 100644 (file)
@@ -1996,10 +1996,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
 
                /* 8168F family. */
                { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
-               /* It seems this chip version never made it to
-                * the wild. Let's disable detection.
-                * { 0x7cf, 0x481,      RTL_GIGA_MAC_VER_36 },
-                */
+               { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
                { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
 
                /* 8168E family. */
index d42e1af..2f7d8e4 100644 (file)
@@ -90,7 +90,6 @@ struct mediatek_dwmac_plat_data {
 struct mediatek_dwmac_variant {
        int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
        int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
-       void (*dwmac_fix_mac_speed)(void *priv, unsigned int speed);
 
        /* clock ids to be requested */
        const char * const *clk_list;
@@ -443,32 +442,9 @@ static int mt8195_set_delay(struct mediatek_dwmac_plat_data *plat)
        return 0;
 }
 
-static void mt8195_fix_mac_speed(void *priv, unsigned int speed)
-{
-       struct mediatek_dwmac_plat_data *priv_plat = priv;
-
-       if ((phy_interface_mode_is_rgmii(priv_plat->phy_mode))) {
-               /* prefer 2ns fixed delay which is controlled by TXC_PHASE_CTRL,
-                * when link speed is 1Gbps with RGMII interface,
-                * Fall back to delay macro circuit for 10/100Mbps link speed.
-                */
-               if (speed == SPEED_1000)
-                       regmap_update_bits(priv_plat->peri_regmap,
-                                          MT8195_PERI_ETH_CTRL0,
-                                          MT8195_RGMII_TXC_PHASE_CTRL |
-                                          MT8195_DLY_GTXC_ENABLE |
-                                          MT8195_DLY_GTXC_INV |
-                                          MT8195_DLY_GTXC_STAGES,
-                                          MT8195_RGMII_TXC_PHASE_CTRL);
-               else
-                       mt8195_set_delay(priv_plat);
-       }
-}
-
 static const struct mediatek_dwmac_variant mt8195_gmac_variant = {
        .dwmac_set_phy_interface = mt8195_set_interface,
        .dwmac_set_delay = mt8195_set_delay,
-       .dwmac_fix_mac_speed = mt8195_fix_mac_speed,
        .clk_list = mt8195_dwmac_clk_l,
        .num_clks = ARRAY_SIZE(mt8195_dwmac_clk_l),
        .dma_bit_mask = 35,
@@ -619,8 +595,6 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
        plat->bsp_priv = priv_plat;
        plat->init = mediatek_dwmac_init;
        plat->clks_config = mediatek_dwmac_clks_config;
-       if (priv_plat->variant->dwmac_fix_mac_speed)
-               plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed;
 
        plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
                                             sizeof(*plat->safety_feat_cfg),
index fc06dde..b4388ca 100644 (file)
@@ -210,7 +210,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
                }
                writel(acr_value, ptpaddr + PTP_ACR);
                mutex_unlock(&priv->aux_ts_lock);
-               ret = 0;
+               /* wait for auxts fifo clear to finish */
+               ret = readl_poll_timeout(ptpaddr + PTP_ACR, acr_value,
+                                        !(acr_value & PTP_ACR_ATSFC),
+                                        10, 10000);
                break;
 
        default:
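
Rather than assuming the auxiliary-snapshot FIFO clear completes immediately, the code now polls PTP_ACR until the self-clearing PTP_ACR_ATSFC bit drops. readl_poll_timeout() (linux/iopoll.h) re-reads the register into acr_value on each iteration and gives up after the timeout:

        /*
         * readl_poll_timeout(addr, val, cond, sleep_us, timeout_us):
         * repeats val = readl(addr) until cond is true or timeout_us
         * elapses; returns 0 on success, -ETIMEDOUT otherwise.
         */
        ret = readl_poll_timeout(ptpaddr + PTP_ACR, acr_value,
                                 !(acr_value & PTP_ACR_ATSFC),
                                 10, 10000);    /* 10 us poll, 10 ms cap */
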
index 7552c40..b83390c 100644 (file)
@@ -357,7 +357,7 @@ static const struct ipa_mem ipa_mem_local_data[] = {
 static const struct ipa_mem_data ipa_mem_data = {
        .local_count    = ARRAY_SIZE(ipa_mem_local_data),
        .local          = ipa_mem_local_data,
-       .imem_addr      = 0x146a9000,
+       .imem_addr      = 0x146a8000,
        .imem_size      = 0x00002000,
        .smem_id        = 497,
        .smem_size      = 0x00009000,
index 8911cd2..c140edb 100644 (file)
@@ -1008,6 +1008,12 @@ static const struct usb_device_id        products[] = {
                                      USB_CDC_PROTO_NONE),
        .driver_info = (unsigned long)&wwan_info,
 }, {
+       /* Cinterion PLS62-W modem by GEMALTO/THALES */
+       USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x005b, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET,
+                                     USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long)&wwan_info,
+}, {
        /* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
        USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,
                                      USB_CDC_SUBCLASS_ETHERNET,
index a481a1d..23da1d9 100644 (file)
@@ -9836,6 +9836,7 @@ static const struct usb_device_id rtl8152_table[] = {
        REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab),
        REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6),
        REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
+       REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e),
        REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
        REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f),
        REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3054),
index a83699d..fdd0c9a 100644
@@ -79,7 +79,8 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
        /* Apple ARM64 platforms have their own idea of board type, passed in
         * via the device tree. They also have an antenna SKU parameter
         */
-       if (!of_property_read_string(np, "brcm,board-type", &prop))
+       err = of_property_read_string(np, "brcm,board-type", &prop);
+       if (!err)
                settings->board_type = prop;
 
        if (!of_property_read_string(np, "apple,antenna-sku", &prop))
@@ -87,7 +88,7 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
 
        /* Set board-type to the first string of the machine compatible prop */
        root = of_find_node_by_path("/");
-       if (root && !settings->board_type) {
+       if (root && err) {
                char *board_type;
                const char *tmp;
 
index c1ba429..0016369 100644
@@ -977,7 +977,7 @@ static int read_xenbus_vif_flags(struct backend_info *be)
        return 0;
 }
 
-static int netback_remove(struct xenbus_device *dev)
+static void netback_remove(struct xenbus_device *dev)
 {
        struct backend_info *be = dev_get_drvdata(&dev->dev);
 
@@ -992,7 +992,6 @@ static int netback_remove(struct xenbus_device *dev)
        kfree(be->hotplug_script);
        kfree(be);
        dev_set_drvdata(&dev->dev, NULL);
-       return 0;
 }
 
 /*
index 14aec41..12b0742 100644
@@ -2646,7 +2646,7 @@ static void xennet_bus_close(struct xenbus_device *dev)
        } while (!ret);
 }
 
-static int xennet_remove(struct xenbus_device *dev)
+static void xennet_remove(struct xenbus_device *dev)
 {
        struct netfront_info *info = dev_get_drvdata(&dev->dev);
 
@@ -2662,8 +2662,6 @@ static int xennet_remove(struct xenbus_device *dev)
                rtnl_unlock();
        }
        xennet_free_netdev(info->netdev);
-
-       return 0;
 }
 
 static const struct xenbus_device_id netfront_ids[] = {
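This hunk and the netback one above belong to the tree-wide conversion of the xenbus .remove callback from int to void: every implementation returned 0 and the value was effectively ignored, so drivers now just clean up and return. A sketch of the resulting shape (names hypothetical):

static void example_remove(struct xenbus_device *dev)
{
        struct example_info *info = dev_get_drvdata(&dev->dev);

        /* tear down and free; there is no error code to report */
        kfree(info);
        dev_set_drvdata(&dev->dev, NULL);
}

static struct xenbus_driver example_driver = {
        .ids = example_ids,
        .remove = example_remove,
        /* .probe, .otherend_changed, ... */
};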
index 6f71ac7..ed9c5e2 100644
@@ -153,10 +153,17 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
        return usb_submit_urb(phy->ack_urb, flags);
 }
 
+struct pn533_out_arg {
+       struct pn533_usb_phy *phy;
+       struct completion done;
+};
+
 static int pn533_usb_send_frame(struct pn533 *dev,
                                struct sk_buff *out)
 {
        struct pn533_usb_phy *phy = dev->phy;
+       struct pn533_out_arg arg;
+       void *cntx;
        int rc;
 
        if (phy->priv == NULL)
@@ -168,10 +175,17 @@ static int pn533_usb_send_frame(struct pn533 *dev,
        print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
                             out->data, out->len, false);
 
+       init_completion(&arg.done);
+       cntx = phy->out_urb->context;
+       phy->out_urb->context = &arg;
+
        rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
        if (rc)
                return rc;
 
+       wait_for_completion(&arg.done);
+       phy->out_urb->context = cntx;
+
        if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
                /* request for response for sent packet directly */
                rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
@@ -408,7 +422,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
        return arg.rc;
 }
 
-static void pn533_send_complete(struct urb *urb)
+static void pn533_out_complete(struct urb *urb)
+{
+       struct pn533_out_arg *arg = urb->context;
+       struct pn533_usb_phy *phy = arg->phy;
+
+       switch (urb->status) {
+       case 0:
+               break; /* success */
+       case -ECONNRESET:
+       case -ENOENT:
+               dev_dbg(&phy->udev->dev,
+                       "The urb has been stopped (status %d)\n",
+                       urb->status);
+               break;
+       case -ESHUTDOWN:
+       default:
+               nfc_err(&phy->udev->dev,
+                       "Urb failure (status %d)\n",
+                       urb->status);
+       }
+
+       complete(&arg->done);
+}
+
+static void pn533_ack_complete(struct urb *urb)
 {
        struct pn533_usb_phy *phy = urb->context;
 
@@ -496,10 +534,10 @@ static int pn533_usb_probe(struct usb_interface *interface,
 
        usb_fill_bulk_urb(phy->out_urb, phy->udev,
                          usb_sndbulkpipe(phy->udev, out_endpoint),
-                         NULL, 0, pn533_send_complete, phy);
+                         NULL, 0, pn533_out_complete, phy);
        usb_fill_bulk_urb(phy->ack_urb, phy->udev,
                          usb_sndbulkpipe(phy->udev, out_endpoint),
-                         NULL, 0, pn533_send_complete, phy);
+                         NULL, 0, pn533_ack_complete, phy);
 
        switch (id->driver_info) {
        case PN533_DEVICE_STD:
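The pn533 fix serializes the out URB by parking a stack-allocated completion in urb->context for the duration of the transfer, so the frame is known to be on the wire before the response URB is submitted. A condensed sketch of the pattern, minus the driver's status reporting:

#include <linux/completion.h>
#include <linux/usb.h>

struct out_arg {
        struct completion done;
};

static void example_out_complete(struct urb *urb)
{
        struct out_arg *arg = urb->context;

        /* wake the submitter regardless of urb->status */
        complete(&arg->done);
}

static int example_send_sync(struct urb *urb)
{
        struct out_arg arg;
        void *saved = urb->context;
        int rc;

        init_completion(&arg.done);
        urb->context = &arg;

        rc = usb_submit_urb(urb, GFP_KERNEL);
        if (rc) {
                urb->context = saved;
                return rc;
        }

        wait_for_completion(&arg.done);
        urb->context = saved;
        return 0;
}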
index e36aeb5..bf1c60e 100644
@@ -1493,7 +1493,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
        }
 
        ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
-                            NVME_QUIRK_SKIP_CID_GEN);
+                            NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS);
        if (ret) {
                dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
                goto put_dev;
index a863991..06f52db 100644
@@ -8,8 +8,13 @@
 #include <linux/io_uring.h>
 #include "nvme.h"
 
+enum {
+       NVME_IOCTL_VEC          = (1 << 0),
+       NVME_IOCTL_PARTITION    = (1 << 1),
+};
+
 static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
-               fmode_t mode)
+               unsigned int flags, fmode_t mode)
 {
        u32 effects;
 
@@ -17,6 +22,13 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
                return true;
 
        /*
+        * Do not allow unprivileged passthrough on partitions, as that allows an
+        * escape from the containment of the partition.
+        */
+       if (flags & NVME_IOCTL_PARTITION)
+               return false;
+
+       /*
         * Do not allow unprivileged processes to send vendor specific or fabrics
         * commands as we can't be sure about their effects.
         */
@@ -150,7 +162,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 static int nvme_map_user_request(struct request *req, u64 ubuffer,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
                u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
-               bool vec)
+               unsigned int flags)
 {
        struct request_queue *q = req->q;
        struct nvme_ns *ns = q->queuedata;
@@ -163,7 +175,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
                struct iov_iter iter;
 
                /* fixedbufs is only for non-vectored io */
-               if (WARN_ON_ONCE(vec))
+               if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
                        return -EINVAL;
                ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
                                rq_data_dir(req), &iter, ioucmd);
@@ -172,8 +184,8 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
                ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
        } else {
                ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
-                               bufflen, GFP_KERNEL, vec, 0, 0,
-                               rq_data_dir(req));
+                               bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
+                               0, rq_data_dir(req));
        }
 
        if (ret)
@@ -203,9 +215,9 @@ out:
 }
 
 static int nvme_submit_user_cmd(struct request_queue *q,
-               struct nvme_command *cmd, u64 ubuffer,
-               unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-               u32 meta_seed, u64 *result, unsigned timeout, bool vec)
+               struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
+               void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
+               u64 *result, unsigned timeout, unsigned int flags)
 {
        struct nvme_ctrl *ctrl;
        struct request *req;
@@ -221,7 +233,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
        req->timeout = timeout;
        if (ubuffer && bufflen) {
                ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
-                               meta_len, meta_seed, &meta, NULL, vec);
+                               meta_len, meta_seed, &meta, NULL, flags);
                if (ret)
                        return ret;
        }
@@ -304,10 +316,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
        c.rw.apptag = cpu_to_le16(io.apptag);
        c.rw.appmask = cpu_to_le16(io.appmask);
 
-       return nvme_submit_user_cmd(ns->queue, &c,
-                       io.addr, length,
-                       metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
-                       false);
+       return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
+                       meta_len, lower_32_bits(io.slba), NULL, 0, 0);
 }
 
 static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
@@ -325,7 +335,8 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
 }
 
 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
-                       struct nvme_passthru_cmd __user *ucmd, fmode_t mode)
+               struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
+               fmode_t mode)
 {
        struct nvme_passthru_cmd cmd;
        struct nvme_command c;
@@ -353,16 +364,15 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);
 
-       if (!nvme_cmd_allowed(ns, &c, mode))
+       if (!nvme_cmd_allowed(ns, &c, 0, mode))
                return -EACCES;
 
        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);
 
        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-                       cmd.addr, cmd.data_len,
-                       nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
-                       0, &result, timeout, false);
+                       cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
+                       cmd.metadata_len, 0, &result, timeout, 0);
 
        if (status >= 0) {
                if (put_user(result, &ucmd->result))
@@ -373,8 +383,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 }
 
 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
-                       struct nvme_passthru_cmd64 __user *ucmd, bool vec,
-                       fmode_t mode)
+               struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
+               fmode_t mode)
 {
        struct nvme_passthru_cmd64 cmd;
        struct nvme_command c;
@@ -401,16 +411,15 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);
 
-       if (!nvme_cmd_allowed(ns, &c, mode))
+       if (!nvme_cmd_allowed(ns, &c, flags, mode))
                return -EACCES;
 
        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);
 
        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-                       cmd.addr, cmd.data_len,
-                       nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
-                       0, &cmd.result, timeout, vec);
+                       cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
+                       cmd.metadata_len, 0, &cmd.result, timeout, flags);
 
        if (status >= 0) {
                if (put_user(cmd.result, &ucmd->result))
@@ -571,7 +580,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
        c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));
 
-       if (!nvme_cmd_allowed(ns, &c, ioucmd->file->f_mode))
+       if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode))
                return -EACCES;
 
        d.metadata = READ_ONCE(cmd->metadata);
@@ -641,9 +650,9 @@ static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
 {
        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
-               return nvme_user_cmd(ctrl, NULL, argp, mode);
+               return nvme_user_cmd(ctrl, NULL, argp, 0, mode);
        case NVME_IOCTL_ADMIN64_CMD:
-               return nvme_user_cmd64(ctrl, NULL, argp, false, mode);
+               return nvme_user_cmd64(ctrl, NULL, argp, 0, mode);
        default:
                return sed_ioctl(ctrl->opal_dev, cmd, argp);
        }
@@ -668,14 +677,14 @@ struct nvme_user_io32 {
 #endif /* COMPAT_FOR_U64_ALIGNMENT */
 
 static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
-               void __user *argp, fmode_t mode)
+               void __user *argp, unsigned int flags, fmode_t mode)
 {
        switch (cmd) {
        case NVME_IOCTL_ID:
                force_successful_syscall_return();
                return ns->head->ns_id;
        case NVME_IOCTL_IO_CMD:
-               return nvme_user_cmd(ns->ctrl, ns, argp, mode);
+               return nvme_user_cmd(ns->ctrl, ns, argp, flags, mode);
        /*
         * struct nvme_user_io can have different padding on some 32-bit ABIs.
         * Just accept the compat version as all fields that are used are the
@@ -686,37 +695,40 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
 #endif
        case NVME_IOCTL_SUBMIT_IO:
                return nvme_submit_io(ns, argp);
-       case NVME_IOCTL_IO64_CMD:
-               return nvme_user_cmd64(ns->ctrl, ns, argp, false, mode);
        case NVME_IOCTL_IO64_CMD_VEC:
-               return nvme_user_cmd64(ns->ctrl, ns, argp, true, mode);
+               flags |= NVME_IOCTL_VEC;
+               fallthrough;
+       case NVME_IOCTL_IO64_CMD:
+               return nvme_user_cmd64(ns->ctrl, ns, argp, flags, mode);
        default:
                return -ENOTTY;
        }
 }
 
-static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg,
-                       fmode_t mode)
-{
-       if (is_ctrl_ioctl(cmd))
-               return nvme_ctrl_ioctl(ns->ctrl, cmd, arg, mode);
-       return nvme_ns_ioctl(ns, cmd, arg, mode);
-}
-
 int nvme_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
 {
        struct nvme_ns *ns = bdev->bd_disk->private_data;
+       void __user *argp = (void __user *)arg;
+       unsigned int flags = 0;
 
-       return __nvme_ioctl(ns, cmd, (void __user *)arg, mode);
+       if (bdev_is_partition(bdev))
+               flags |= NVME_IOCTL_PARTITION;
+
+       if (is_ctrl_ioctl(cmd))
+               return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode);
+       return nvme_ns_ioctl(ns, cmd, argp, flags, mode);
 }
 
 long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        struct nvme_ns *ns =
                container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
+       void __user *argp = (void __user *)arg;
 
-       return __nvme_ioctl(ns, cmd, (void __user *)arg, file->f_mode);
+       if (is_ctrl_ioctl(cmd))
+               return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, file->f_mode);
+       return nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
 }
 
 static int nvme_uring_cmd_checks(unsigned int issue_flags)
@@ -806,6 +818,10 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
        void __user *argp = (void __user *)arg;
        struct nvme_ns *ns;
        int srcu_idx, ret = -EWOULDBLOCK;
+       unsigned int flags = 0;
+
+       if (bdev_is_partition(bdev))
+               flags |= NVME_IOCTL_PARTITION;
 
        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
@@ -821,7 +837,7 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
                return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
                                        mode);
 
-       ret = nvme_ns_ioctl(ns, cmd, argp, mode);
+       ret = nvme_ns_ioctl(ns, cmd, argp, flags, mode);
 out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
@@ -846,7 +862,7 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
                return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
                                file->f_mode);
 
-       ret = nvme_ns_ioctl(ns, cmd, argp, file->f_mode);
+       ret = nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
 out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
@@ -945,7 +961,7 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
        kref_get(&ns->kref);
        up_read(&ctrl->namespaces_rwsem);
 
-       ret = nvme_user_cmd(ctrl, ns, argp, mode);
+       ret = nvme_user_cmd(ctrl, ns, argp, 0, mode);
        nvme_put_ns(ns);
        return ret;
 
@@ -962,9 +978,9 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 
        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
-               return nvme_user_cmd(ctrl, NULL, argp, file->f_mode);
+               return nvme_user_cmd(ctrl, NULL, argp, 0, file->f_mode);
        case NVME_IOCTL_ADMIN64_CMD:
-               return nvme_user_cmd64(ctrl, NULL, argp, false, file->f_mode);
+               return nvme_user_cmd64(ctrl, NULL, argp, 0, file->f_mode);
        case NVME_IOCTL_IO_CMD:
                return nvme_dev_user_cmd(ctrl, argp, file->f_mode);
        case NVME_IOCTL_RESET:
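Across these hunks the single "bool vec" parameter becomes an "unsigned int flags" bitmask so a second property of the call site (issued on a partition) can ride down the same chain into nvme_cmd_allowed(). The essence of the conversion, using the flag names from the hunks and a hypothetical helper:

#include <linux/blkdev.h>

enum {
        NVME_IOCTL_VEC          = (1 << 0),
        NVME_IOCTL_PARTITION    = (1 << 1),
};

/* derive the flags once at the entry point, then pass them through */
static unsigned int example_ioctl_flags(struct block_device *bdev)
{
        unsigned int flags = 0;

        if (bdev_is_partition(bdev))
                flags |= NVME_IOCTL_PARTITION;
        return flags;
}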
index b13bacc..a2553b7 100644
@@ -2533,7 +2533,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
         */
        result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (result < 0)
-               return result;
+               goto disable;
 
        dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
 
@@ -2586,8 +2586,13 @@ static int nvme_pci_enable(struct nvme_dev *dev)
        pci_enable_pcie_error_reporting(pdev);
        pci_save_state(pdev);
 
-       return nvme_pci_configure_admin_queue(dev);
+       result = nvme_pci_configure_admin_queue(dev);
+       if (result)
+               goto free_irq;
+       return result;
 
+ free_irq:
+       pci_free_irq_vectors(pdev);
  disable:
        pci_disable_device(pdev);
        return result;
@@ -3495,7 +3500,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_SINGLE_VECTOR |
                                NVME_QUIRK_128_BYTES_SQES |
                                NVME_QUIRK_SHARED_TAGS |
-                               NVME_QUIRK_SKIP_CID_GEN },
+                               NVME_QUIRK_SKIP_CID_GEN |
+                               NVME_QUIRK_IDENTIFY_CNS },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { 0, }
 };
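Both nvme_pci_enable() hunks restore the conventional goto unwind: every failure after a successful claim must release what came before it, in reverse order. A skeleton of the pattern with a hypothetical configure step:

static int example_enable(struct pci_dev *pdev)
{
        int result;

        result = pci_enable_device_mem(pdev);
        if (result)
                return result;

        result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (result < 0)
                goto disable;

        result = example_configure(pdev);       /* hypothetical next step */
        if (result)
                goto free_irq;
        return 0;

 free_irq:
        pci_free_irq_vectors(pdev);
 disable:
        pci_disable_device(pdev);
        return result;
}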
index a0d2713..99ec91e 100644
@@ -225,7 +225,7 @@ config PCIE_ARTPEC6_EP
 config PCIE_BT1
        tristate "Baikal-T1 PCIe controller"
        depends on MIPS_BAIKAL_T1 || COMPILE_TEST
-       depends on PCI_MSI_IRQ_DOMAIN
+       depends on PCI_MSI
        select PCIE_DW_HOST
        help
          Enables support for the PCIe controller in the Baikal-T1 SoC to work
index 7378e2f..fcd029c 100644
@@ -1055,14 +1055,12 @@ out:
        return err;
 }
 
-static int pcifront_xenbus_remove(struct xenbus_device *xdev)
+static void pcifront_xenbus_remove(struct xenbus_device *xdev)
 {
        struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);
 
        if (pdev)
                free_pdev(pdev);
-
-       return 0;
 }
 
 static const struct xenbus_device_id xenpci_ids[] = {
index 43e7651..c6537a1 100644
@@ -1700,8 +1700,10 @@ int ssam_request_sync(struct ssam_controller *ctrl,
                return status;
 
        status = ssam_request_sync_init(rqst, spec->flags);
-       if (status)
+       if (status) {
+               ssam_request_sync_free(rqst);
                return status;
+       }
 
        ssam_request_sync_set_resp(rqst, rsp);
 
index f556557..6913297 100644
@@ -916,6 +916,20 @@ static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
        if (sshp_parse_command(dev, data, &command, &command_data))
                return;
 
+       /*
+        * Check if the message was intended for us. If not, drop it.
+        *
+        * Note: We will need to change this to handle debug messages. On newer
+        * generation devices, these seem to be sent to tid_out=0x03. We as
+        * host can still receive them as they can be forwarded via an override
+        * option on SAM, but doing so does not change tid_out=0x00.
+        */
+       if (command->tid_out != 0x00) {
+               rtl_warn(rtl, "rtl: dropping message not intended for us (tid = %#04x)\n",
+                        command->tid_out);
+               return;
+       }
+
        if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
                ssh_rtl_rx_event(rtl, command, &command_data);
        else
index 439d282..8d92498 100644
@@ -932,7 +932,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
        if (enable_stb && (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB)) {
                err = amd_pmc_s2d_init(dev);
                if (err)
-                       return err;
+                       goto err_pci_dev_put;
        }
 
        platform_set_drvdata(pdev, dev);
index c685a70..cb15acd 100644
@@ -121,6 +121,10 @@ static struct quirk_entry quirk_asus_tablet_mode = {
        .tablet_switch_mode = asus_wmi_lid_flip_rog_devid,
 };
 
+static struct quirk_entry quirk_asus_ignore_fan = {
+       .wmi_ignore_fan = true,
+};
+
 static int dmi_matched(const struct dmi_system_id *dmi)
 {
        pr_info("Identified laptop model '%s'\n", dmi->ident);
@@ -473,6 +477,15 @@ static const struct dmi_system_id asus_quirks[] = {
                },
                .driver_data = &quirk_asus_tablet_mode,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "ASUS VivoBook E410MA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "E410MA"),
+               },
+               .driver_data = &quirk_asus_ignore_fan,
+       },
        {},
 };
 
@@ -511,6 +524,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x30, { KEY_VOLUMEUP } },
        { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
        { KE_KEY, 0x32, { KEY_MUTE } },
+       { KE_KEY, 0x33, { KEY_SCREENLOCK } },
        { KE_KEY, 0x35, { KEY_SCREENLOCK } },
        { KE_KEY, 0x38, { KEY_PROG3 } }, /* Armoury Crate */
        { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
@@ -544,6 +558,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
        { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
        { KE_KEY, 0x82, { KEY_CAMERA } },
+       { KE_KEY, 0x85, { KEY_CAMERA } },
        { KE_KEY, 0x86, { KEY_PROG1 } }, /* MyASUS Key */
        { KE_KEY, 0x88, { KEY_RFKILL  } }, /* Radio Toggle Key */
        { KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
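The quirk mechanism at work here pairs a dmi_system_id entry with driver_data pointing at a quirk_entry; in this driver, dmi_matched() stashes that pointer when the DMI strings match. A pared-down sketch with made-up vendor and model strings:

static struct quirk_entry quirk_example_ignore_fan = {
        .wmi_ignore_fan = true,
};

static const struct dmi_system_id example_quirks[] = {
        {
                .callback = dmi_matched,
                .ident = "Example Notebook",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
                },
                .driver_data = &quirk_example_ignore_fan,
        },
        {},
};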
index 6f81b28..104188d 100644
@@ -2243,7 +2243,9 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
        asus->fan_type = FAN_TYPE_NONE;
        asus->agfn_pwm = -1;
 
-       if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
+       if (asus->driver->quirks->wmi_ignore_fan)
+               asus->fan_type = FAN_TYPE_NONE;
+       else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
                asus->fan_type = FAN_TYPE_SPEC83;
        else if (asus_wmi_has_agfn_fan(asus))
                asus->fan_type = FAN_TYPE_AGFN;
@@ -2436,6 +2438,9 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
 
        *available = false;
 
+       if (asus->fan_type == FAN_TYPE_NONE)
+               return 0;
+
        err = fan_curve_get_factory_default(asus, fan_dev);
        if (err) {
                return 0;
index 6531699..a478ebf 100644
@@ -38,6 +38,7 @@ struct quirk_entry {
        bool store_backlight_power;
        bool wmi_backlight_set_devstate;
        bool wmi_force_als_set;
+       bool wmi_ignore_fan;
        enum asus_wmi_tablet_switch_mode tablet_switch_mode;
        int wapf;
        /*
index c82b3d6..c517bd4 100644
@@ -61,7 +61,7 @@ static const struct key_entry dell_wmi_keymap_type_0012[] = {
        /* privacy mic mute */
        { KE_KEY, 0x0001, { KEY_MICMUTE } },
        /* privacy camera mute */
-       { KE_SW,  0x0002, { SW_CAMERA_LENS_COVER } },
+       { KE_VSW, 0x0002, { SW_CAMERA_LENS_COVER } },
        { KE_END, 0},
 };
 
@@ -115,11 +115,15 @@ bool dell_privacy_process_event(int type, int code, int status)
 
        switch (code) {
        case DELL_PRIVACY_AUDIO_EVENT: /* Mic mute */
-       case DELL_PRIVACY_CAMERA_EVENT: /* Camera mute */
                priv->last_status = status;
                sparse_keymap_report_entry(priv->input_dev, key, 1, true);
                ret = true;
                break;
+       case DELL_PRIVACY_CAMERA_EVENT: /* Camera mute */
+               priv->last_status = status;
+               sparse_keymap_report_entry(priv->input_dev, key, !(status & CAMERA_STATUS), false);
+               ret = true;
+               break;
        default:
                dev_dbg(&priv->wdev->dev, "unknown event type 0x%04x 0x%04x\n", type, code);
        }
@@ -292,7 +296,7 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
 {
        struct privacy_wmi_data *priv;
        struct key_entry *keymap;
-       int ret, i;
+       int ret, i, j;
 
        ret = wmi_has_guid(DELL_PRIVACY_GUID);
        if (!ret)
@@ -304,6 +308,11 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
 
        dev_set_drvdata(&wdev->dev, priv);
        priv->wdev = wdev;
+
+       ret = get_current_status(priv->wdev);
+       if (ret)
+               return ret;
+
        /* create evdev passing interface */
        priv->input_dev = devm_input_allocate_device(&wdev->dev);
        if (!priv->input_dev)
@@ -318,9 +327,20 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
        /* remap the keymap code with Dell privacy key type 0x12 as prefix
         * KEY_MICMUTE scancode will be reported as 0x120001
         */
-       for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) {
-               keymap[i] = dell_wmi_keymap_type_0012[i];
-               keymap[i].code |= (0x0012 << 16);
+       for (i = 0, j = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) {
+               /*
+                * Unlike keys where only presses matter, userspace may act
+                * on switches in both of their positions. Only register
+                * SW_CAMERA_LENS_COVER if it is actually there.
+                */
+               if (dell_wmi_keymap_type_0012[i].type == KE_VSW &&
+                   dell_wmi_keymap_type_0012[i].sw.code == SW_CAMERA_LENS_COVER &&
+                   !(priv->features_present & BIT(DELL_PRIVACY_TYPE_CAMERA)))
+                       continue;
+
+               keymap[j] = dell_wmi_keymap_type_0012[i];
+               keymap[j].code |= (0x0012 << 16);
+               j++;
        }
        ret = sparse_keymap_setup(priv->input_dev, keymap, NULL);
        kfree(keymap);
@@ -331,11 +351,12 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
        priv->input_dev->name = "Dell Privacy Driver";
        priv->input_dev->id.bustype = BUS_HOST;
 
-       ret = input_register_device(priv->input_dev);
-       if (ret)
-               return ret;
+       /* Report initial camera-cover status */
+       if (priv->features_present & BIT(DELL_PRIVACY_TYPE_CAMERA))
+               input_report_switch(priv->input_dev, SW_CAMERA_LENS_COVER,
+                                   !(priv->last_status & CAMERA_STATUS));
 
-       ret = get_current_status(priv->wdev);
+       ret = input_register_device(priv->input_dev);
        if (ret)
                return ret;
 
index 435d2d3..0eb5bfd 100644
@@ -1621,6 +1621,12 @@ static const struct dmi_system_id set_fn_lock_led_list[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion R7000P2020H"),
                }
        },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion 5 15ARH05"),
+               }
+       },
        {}
 };
 
index b2342b3..74dc2cf 100644
@@ -181,6 +181,9 @@ int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
                return PTR_ERR(int3472->regulator.gpio);
        }
 
+       /* Ensure the pin is in output mode and non-active state */
+       gpiod_direction_output(int3472->regulator.gpio, 0);
+
        cfg.dev = &int3472->adev->dev;
        cfg.init_data = &init_data;
        cfg.ena_gpiod = int3472->regulator.gpio;
index 974a132..c42c3fa 100644
@@ -168,6 +168,8 @@ static int skl_int3472_map_gpio_to_clk(struct int3472_discrete_device *int3472,
                        return (PTR_ERR(gpio));
 
                int3472->clock.ena_gpio = gpio;
+               /* Ensure the pin is in output mode and non-active state */
+               gpiod_direction_output(int3472->clock.ena_gpio, 0);
                break;
        case INT3472_GPIO_TYPE_PRIVACY_LED:
                gpio = acpi_get_and_request_gpiod(path, pin, "int3472,privacy-led");
@@ -175,6 +177,8 @@ static int skl_int3472_map_gpio_to_clk(struct int3472_discrete_device *int3472,
                        return (PTR_ERR(gpio));
 
                int3472->clock.led_gpio = gpio;
+               /* Ensure the pin is in output mode and non-active state */
+               gpiod_direction_output(int3472->clock.led_gpio, 0);
                break;
        default:
                dev_err(int3472->dev, "Invalid GPIO type 0x%02x for clock\n", type);
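Both int3472 hunks apply the same rule: a GPIO handed over by ACPI is not guaranteed to be configured as an output, so drive it to a known inactive level before wiring it into the regulator or clock framework. A sketch (the connection ID is a placeholder):

#include <linux/gpio/consumer.h>

static int example_claim_gpio(struct device *dev)
{
        struct gpio_desc *gpio;

        gpio = devm_gpiod_get(dev, "enable", GPIOD_ASIS);
        if (IS_ERR(gpio))
                return PTR_ERR(gpio);

        /* ensure output mode, start driven to the inactive state */
        return gpiod_direction_output(gpio, 0);
}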
index f1d802f..3a15d32 100644
@@ -1029,6 +1029,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,          adl_core_init),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,        adl_core_init),
        X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE,          mtl_core_init),
+       X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,        mtl_core_init),
        {}
 };
 
index ca76076..b362241 100644
@@ -46,7 +46,8 @@ static struct {
        {SIMATIC_IPC_IPC427D, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE},
        {SIMATIC_IPC_IPC427E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_427E},
        {SIMATIC_IPC_IPC477E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_427E},
-       {SIMATIC_IPC_IPC427G, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
+       {SIMATIC_IPC_IPCBX_39A, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
+       {SIMATIC_IPC_IPCPX_39A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227G},
 };
 
 static int register_platform_devices(u32 station_id)
index 7156ae2..537d6a2 100644
@@ -1887,14 +1887,21 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
                break;
        }
 
-       ret = sony_call_snc_handle(handle, probe_base, &result);
-       if (ret)
-               return ret;
+       /*
+        * Only probe if there is a separate probe_base, otherwise the probe call
+        * is equivalent to __sony_nc_kbd_backlight_mode_set(0), resulting in
+        * the keyboard backlight being turned off.
+        */
+       if (probe_base) {
+               ret = sony_call_snc_handle(handle, probe_base, &result);
+               if (ret)
+                       return ret;
 
-       if ((handle == 0x0137 && !(result & 0x02)) ||
-                       !(result & 0x01)) {
-               dprintk("no backlight keyboard found\n");
-               return 0;
+               if ((handle == 0x0137 && !(result & 0x02)) ||
+                               !(result & 0x01)) {
+                       dprintk("no backlight keyboard found\n");
+                       return 0;
+               }
        }
 
        kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
index 1195293..a959468 100644
@@ -10311,9 +10311,11 @@ static DEFINE_MUTEX(dytc_mutex);
 static int dytc_capabilities;
 static bool dytc_mmc_get_available;
 
-static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *profile)
+static int convert_dytc_to_profile(int funcmode, int dytcmode,
+               enum platform_profile_option *profile)
 {
-       if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
+       switch (funcmode) {
+       case DYTC_FUNCTION_MMC:
                switch (dytcmode) {
                case DYTC_MODE_MMC_LOWPOWER:
                        *profile = PLATFORM_PROFILE_LOW_POWER;
@@ -10329,8 +10331,7 @@ static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *p
                        return -EINVAL;
                }
                return 0;
-       }
-       if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
+       case DYTC_FUNCTION_PSC:
                switch (dytcmode) {
                case DYTC_MODE_PSC_LOWPOWER:
                        *profile = PLATFORM_PROFILE_LOW_POWER;
@@ -10344,6 +10345,14 @@ static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *p
                default: /* Unknown mode */
                        return -EINVAL;
                }
+               return 0;
+       case DYTC_FUNCTION_AMT:
+               /* For now return balanced. It's the closest we have to 'auto' */
+               *profile = PLATFORM_PROFILE_BALANCED;
+               return 0;
+       default:
+               /* Unknown function */
+               return -EOPNOTSUPP;
        }
        return 0;
 }
@@ -10492,6 +10501,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
                err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
                if (err)
                        goto unlock;
+
                /* system supports AMT, activate it when on balanced */
                if (dytc_capabilities & BIT(DYTC_FC_AMT))
                        dytc_control_amt(profile == PLATFORM_PROFILE_BALANCED);
@@ -10507,7 +10517,7 @@ static void dytc_profile_refresh(void)
 {
        enum platform_profile_option profile;
        int output, err = 0;
-       int perfmode;
+       int perfmode, funcmode;
 
        mutex_lock(&dytc_mutex);
        if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
@@ -10522,8 +10532,9 @@ static void dytc_profile_refresh(void)
        if (err)
                return;
 
+       funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
        perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
-       convert_dytc_to_profile(perfmode, &profile);
+       convert_dytc_to_profile(funcmode, perfmode, &profile);
        if (profile != dytc_current_profile) {
                dytc_current_profile = profile;
                platform_profile_notify();
index baae312..f009953 100644
@@ -264,6 +264,23 @@ static const struct ts_dmi_data connect_tablet9_data = {
        .properties     = connect_tablet9_props,
 };
 
+static const struct property_entry csl_panther_tab_hd_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+       PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"),
+       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       { }
+};
+
+static const struct ts_dmi_data csl_panther_tab_hd_data = {
+       .acpi_name      = "MSSL1680:00",
+       .properties     = csl_panther_tab_hd_props,
+};
+
 static const struct property_entry cube_iwork8_air_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
        PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
@@ -1125,6 +1142,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                },
        },
        {
+               /* CSL Panther Tab HD */
+               .driver_data = (void *)&csl_panther_tab_hd_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "CSL Computer GmbH & Co. KG"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "CSL Panther Tab HD"),
+               },
+       },
+       {
                /* CUBE iwork8 Air */
                .driver_data = (void *)&cube_iwork8_air_data,
                .matches = {
index e01b32d..00828f5 100644
@@ -498,6 +498,12 @@ static int da9211_i2c_probe(struct i2c_client *i2c)
 
        chip->chip_irq = i2c->irq;
 
+       ret = da9211_regulator_init(chip);
+       if (ret < 0) {
+               dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
+               return ret;
+       }
+
        if (chip->chip_irq != 0) {
                ret = devm_request_threaded_irq(chip->dev, chip->chip_irq, NULL,
                                        da9211_irq_handler,
@@ -512,11 +518,6 @@ static int da9211_i2c_probe(struct i2c_client *i2c)
                dev_warn(chip->dev, "No IRQ configured\n");
        }
 
-       ret = da9211_regulator_init(chip);
-
-       if (ret < 0)
-               dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
-
        return ret;
 }
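The da9211 reorder follows a general probe() rule: everything the interrupt handler dereferences must be valid before the IRQ is requested, since the handler can run the moment devm_request_threaded_irq() returns. Minimal sketch (init and handler are hypothetical):

static int example_probe(struct i2c_client *i2c)
{
        struct example_chip *chip;
        int ret;

        chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;
        chip->dev = &i2c->dev;

        /* 1: initialize the state the IRQ handler will touch */
        ret = example_init(chip);
        if (ret < 0)
                return ret;

        /* 2: only now expose ourselves to interrupts */
        return devm_request_threaded_irq(chip->dev, i2c->irq, NULL,
                                         example_irq_handler,
                                         IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                         "example", chip);
}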
 
index 43b5b93..ae60213 100644
@@ -1016,7 +1016,7 @@ static const struct rpmh_vreg_init_data pm8550_vreg_data[] = {
        RPMH_VREG("ldo8",   "ldo%s8",  &pmic5_pldo_lv, "vdd-l8-l9"),
        RPMH_VREG("ldo9",   "ldo%s9",  &pmic5_pldo,    "vdd-l8-l9"),
        RPMH_VREG("ldo10",  "ldo%s10", &pmic5_nldo,    "vdd-l1-l4-l10"),
-       RPMH_VREG("ldo11",  "ldo%s11", &pmic5_pldo,    "vdd-l11"),
+       RPMH_VREG("ldo11",  "ldo%s11", &pmic5_nldo,    "vdd-l11"),
        RPMH_VREG("ldo12",  "ldo%s12", &pmic5_pldo,    "vdd-l12"),
        RPMH_VREG("ldo13",  "ldo%s13", &pmic5_pldo,    "vdd-l2-l13-l14"),
        RPMH_VREG("ldo14",  "ldo%s14", &pmic5_pldo,    "vdd-l2-l13-l14"),
index 41ba22f..8c038cc 100644
@@ -162,7 +162,7 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
 {
        if (hisi_hba->hw->slot_index_alloc ||
-           slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
+           slot_idx < HISI_SAS_RESERVED_IPTT) {
                spin_lock(&hisi_hba->lock);
                hisi_sas_slot_index_clear(hisi_hba, slot_idx);
                spin_unlock(&hisi_hba->lock);
@@ -704,7 +704,7 @@ static int hisi_sas_init_device(struct domain_device *device)
                int_to_scsilun(0, &lun);
 
                while (retry-- > 0) {
-                       rc = sas_clear_task_set(device, lun.scsi_lun);
+                       rc = sas_abort_task_set(device, lun.scsi_lun);
                        if (rc == TMF_RESP_FUNC_COMPLETE) {
                                hisi_sas_release_task(hisi_hba, device);
                                break;
@@ -1316,7 +1316,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
                                device->linkrate = phy->sas_phy.linkrate;
 
                        hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
-               } else
+               } else if (!port->port_attached)
                        port->id = 0xff;
        }
 }
index 1ccce70..5e80225 100644
@@ -889,7 +889,9 @@ void sas_ata_device_link_abort(struct domain_device *device, bool force_reset)
 {
        struct ata_port *ap = device->sata_dev.ap;
        struct ata_link *link = &ap->link;
+       unsigned long flags;
 
+       spin_lock_irqsave(ap->lock, flags);
        device->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */
        device->sata_dev.fis[3] = ATA_ABORTED; /* tf error */
 
@@ -897,6 +899,7 @@ void sas_ata_device_link_abort(struct domain_device *device, bool force_reset)
        if (force_reset)
                link->eh_info.action |= ATA_EH_RESET;
        ata_link_abort(link);
+       spin_unlock_irqrestore(ap->lock, flags);
 }
 EXPORT_SYMBOL_GPL(sas_ata_device_link_abort);
 
index ef86ca4..3bf8cf3 100644
@@ -1,5 +1,5 @@
 # mpi3mr makefile
-obj-m += mpi3mr.o
+obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr.o
 mpi3mr-y +=  mpi3mr_os.o     \
                mpi3mr_fw.o \
                mpi3mr_app.o \
index 0c4aaba..286a445 100644
@@ -3633,8 +3633,7 @@ int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
        int i, retval = 0, capb = 0;
        u16 message_control;
        u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
-           (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
-           (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
+           ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
 
        if (pci_enable_device_mem(pdev)) {
                ioc_err(mrioc, "pci_enable_device_mem: failed\n");
index 4e981cc..6906154 100644
@@ -2992,8 +2992,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
        struct sysinfo s;
        u64 coherent_dma_mask, dma_mask;
 
-       if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4 ||
-           dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32)) {
+       if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4) {
                ioc->dma_mask = 32;
                coherent_dma_mask = dma_mask = DMA_BIT_MASK(32);
        /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
index cc69538..8553277 100644
@@ -1511,8 +1511,6 @@ static int inquiry_vpd_b0(unsigned char *arr)
        put_unaligned_be64(sdebug_write_same_length, &arr[32]);
 
        return 0x3c; /* Mandatory page length for Logical Block Provisioning */
-
-       return sizeof(vpdb0_data);
 }
 
 /* Block device characteristics VPD page (SBC-3) */
index a7960ad..2aa2c2a 100644
@@ -231,6 +231,11 @@ scsi_abort_command(struct scsi_cmnd *scmd)
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;
 
+       if (!shost->hostt->eh_abort_handler) {
+               /* No abort handler, fail command directly */
+               return FAILED;
+       }
+
        if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
                /*
                 * Retry after abort failed, escalate to next level.
index 13cfd3e..b9b9730 100644
@@ -1677,6 +1677,13 @@ static const char *iscsi_session_state_name(int state)
        return name;
 }
 
+static char *iscsi_session_target_state_name[] = {
+       [ISCSI_SESSION_TARGET_UNBOUND]   = "UNBOUND",
+       [ISCSI_SESSION_TARGET_ALLOCATED] = "ALLOCATED",
+       [ISCSI_SESSION_TARGET_SCANNED]   = "SCANNED",
+       [ISCSI_SESSION_TARGET_UNBINDING] = "UNBINDING",
+};
+
 int iscsi_session_chkready(struct iscsi_cls_session *session)
 {
        int err;
@@ -1786,9 +1793,13 @@ static int iscsi_user_scan_session(struct device *dev, void *data)
                if ((scan_data->channel == SCAN_WILD_CARD ||
                     scan_data->channel == 0) &&
                    (scan_data->id == SCAN_WILD_CARD ||
-                    scan_data->id == id))
+                    scan_data->id == id)) {
                        scsi_scan_target(&session->dev, 0, id,
                                         scan_data->lun, scan_data->rescan);
+                       spin_lock_irqsave(&session->lock, flags);
+                       session->target_state = ISCSI_SESSION_TARGET_SCANNED;
+                       spin_unlock_irqrestore(&session->lock, flags);
+               }
        }
 
 user_scan_exit:
@@ -1961,31 +1972,41 @@ static void __iscsi_unbind_session(struct work_struct *work)
        struct iscsi_cls_host *ihost = shost->shost_data;
        unsigned long flags;
        unsigned int target_id;
+       bool remove_target = true;
 
        ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n");
 
        /* Prevent new scans and make sure scanning is not in progress */
        mutex_lock(&ihost->mutex);
        spin_lock_irqsave(&session->lock, flags);
-       if (session->target_id == ISCSI_MAX_TARGET) {
+       if (session->target_state == ISCSI_SESSION_TARGET_ALLOCATED) {
+               remove_target = false;
+       } else if (session->target_state != ISCSI_SESSION_TARGET_SCANNED) {
                spin_unlock_irqrestore(&session->lock, flags);
                mutex_unlock(&ihost->mutex);
-               goto unbind_session_exit;
+               ISCSI_DBG_TRANS_SESSION(session,
+                       "Skipping target unbinding: Session is unbound/unbinding.\n");
+               return;
        }
 
+       session->target_state = ISCSI_SESSION_TARGET_UNBINDING;
        target_id = session->target_id;
        session->target_id = ISCSI_MAX_TARGET;
        spin_unlock_irqrestore(&session->lock, flags);
        mutex_unlock(&ihost->mutex);
 
-       scsi_remove_target(&session->dev);
+       if (remove_target)
+               scsi_remove_target(&session->dev);
 
        if (session->ida_used)
                ida_free(&iscsi_sess_ida, target_id);
 
-unbind_session_exit:
        iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
        ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
+
+       spin_lock_irqsave(&session->lock, flags);
+       session->target_state = ISCSI_SESSION_TARGET_UNBOUND;
+       spin_unlock_irqrestore(&session->lock, flags);
 }
 
 static void __iscsi_destroy_session(struct work_struct *work)
@@ -2062,6 +2083,9 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
                session->ida_used = true;
        } else
                session->target_id = target_id;
+       spin_lock_irqsave(&session->lock, flags);
+       session->target_state = ISCSI_SESSION_TARGET_ALLOCATED;
+       spin_unlock_irqrestore(&session->lock, flags);
 
        dev_set_name(&session->dev, "session%u", session->sid);
        err = device_add(&session->dev);
@@ -4370,6 +4394,19 @@ iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0);
 iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0);
 
 static ssize_t
+show_priv_session_target_state(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+
+       return sysfs_emit(buf, "%s\n",
+                       iscsi_session_target_state_name[session->target_state]);
+}
+
+static ISCSI_CLASS_ATTR(priv_sess, target_state, S_IRUGO,
+                       show_priv_session_target_state, NULL);
+
+static ssize_t
 show_priv_session_state(struct device *dev, struct device_attribute *attr,
                        char *buf)
 {
@@ -4471,6 +4508,7 @@ static struct attribute *iscsi_session_attrs[] = {
        &dev_attr_sess_boot_target.attr,
        &dev_attr_priv_sess_recovery_tmo.attr,
        &dev_attr_priv_sess_state.attr,
+       &dev_attr_priv_sess_target_state.attr,
        &dev_attr_priv_sess_creator.attr,
        &dev_attr_sess_chap_out_idx.attr,
        &dev_attr_sess_chap_in_idx.attr,
@@ -4584,6 +4622,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
                return S_IRUGO | S_IWUSR;
        else if (attr == &dev_attr_priv_sess_state.attr)
                return S_IRUGO;
+       else if (attr == &dev_attr_priv_sess_target_state.attr)
+               return S_IRUGO;
        else if (attr == &dev_attr_priv_sess_creator.attr)
                return S_IRUGO;
        else if (attr == &dev_attr_priv_sess_target_id.attr)
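The new attribute follows the standard read-only sysfs recipe: a show routine that formats a snapshot of driver state with sysfs_emit(), made visible through the class's is_visible hook. A generic device-attribute sketch of the same shape (the transport code uses its own ISCSI_CLASS_ATTR wrapper instead):

static ssize_t target_state_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct example_session *s = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%s\n", example_state_name[s->state]);
}
static DEVICE_ATTR_RO(target_state);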
index d7a84c0..22705eb 100644
@@ -1823,6 +1823,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
        ret = storvsc_do_io(dev, cmd_request, get_cpu());
        put_cpu();
 
+       if (ret)
+               scsi_dma_unmap(scmnd);
+
        if (ret == -EAGAIN) {
                /* no more space */
                ret = SCSI_MLQUEUE_DEVICE_BUSY;
index 66b316d..71a3bb8 100644
@@ -995,7 +995,7 @@ static int scsifront_suspend(struct xenbus_device *dev)
        return err;
 }
 
-static int scsifront_remove(struct xenbus_device *dev)
+static void scsifront_remove(struct xenbus_device *dev)
 {
        struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
 
@@ -1011,8 +1011,6 @@ static int scsifront_remove(struct xenbus_device *dev)
 
        scsifront_free_ring(info);
        scsi_host_put(info->host);
-
-       return 0;
 }
 
 static void scsifront_disconnect(struct vscsifrnt_info *info)
index 520b4cc..91db3c9 100644
 #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
        FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
                ((op)->data.nbytes >> 16) & 0xffff) | \
-       FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, (op)->dummy.nbytes * 8))
+       FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
+                 (op)->dummy.buswidth != 0 ? \
+                 (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
+                 0))
 
 #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
        FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_BANK, chipsel) | \
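The macro change converts dummy bytes into dummy cycles: each byte is 8 bits, the bus clocks buswidth bits per cycle, and a zero buswidth must not be divided by. The same arithmetic as a plain helper (sketch):

#include <linux/spi/spi-mem.h>

static unsigned int example_dummy_cycles(const struct spi_mem_op *op)
{
        if (!op->dummy.buswidth)
                return 0;

        /* bytes * 8 bits, divided by bus lines clocked per cycle */
        return (op->dummy.nbytes * 8) / op->dummy.buswidth;
}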
index 6de8360..9eab6c2 100644
@@ -1253,6 +1253,11 @@ static int mtk_spi_probe(struct platform_device *pdev)
                dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
                           addr_bits, ret);
 
+       ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
+                              IRQF_TRIGGER_NONE, dev_name(dev), master);
+       if (ret)
+               return dev_err_probe(dev, ret, "failed to register irq\n");
+
        pm_runtime_enable(dev);
 
        ret = devm_spi_register_master(dev, master);
@@ -1261,13 +1266,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
                return dev_err_probe(dev, ret, "failed to register master\n");
        }
 
-       ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
-                              IRQF_TRIGGER_NONE, dev_name(dev), master);
-       if (ret) {
-               pm_runtime_disable(dev);
-               return dev_err_probe(dev, ret, "failed to register irq\n");
-       }
-
        return 0;
 }
 
index 3cc7bb4..15f174f 100644
@@ -2310,7 +2310,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
        if (!of_property_read_u32(nc, "spi-max-frequency", &value))
                spi->max_speed_hz = value;
 
-       if (!of_property_read_u16(nc, "spi-cs-setup-ns", &cs_setup)) {
+       if (!of_property_read_u16(nc, "spi-cs-setup-delay-ns", &cs_setup)) {
                spi->cs_setup.value = cs_setup;
                spi->cs_setup.unit = SPI_DELAY_UNIT_NSECS;
        }
index 6313e7d..1935ca6 100644
@@ -68,7 +68,7 @@ static_assert(N_SPI_MINORS > 0 && N_SPI_MINORS <= 256);
 
 struct spidev_data {
        dev_t                   devt;
-       spinlock_t              spi_lock;
+       struct mutex            spi_lock;
        struct spi_device       *spi;
        struct list_head        device_entry;
 
@@ -95,9 +95,8 @@ spidev_sync(struct spidev_data *spidev, struct spi_message *message)
        int status;
        struct spi_device *spi;
 
-       spin_lock_irq(&spidev->spi_lock);
+       mutex_lock(&spidev->spi_lock);
        spi = spidev->spi;
-       spin_unlock_irq(&spidev->spi_lock);
 
        if (spi == NULL)
                status = -ESHUTDOWN;
@@ -107,6 +106,7 @@ spidev_sync(struct spidev_data *spidev, struct spi_message *message)
        if (status == 0)
                status = message->actual_length;
 
+       mutex_unlock(&spidev->spi_lock);
        return status;
 }
 
@@ -359,12 +359,12 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
         * we issue this ioctl.
         */
        spidev = filp->private_data;
-       spin_lock_irq(&spidev->spi_lock);
+       mutex_lock(&spidev->spi_lock);
        spi = spi_dev_get(spidev->spi);
-       spin_unlock_irq(&spidev->spi_lock);
-
-       if (spi == NULL)
+       if (spi == NULL) {
+               mutex_unlock(&spidev->spi_lock);
                return -ESHUTDOWN;
+       }
 
        /* use the buffer lock here for triple duty:
         *  - prevent I/O (from us) so calling spi_setup() is safe;
@@ -508,6 +508,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
        mutex_unlock(&spidev->buf_lock);
        spi_dev_put(spi);
+       mutex_unlock(&spidev->spi_lock);
        return retval;
 }
 
@@ -529,12 +530,12 @@ spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
         * we issue this ioctl.
         */
        spidev = filp->private_data;
-       spin_lock_irq(&spidev->spi_lock);
+       mutex_lock(&spidev->spi_lock);
        spi = spi_dev_get(spidev->spi);
-       spin_unlock_irq(&spidev->spi_lock);
-
-       if (spi == NULL)
+       if (spi == NULL) {
+               mutex_unlock(&spidev->spi_lock);
                return -ESHUTDOWN;
+       }
 
        /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
        mutex_lock(&spidev->buf_lock);
@@ -561,6 +562,7 @@ spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
 done:
        mutex_unlock(&spidev->buf_lock);
        spi_dev_put(spi);
+       mutex_unlock(&spidev->spi_lock);
        return retval;
 }
 
@@ -601,7 +603,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
        if (!spidev->tx_buffer) {
                spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
                if (!spidev->tx_buffer) {
-                       dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                        status = -ENOMEM;
                        goto err_find_dev;
                }
@@ -610,7 +611,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
        if (!spidev->rx_buffer) {
                spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
                if (!spidev->rx_buffer) {
-                       dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                        status = -ENOMEM;
                        goto err_alloc_rx_buf;
                }
@@ -640,10 +640,10 @@ static int spidev_release(struct inode *inode, struct file *filp)
        spidev = filp->private_data;
        filp->private_data = NULL;
 
-       spin_lock_irq(&spidev->spi_lock);
+       mutex_lock(&spidev->spi_lock);
        /* ... after we unbound from the underlying device? */
        dofree = (spidev->spi == NULL);
-       spin_unlock_irq(&spidev->spi_lock);
+       mutex_unlock(&spidev->spi_lock);
 
        /* last close? */
        spidev->users--;
@@ -776,7 +776,7 @@ static int spidev_probe(struct spi_device *spi)
 
        /* Initialize the driver data */
        spidev->spi = spi;
-       spin_lock_init(&spidev->spi_lock);
+       mutex_init(&spidev->spi_lock);
        mutex_init(&spidev->buf_lock);
 
        INIT_LIST_HEAD(&spidev->device_entry);
@@ -821,9 +821,9 @@ static void spidev_remove(struct spi_device *spi)
        /* prevent new opens */
        mutex_lock(&device_list_lock);
        /* make sure ops on existing fds can abort cleanly */
-       spin_lock_irq(&spidev->spi_lock);
+       mutex_lock(&spidev->spi_lock);
        spidev->spi = NULL;
-       spin_unlock_irq(&spidev->spi_lock);
+       mutex_unlock(&spidev->spi_lock);
 
        list_del(&spidev->device_entry);
        device_destroy(spidev_class, spidev->devt);
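The spidev conversion swaps the spinlock for a mutex because the lock is now held across spi_sync(), which sleeps; sleeping under spin_lock_irq() is forbidden. The resulting critical section, reduced to its core:

static int example_sync(struct spidev_data *spidev,
                        struct spi_message *message)
{
        int status;

        mutex_lock(&spidev->spi_lock);
        if (spidev->spi == NULL)
                status = -ESHUTDOWN;    /* device was unbound under us */
        else
                status = spi_sync(spidev->spi, message);        /* may sleep */
        mutex_unlock(&spidev->spi_lock);

        return status;
}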
index 7c23112..5bddb2f 100644
@@ -52,17 +52,22 @@ static DEFINE_SPINLOCK(xencons_lock);
 
 static struct xencons_info *vtermno_to_xencons(int vtermno)
 {
-       struct xencons_info *entry, *n, *ret = NULL;
+       struct xencons_info *entry, *ret = NULL;
+       unsigned long flags;
 
-       if (list_empty(&xenconsoles))
-                       return NULL;
+       spin_lock_irqsave(&xencons_lock, flags);
+       if (list_empty(&xenconsoles)) {
+               spin_unlock_irqrestore(&xencons_lock, flags);
+               return NULL;
+       }
 
-       list_for_each_entry_safe(entry, n, &xenconsoles, list) {
+       list_for_each_entry(entry, &xenconsoles, list) {
                if (entry->vtermno == vtermno) {
                        ret  = entry;
                        break;
                }
        }
+       spin_unlock_irqrestore(&xencons_lock, flags);
 
        return ret;
 }
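Two things change in this lookup: the walk now happens under xencons_lock with interrupts saved (console paths can run in IRQ context), and list_for_each_entry_safe() drops back to list_for_each_entry() since nothing is deleted mid-iteration. The generic form of the locked lookup:

unsigned long flags;
struct xencons_info *entry, *found = NULL;

spin_lock_irqsave(&xencons_lock, flags);
list_for_each_entry(entry, &xenconsoles, list) {
        if (entry->vtermno == vtermno) {
                found = entry;
                break;
        }
}
spin_unlock_irqrestore(&xencons_lock, flags);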
@@ -223,7 +228,7 @@ static int xen_hvm_console_init(void)
 {
        int r;
        uint64_t v = 0;
-       unsigned long gfn;
+       unsigned long gfn, flags;
        struct xencons_info *info;
 
        if (!xen_hvm_domain())
@@ -258,9 +263,9 @@ static int xen_hvm_console_init(void)
                goto err;
        info->vtermno = HVC_COOKIE;
 
-       spin_lock(&xencons_lock);
+       spin_lock_irqsave(&xencons_lock, flags);
        list_add_tail(&info->list, &xenconsoles);
-       spin_unlock(&xencons_lock);
+       spin_unlock_irqrestore(&xencons_lock, flags);
 
        return 0;
 err:
@@ -283,6 +288,7 @@ static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
 static int xen_pv_console_init(void)
 {
        struct xencons_info *info;
+       unsigned long flags;
 
        if (!xen_pv_domain())
                return -ENODEV;
@@ -299,9 +305,9 @@ static int xen_pv_console_init(void)
                /* already configured */
                return 0;
        }
-       spin_lock(&xencons_lock);
+       spin_lock_irqsave(&xencons_lock, flags);
        xencons_info_pv_init(info, HVC_COOKIE);
-       spin_unlock(&xencons_lock);
+       spin_unlock_irqrestore(&xencons_lock, flags);
 
        return 0;
 }
@@ -309,6 +315,7 @@ static int xen_pv_console_init(void)
 static int xen_initial_domain_console_init(void)
 {
        struct xencons_info *info;
+       unsigned long flags;
 
        if (!xen_initial_domain())
                return -ENODEV;
@@ -323,9 +330,9 @@ static int xen_initial_domain_console_init(void)
        info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
        info->vtermno = HVC_COOKIE;
 
-       spin_lock(&xencons_lock);
+       spin_lock_irqsave(&xencons_lock, flags);
        list_add_tail(&info->list, &xenconsoles);
-       spin_unlock(&xencons_lock);
+       spin_unlock_irqrestore(&xencons_lock, flags);
 
        return 0;
 }
@@ -380,10 +387,12 @@ static void xencons_free(struct xencons_info *info)
 
 static int xen_console_remove(struct xencons_info *info)
 {
+       unsigned long flags;
+
        xencons_disconnect_backend(info);
-       spin_lock(&xencons_lock);
+       spin_lock_irqsave(&xencons_lock, flags);
        list_del(&info->list);
-       spin_unlock(&xencons_lock);
+       spin_unlock_irqrestore(&xencons_lock, flags);
        if (info->xbdev != NULL)
                xencons_free(info);
        else {
@@ -394,9 +403,9 @@ static int xen_console_remove(struct xencons_info *info)
        return 0;
 }
 
-static int xencons_remove(struct xenbus_device *dev)
+static void xencons_remove(struct xenbus_device *dev)
 {
-       return xen_console_remove(dev_get_drvdata(&dev->dev));
+       xen_console_remove(dev_get_drvdata(&dev->dev));
 }
 
 static int xencons_connect_backend(struct xenbus_device *dev,
@@ -464,6 +473,7 @@ static int xencons_probe(struct xenbus_device *dev,
 {
        int ret, devid;
        struct xencons_info *info;
+       unsigned long flags;
 
        devid = dev->nodename[strlen(dev->nodename) - 1] - '0';
        if (devid == 0)
@@ -482,9 +492,9 @@ static int xencons_probe(struct xenbus_device *dev,
        ret = xencons_connect_backend(dev, info);
        if (ret < 0)
                goto error;
-       spin_lock(&xencons_lock);
+       spin_lock_irqsave(&xencons_lock, flags);
        list_add_tail(&info->list, &xenconsoles);
-       spin_unlock(&xencons_lock);
+       spin_unlock_irqrestore(&xencons_lock, flags);
 
        return 0;
 
@@ -584,10 +594,12 @@ static int __init xen_hvc_init(void)
 
        info->hvc = hvc_alloc(HVC_COOKIE, info->irq, ops, 256);
        if (IS_ERR(info->hvc)) {
+               unsigned long flags;
+
                r = PTR_ERR(info->hvc);
-               spin_lock(&xencons_lock);
+               spin_lock_irqsave(&xencons_lock, flags);
                list_del(&info->list);
-               spin_unlock(&xencons_lock);
+               spin_unlock_irqrestore(&xencons_lock, flags);
                if (info->irq)
                        unbind_from_irqhandler(info->irq, NULL);
                kfree(info);
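
The hvc_xen changes all follow one pattern: vtermno_to_xencons() previously
walked the console list with no lock held at all, and the other sites took
xencons_lock without masking interrupts, which is unsafe if the lock can also
be taken with interrupts disabled. The fix is the standard irqsave idiom; a
minimal self-contained sketch:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct cons_info {                      /* stand-in for xencons_info */
            struct list_head list;
            int vtermno;
    };

    static DEFINE_SPINLOCK(cons_lock);
    static LIST_HEAD(cons_list);

    /* Safe from any context: saves the caller's interrupt state and
     * restores it, instead of assuming interrupts were enabled. */
    static struct cons_info *lookup(int vtermno)
    {
            struct cons_info *entry, *ret = NULL;
            unsigned long flags;

            spin_lock_irqsave(&cons_lock, flags);
            list_for_each_entry(entry, &cons_list, list) {
                    if (entry->vtermno == vtermno) {
                            ret = entry;
                            break;
                    }
            }
            spin_unlock_irqrestore(&cons_lock, flags);
            return ret;
    }

The _safe iterator is also dropped above: nothing is removed during the walk,
so plain list_for_each_entry() is the right tool.
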
index e18c9f4..bda61be 100644
@@ -6056,6 +6056,14 @@ void ufshcd_schedule_eh_work(struct ufs_hba *hba)
        }
 }
 
+static void ufshcd_force_error_recovery(struct ufs_hba *hba)
+{
+       spin_lock_irq(hba->host->host_lock);
+       hba->force_reset = true;
+       ufshcd_schedule_eh_work(hba);
+       spin_unlock_irq(hba->host->host_lock);
+}
+
 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
 {
        down_write(&hba->clk_scaling_lock);
@@ -9083,6 +9091,15 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
                if (!hba->dev_info.b_rpm_dev_flush_capable) {
                        ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+                       if (ret && pm_op != UFS_SHUTDOWN_PM) {
+                               /*
+                        * If an error is returned in the suspend flow, IO
+                        * will hang. Trigger the error handler and abort
+                        * suspend so that error recovery can run.
+                                */
+                               ufshcd_force_error_recovery(hba);
+                               ret = -EBUSY;
+                       }
                        if (ret)
                                goto enable_scaling;
                }
@@ -9094,6 +9111,15 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
         */
        check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
        ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
+       if (ret && pm_op != UFS_SHUTDOWN_PM) {
+               /*
+                * If an error is returned in the suspend flow, IO
+                * will hang. Trigger the error handler and abort
+                * suspend so that error recovery can run.
+                */
+               ufshcd_force_error_recovery(hba);
+               ret = -EBUSY;
+       }
        if (ret)
                goto set_dev_active;
 
index de1b091..46fdab9 100644
@@ -1530,15 +1530,13 @@ static void xenhcd_backend_changed(struct xenbus_device *dev,
        }
 }
 
-static int xenhcd_remove(struct xenbus_device *dev)
+static void xenhcd_remove(struct xenbus_device *dev)
 {
        struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
        struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
 
        xenhcd_destroy_rings(info);
        usb_put_hcd(hcd);
-
-       return 0;
 }
 
 static int xenhcd_probe(struct xenbus_device *dev,
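
This hunk and the xen-fbfront, pvcalls, pciback and scsiback hunks that follow
are mechanical fallout from one core change: the .remove callback of struct
xenbus_driver now returns void, since the bus code never had a sensible way to
act on a failure. A converted driver ends up shaped like this (the foo_* names
are placeholders, not a real driver):

    #include <linux/slab.h>
    #include <xen/xenbus.h>

    static const struct xenbus_device_id foo_ids[] = {
            { "foo" },
            { "" }
    };

    /* Was "static int foo_remove(...)" returning 0; cleanup must now
     * be written so that it cannot fail. */
    static void foo_remove(struct xenbus_device *dev)
    {
            void *info = dev_get_drvdata(&dev->dev);

            /* ... tear down rings, release grants ... */
            kfree(info);
    }

    static struct xenbus_driver foo_driver = {
            .ids    = foo_ids,
            .remove = foo_remove,
            /* .probe, .otherend_changed, ... */
    };
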
index 8752d38..d7f3e62 100644
@@ -67,7 +67,7 @@ MODULE_PARM_DESC(video,
        "Video memory size in MB, width, height in pixels (default 2,800,600)");
 
 static void xenfb_make_preferred_console(void);
-static int xenfb_remove(struct xenbus_device *);
+static void xenfb_remove(struct xenbus_device *);
 static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
 static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
 static void xenfb_disconnect_backend(struct xenfb_info *);
@@ -523,7 +523,7 @@ static int xenfb_resume(struct xenbus_device *dev)
        return xenfb_connect_backend(dev, info);
 }
 
-static int xenfb_remove(struct xenbus_device *dev)
+static void xenfb_remove(struct xenbus_device *dev)
 {
        struct xenfb_info *info = dev_get_drvdata(&dev->dev);
 
@@ -538,8 +538,6 @@ static int xenfb_remove(struct xenbus_device *dev)
        vfree(info->gfns);
        vfree(info->fb);
        kfree(info);
-
-       return 0;
 }
 
 static unsigned long vmalloc_to_gfn(void *address)
index 28b2a1f..0d4f8f4 100644
@@ -1181,9 +1181,8 @@ static void pvcalls_back_changed(struct xenbus_device *dev,
        }
 }
 
-static int pvcalls_back_remove(struct xenbus_device *dev)
+static void pvcalls_back_remove(struct xenbus_device *dev)
 {
-       return 0;
 }
 
 static int pvcalls_back_uevent(struct xenbus_device *xdev,
index 1826e8e..d5d589b 100644
@@ -225,6 +225,8 @@ again:
        return IRQ_HANDLED;
 }
 
+static void free_active_ring(struct sock_mapping *map);
+
 static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
                                   struct sock_mapping *map)
 {
@@ -240,7 +242,7 @@ static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
        for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
                gnttab_end_foreign_access(map->active.ring->ref[i], NULL);
        gnttab_end_foreign_access(map->active.ref, NULL);
-       free_page((unsigned long)map->active.ring);
+       free_active_ring(map);
 
        kfree(map);
 }
@@ -1085,7 +1087,7 @@ static const struct xenbus_device_id pvcalls_front_ids[] = {
        { "" }
 };
 
-static int pvcalls_front_remove(struct xenbus_device *dev)
+static void pvcalls_front_remove(struct xenbus_device *dev)
 {
        struct pvcalls_bedata *bedata;
        struct sock_mapping *map = NULL, *n;
@@ -1121,7 +1123,6 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
        kfree(bedata->ring.sring);
        kfree(bedata);
        xenbus_switch_state(dev, XenbusStateClosed);
-       return 0;
 }
 
 static int pvcalls_front_probe(struct xenbus_device *dev,
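
The reason pvcalls_front_free_map() must call free_active_ring() instead of
free_page(): the active ring is not a single page. Behind the interface page
sits a data buffer of 2^PVCALLS_RING_ORDER pages (note the grant loop over
1 << PVCALLS_RING_ORDER above), so freeing only the interface page leaked the
buffer. A sketch of what the matching helper plausibly looks like (condensed;
its exact body is in the driver, not in this hunk):

    static void free_active_ring(struct sock_mapping *map)
    {
            if (!map->active.ring)
                    return;

            /* Free the 2^ring_order data pages first, then the
             * interface page itself. */
            free_pages((unsigned long)map->active.data.in,
                       map->active.ring->ring_order);
            free_page((unsigned long)map->active.ring);
    }
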
index d171091..b11e401 100644
@@ -716,14 +716,12 @@ out:
        return err;
 }
 
-static int xen_pcibk_xenbus_remove(struct xenbus_device *dev)
+static void xen_pcibk_xenbus_remove(struct xenbus_device *dev)
 {
        struct xen_pcibk_device *pdev = dev_get_drvdata(&dev->dev);
 
        if (pdev != NULL)
                free_pdev(pdev);
-
-       return 0;
 }
 
 static const struct xenbus_device_id xen_pcibk_ids[] = {
index 6106ed9..954188b 100644
@@ -1249,7 +1249,7 @@ static void scsiback_release_translation_entry(struct vscsibk_info *info)
        spin_unlock_irqrestore(&info->v2p_lock, flags);
 }
 
-static int scsiback_remove(struct xenbus_device *dev)
+static void scsiback_remove(struct xenbus_device *dev)
 {
        struct vscsibk_info *info = dev_get_drvdata(&dev->dev);
 
@@ -1261,8 +1261,6 @@ static int scsiback_remove(struct xenbus_device *dev)
        gnttab_page_cache_shrink(&info->free_pages, 0);
 
        dev_set_drvdata(&dev->dev, NULL);
-
-       return 0;
 }
 
 static int scsiback_probe(struct xenbus_device *dev,
index 7dcd596..d4ddb20 100644
@@ -13,6 +13,8 @@
 #include "internal.h"
 #include "afs_cm.h"
 #include "protocol_yfs.h"
+#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
+#include <trace/events/rxrpc.h>
 
 static int afs_deliver_cb_init_call_back_state(struct afs_call *);
 static int afs_deliver_cb_init_call_back_state3(struct afs_call *);
@@ -191,7 +193,7 @@ static void afs_cm_destructor(struct afs_call *call)
  * Abort a service call from within an action function.
  */
 static void afs_abort_service_call(struct afs_call *call, u32 abort_code, int error,
-                                  const char *why)
+                                  enum rxrpc_abort_reason why)
 {
        rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
                                abort_code, error, why);
@@ -469,7 +471,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
        if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
                afs_send_empty_reply(call);
        else
-               afs_abort_service_call(call, 1, 1, "K-1");
+               afs_abort_service_call(call, 1, 1, afs_abort_probeuuid_negative);
 
        afs_put_call(call);
        _leave("");
index c62939e..7817e2b 100644
@@ -13,6 +13,8 @@
 #include "internal.h"
 #include "afs_cm.h"
 #include "protocol_yfs.h"
+#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
+#include <trace/events/rxrpc.h>
 
 struct workqueue_struct *afs_async_calls;
 
@@ -397,7 +399,8 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
 error_do_abort:
        if (ret != -ECONNABORTED) {
                rxrpc_kernel_abort_call(call->net->socket, rxcall,
-                                       RX_USER_ABORT, ret, "KSD");
+                                       RX_USER_ABORT, ret,
+                                       afs_abort_send_data_error);
        } else {
                len = 0;
                iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0);
@@ -527,7 +530,8 @@ static void afs_deliver_to_call(struct afs_call *call)
                case -ENOTSUPP:
                        abort_code = RXGEN_OPCODE;
                        rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                               abort_code, ret, "KIV");
+                                               abort_code, ret,
+                                               afs_abort_op_not_supported);
                        goto local_abort;
                case -EIO:
                        pr_err("kAFS: Call %u in bad state %u\n",
@@ -542,12 +546,14 @@ static void afs_deliver_to_call(struct afs_call *call)
                        if (state != AFS_CALL_CL_AWAIT_REPLY)
                                abort_code = RXGEN_SS_UNMARSHAL;
                        rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                               abort_code, ret, "KUM");
+                                               abort_code, ret,
+                                               afs_abort_unmarshal_error);
                        goto local_abort;
                default:
                        abort_code = RX_CALL_DEAD;
                        rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                               abort_code, ret, "KER");
+                                               abort_code, ret,
+                                               afs_abort_general_error);
                        goto local_abort;
                }
        }
@@ -619,7 +625,8 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
                        /* Kill off the call if it's still live. */
                        _debug("call interrupted");
                        if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                                   RX_USER_ABORT, -EINTR, "KWI"))
+                                                   RX_USER_ABORT, -EINTR,
+                                                   afs_abort_interrupted))
                                afs_set_call_complete(call, -EINTR, 0);
                }
        }
@@ -836,7 +843,8 @@ void afs_send_empty_reply(struct afs_call *call)
        case -ENOMEM:
                _debug("oom");
                rxrpc_kernel_abort_call(net->socket, call->rxcall,
-                                       RXGEN_SS_MARSHAL, -ENOMEM, "KOO");
+                                       RXGEN_SS_MARSHAL, -ENOMEM,
+                                       afs_abort_oom);
                fallthrough;
        default:
                _leave(" [error]");
@@ -878,7 +886,8 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
        if (n == -ENOMEM) {
                _debug("oom");
                rxrpc_kernel_abort_call(net->socket, call->rxcall,
-                                       RXGEN_SS_MARSHAL, -ENOMEM, "KOO");
+                                       RXGEN_SS_MARSHAL, -ENOMEM,
+                                       afs_abort_oom);
        }
        _leave(" [error]");
 }
@@ -900,6 +909,7 @@ int afs_extract_data(struct afs_call *call, bool want_more)
        ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter,
                                     &call->iov_len, want_more, &remote_abort,
                                     &call->service_id);
+       trace_afs_receive_data(call, call->iter, want_more, ret);
        if (ret == 0 || ret == -EAGAIN)
                return ret;
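
The RXRPC_TRACE_ONLY_DEFINE_ENUMS define ahead of the <trace/events/rxrpc.h>
include (added in both AFS files above) is what lets AFS name abort reasons
symbolically without instantiating any tracepoints: it asks the trace header
to emit only the enums generated from its EM()/E_() string-mapping lists (the
lists themselves appear in the rxrpc.h hunks near the end of this diff). A
sketch of how such a guard is typically structured (the example_* names are
illustrative, not the real header's):

    /* In the trace header: each list is written once as EM()/E_() pairs. */
    #define example_abort_reasons \
            EM(example_abort_oom,       "oom") \
            E_(example_abort_interrupt, "intr")

    /* First expansion: bare identifiers, producing the enum. */
    #undef EM
    #undef E_
    #define EM(a, b) a,
    #define E_(a, b) a
    enum example_abort_reason { example_abort_reasons };

    #ifndef EXAMPLE_TRACE_ONLY_DEFINE_ENUMS
    /* Only here are EM()/E_() redefined into TRACE_DEFINE_ENUM() and
     * __print_symbolic() mappings, and the TRACE_EVENT()s emitted. */
    #endif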
 
index de63572..9a780fa 100644
@@ -2034,7 +2034,7 @@ static int elf_core_dump(struct coredump_params *cprm)
         * The number of segs is recorded into the ELF header as a 16bit value.
         * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
         */
-       segs = cprm->vma_count + elf_core_extra_phdrs();
+       segs = cprm->vma_count + elf_core_extra_phdrs(cprm);
 
        /* for notes section */
        segs++;
@@ -2074,7 +2074,7 @@ static int elf_core_dump(struct coredump_params *cprm)
        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 
        offset += cprm->vma_data_size;
-       offset += elf_core_extra_data_size();
+       offset += elf_core_extra_data_size(cprm);
        e_shoff = offset;
 
        if (e_phnum == PN_XNUM) {
index 096e352..a05eafc 100644
@@ -1509,7 +1509,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
        tmp->next = thread_list;
        thread_list = tmp;
 
-       segs = cprm->vma_count + elf_core_extra_phdrs();
+       segs = cprm->vma_count + elf_core_extra_phdrs(cprm);
 
        /* for notes section */
        segs++;
@@ -1555,7 +1555,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 
        offset += cprm->vma_data_size;
-       offset += elf_core_extra_data_size();
+       offset += elf_core_extra_data_size(cprm);
        e_shoff = offset;
 
        if (e_phnum == PN_XNUM) {
index 5db73c0..cbc18b4 100644
@@ -278,6 +278,7 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
         * ( for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL ) +
         * unicode length of a netbios domain name
         */
+       kfree_sensitive(ses->auth_key.response);
        ses->auth_key.len = size + 2 * dlen;
        ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);
        if (!ses->auth_key.response) {
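
This kfree_sensitive() is one instance of a pattern applied throughout the
session-setup hunks below: ses->auth_key.response may still hold key material
from an earlier negotiation, so it must be scrubbed and freed before being
reassigned, and both the pointer and the length must be cleared on error paths
so stale key material is never reused. Condensed into one helper-shaped sketch:

    #include <linux/slab.h>
    #include <linux/string.h>

    /* Sketch: replace possibly-present old key material without leaking
     * it. kfree_sensitive() zeroes the buffer before freeing it. */
    static int replace_auth_key(struct cifs_ses *ses,
                                const void *key, unsigned int len)
    {
            kfree_sensitive(ses->auth_key.response);
            ses->auth_key.response = kmemdup(key, len, GFP_KERNEL);
            if (!ses->auth_key.response) {
                    ses->auth_key.len = 0;
                    return -ENOMEM;
            }
            ses->auth_key.len = len;
            return 0;
    }
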
index d371259..b2a04b4 100644
@@ -2606,11 +2606,14 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
        INIT_LIST_HEAD(&tcon->pending_opens);
        tcon->status = TID_GOOD;
 
-       /* schedule query interfaces poll */
        INIT_DELAYED_WORK(&tcon->query_interfaces,
                          smb2_query_server_interfaces);
-       queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
-                          (SMB_INTERFACE_POLL_INTERVAL * HZ));
+       if (ses->server->dialect >= SMB30_PROT_ID &&
+           (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+               /* schedule query interfaces poll */
+               queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+                                  (SMB_INTERFACE_POLL_INTERVAL * HZ));
+       }
 
        spin_lock(&cifs_tcp_ses_lock);
        list_add(&tcon->tcon_list, &ses->tcon_list);
index 43ad117..e20f888 100644
@@ -1299,7 +1299,6 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
         * Resolve the share's hostname and check whether the server address matches.  Otherwise
         * just ignore it, as we may have no upcall to resolve the hostname, or the ip address
         * conversion may have failed.
         */
-       match = true;
        extract_unc_hostname(s1, &host, &hostlen);
        scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
 
index bd374fe..a5a097a 100644
@@ -428,6 +428,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
        oparms.disposition = FILE_CREATE;
        oparms.fid = &fid;
        oparms.reconnect = false;
+       oparms.mode = 0644;
 
        rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
                       NULL, NULL);
index 0b842a0..c47b254 100644
@@ -815,6 +815,7 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
                return -EINVAL;
        }
        if (tilen) {
+               kfree_sensitive(ses->auth_key.response);
                ses->auth_key.response = kmemdup(bcc_ptr + tioffset, tilen,
                                                 GFP_KERNEL);
                if (!ses->auth_key.response) {
@@ -1428,6 +1429,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
                goto out_put_spnego_key;
        }
 
+       kfree_sensitive(ses->auth_key.response);
        ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
                                         GFP_KERNEL);
        if (!ses->auth_key.response) {
index 5048075..4cb3644 100644
@@ -562,17 +562,20 @@ static int cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
        if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
                rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls,
                                         cifs_remap(cifs_sb));
-               if (!rc)
-                       move_cifs_info_to_smb2(&data->fi, &fi);
                *adjustTZ = true;
        }
 
-       if (!rc && (le32_to_cpu(fi.Attributes) & ATTR_REPARSE)) {
+       if (!rc) {
                int tmprc;
                int oplock = 0;
                struct cifs_fid fid;
                struct cifs_open_parms oparms;
 
+               move_cifs_info_to_smb2(&data->fi, &fi);
+
+               if (!(le32_to_cpu(fi.Attributes) & ATTR_REPARSE))
+                       return 0;
+
                oparms.tcon = tcon;
                oparms.cifs_sb = cifs_sb;
                oparms.desired_access = FILE_READ_ATTRIBUTES;
@@ -716,17 +719,25 @@ cifs_mkdir_setinfo(struct inode *inode, const char *full_path,
 static int cifs_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
                          void *buf)
 {
-       FILE_ALL_INFO *fi = buf;
+       struct cifs_open_info_data *data = buf;
+       FILE_ALL_INFO fi = {};
+       int rc;
 
        if (!(oparms->tcon->ses->capabilities & CAP_NT_SMBS))
-               return SMBLegacyOpen(xid, oparms->tcon, oparms->path,
-                                    oparms->disposition,
-                                    oparms->desired_access,
-                                    oparms->create_options,
-                                    &oparms->fid->netfid, oplock, fi,
-                                    oparms->cifs_sb->local_nls,
-                                    cifs_remap(oparms->cifs_sb));
-       return CIFS_open(xid, oparms, oplock, fi);
+               rc = SMBLegacyOpen(xid, oparms->tcon, oparms->path,
+                                  oparms->disposition,
+                                  oparms->desired_access,
+                                  oparms->create_options,
+                                  &oparms->fid->netfid, oplock, &fi,
+                                  oparms->cifs_sb->local_nls,
+                                  cifs_remap(oparms->cifs_sb));
+       else
+               rc = CIFS_open(xid, oparms, oplock, &fi);
+
+       if (!rc && data)
+               move_cifs_info_to_smb2(&data->fi, &fi);
+
+       return rc;
 }
 
 static void
@@ -1050,7 +1061,7 @@ cifs_make_node(unsigned int xid, struct inode *inode,
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct inode *newinode = NULL;
        int rc = -EPERM;
-       FILE_ALL_INFO *buf = NULL;
+       struct cifs_open_info_data buf = {};
        struct cifs_io_parms io_parms;
        __u32 oplock = 0;
        struct cifs_fid fid;
@@ -1082,14 +1093,14 @@ cifs_make_node(unsigned int xid, struct inode *inode,
                                            cifs_sb->local_nls,
                                            cifs_remap(cifs_sb));
                if (rc)
-                       goto out;
+                       return rc;
 
                rc = cifs_get_inode_info_unix(&newinode, full_path,
                                              inode->i_sb, xid);
 
                if (rc == 0)
                        d_instantiate(dentry, newinode);
-               goto out;
+               return rc;
        }
 
        /*
@@ -1097,19 +1108,13 @@ cifs_make_node(unsigned int xid, struct inode *inode,
         * support block and char device (no socket & fifo)
         */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
-               goto out;
+               return rc;
 
        if (!S_ISCHR(mode) && !S_ISBLK(mode))
-               goto out;
+               return rc;
 
        cifs_dbg(FYI, "sfu compat create special file\n");
 
-       buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
-       if (buf == NULL) {
-               rc = -ENOMEM;
-               goto out;
-       }
-
        oparms.tcon = tcon;
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = GENERIC_WRITE;
@@ -1124,21 +1129,21 @@ cifs_make_node(unsigned int xid, struct inode *inode,
                oplock = REQ_OPLOCK;
        else
                oplock = 0;
-       rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
+       rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf);
        if (rc)
-               goto out;
+               return rc;
 
        /*
         * BB Do not bother to decode buf since no local inode yet to put
         * timestamps in, but we can reuse it safely.
         */
 
-       pdev = (struct win_dev *)buf;
+       pdev = (struct win_dev *)&buf.fi;
        io_parms.pid = current->tgid;
        io_parms.tcon = tcon;
        io_parms.offset = 0;
        io_parms.length = sizeof(struct win_dev);
-       iov[1].iov_base = buf;
+       iov[1].iov_base = &buf.fi;
        iov[1].iov_len = sizeof(struct win_dev);
        if (S_ISCHR(mode)) {
                memcpy(pdev->type, "IntxCHR", 8);
@@ -1157,8 +1162,8 @@ cifs_make_node(unsigned int xid, struct inode *inode,
        d_drop(dentry);
 
        /* FIXME: add code here to set EAs */
-out:
-       kfree(buf);
+
+       cifs_free_open_info(&buf);
        return rc;
 }
 
index 2c484d4..4b71f4a 100644
@@ -1453,6 +1453,7 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
 
        /* keep session key if binding */
        if (!is_binding) {
+               kfree_sensitive(ses->auth_key.response);
                ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
                                                 GFP_KERNEL);
                if (!ses->auth_key.response) {
@@ -1482,8 +1483,11 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
 out_put_spnego_key:
        key_invalidate(spnego_key);
        key_put(spnego_key);
-       if (rc)
+       if (rc) {
                kfree_sensitive(ses->auth_key.response);
+               ses->auth_key.response = NULL;
+               ses->auth_key.len = 0;
+       }
 out:
        sess_data->result = rc;
        sess_data->func = NULL;
index 2a39ffb..6e61b5b 100644
@@ -322,7 +322,8 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
        dn_off = le32_to_cpu(authblob->DomainName.BufferOffset);
        dn_len = le16_to_cpu(authblob->DomainName.Length);
 
-       if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len)
+       if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len ||
+           nt_len < CIFS_ENCPWD_SIZE)
                return -EINVAL;
 
        /* TODO : use domain name that imported from configuration file */
index 12be838..fd0a288 100644
@@ -316,9 +316,12 @@ int ksmbd_conn_handler_loop(void *p)
 
                /* 4 for rfc1002 length field */
                size = pdu_size + 4;
-               conn->request_buf = kvmalloc(size, GFP_KERNEL);
+               conn->request_buf = kvmalloc(size,
+                                            GFP_KERNEL |
+                                            __GFP_NOWARN |
+                                            __GFP_NORETRY);
                if (!conn->request_buf)
-                       continue;
+                       break;
 
                memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
                if (!ksmbd_smb_request(conn))
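
pdu_size comes straight off the wire here, so the new allocation flags are
defensive: __GFP_NOWARN suppresses the allocation-failure splat that an
oversized request could otherwise spam the log with, and __GFP_NORETRY keeps
the allocator from driving heavy reclaim on behalf of a hostile size. On
failure the connection is now dropped (break) rather than retried forever
(continue). The idiom in general:

    #include <linux/mm.h>

    /* Sketch: buffer whose size is influenced by an untrusted peer.
     * Fail fast and quietly rather than thrashing reclaim. */
    static void *alloc_untrusted(size_t size)
    {
            return kvmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
    }
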
index 14d7f35..38fbda5 100644
@@ -1928,13 +1928,13 @@ int smb2_tree_connect(struct ksmbd_work *work)
        if (conn->posix_ext_supported)
                status.tree_conn->posix_extensions = true;
 
-out_err1:
        rsp->StructureSize = cpu_to_le16(16);
+       inc_rfc1001_len(work->response_buf, 16);
+out_err1:
        rsp->Capabilities = 0;
        rsp->Reserved = 0;
        /* default manual caching */
        rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING;
-       inc_rfc1001_len(work->response_buf, 16);
 
        if (!IS_ERR(treename))
                kfree(treename);
@@ -1967,6 +1967,9 @@ out_err1:
                rsp->hdr.Status = STATUS_ACCESS_DENIED;
        }
 
+       if (status.ret != KSMBD_TREE_CONN_STATUS_OK)
+               smb2_set_err_rsp(work);
+
        return rc;
 }
 
index 63d55f5..4c6bd0b 100644
@@ -295,6 +295,7 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
        struct msghdr ksmbd_msg;
        struct kvec *iov;
        struct ksmbd_conn *conn = KSMBD_TRANS(t)->conn;
+       int max_retry = 2;
 
        iov = get_conn_iovec(t, nr_segs);
        if (!iov)
@@ -321,9 +322,11 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
                } else if (conn->status == KSMBD_SESS_NEED_RECONNECT) {
                        total_read = -EAGAIN;
                        break;
-               } else if (length == -ERESTARTSYS || length == -EAGAIN) {
+               } else if ((length == -ERESTARTSYS || length == -EAGAIN) &&
+                          max_retry) {
                        usleep_range(1000, 2000);
                        length = 0;
+                       max_retry--;
                        continue;
                } else if (length <= 0) {
                        total_read = -EAGAIN;
index 45b2c9e..0ef0703 100644
@@ -1071,8 +1071,8 @@ nfsd_file_is_cached(struct inode *inode)
 
 static __be32
 nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
-                    unsigned int may_flags, struct nfsd_file **pnf,
-                    bool open, bool want_gc)
+                    unsigned int may_flags, struct file *file,
+                    struct nfsd_file **pnf, bool want_gc)
 {
        struct nfsd_file_lookup_key key = {
                .type   = NFSD_FILE_KEY_FULL,
@@ -1147,8 +1147,7 @@ wait_for_construction:
        status = nfserrno(nfsd_open_break_lease(file_inode(nf->nf_file), may_flags));
 out:
        if (status == nfs_ok) {
-               if (open)
-                       this_cpu_inc(nfsd_file_acquisitions);
+               this_cpu_inc(nfsd_file_acquisitions);
                *pnf = nf;
        } else {
                if (refcount_dec_and_test(&nf->nf_ref))
@@ -1158,20 +1157,23 @@ out:
 
 out_status:
        put_cred(key.cred);
-       if (open)
-               trace_nfsd_file_acquire(rqstp, key.inode, may_flags, nf, status);
+       trace_nfsd_file_acquire(rqstp, key.inode, may_flags, nf, status);
        return status;
 
 open_file:
        trace_nfsd_file_alloc(nf);
        nf->nf_mark = nfsd_file_mark_find_or_create(nf, key.inode);
        if (nf->nf_mark) {
-               if (open) {
+               if (file) {
+                       get_file(file);
+                       nf->nf_file = file;
+                       status = nfs_ok;
+                       trace_nfsd_file_opened(nf, status);
+               } else {
                        status = nfsd_open_verified(rqstp, fhp, may_flags,
                                                    &nf->nf_file);
                        trace_nfsd_file_open(nf, status);
-               } else
-                       status = nfs_ok;
+               }
        } else
                status = nfserr_jukebox;
        /*
@@ -1207,7 +1209,7 @@ __be32
 nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
                     unsigned int may_flags, struct nfsd_file **pnf)
 {
-       return nfsd_file_do_acquire(rqstp, fhp, may_flags, pnf, true, true);
+       return nfsd_file_do_acquire(rqstp, fhp, may_flags, NULL, pnf, true);
 }
 
 /**
@@ -1228,28 +1230,30 @@ __be32
 nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
                  unsigned int may_flags, struct nfsd_file **pnf)
 {
-       return nfsd_file_do_acquire(rqstp, fhp, may_flags, pnf, true, false);
+       return nfsd_file_do_acquire(rqstp, fhp, may_flags, NULL, pnf, false);
 }
 
 /**
- * nfsd_file_create - Get a struct nfsd_file, do not open
+ * nfsd_file_acquire_opened - Get a struct nfsd_file using existing open file
  * @rqstp: the RPC transaction being executed
  * @fhp: the NFS filehandle of the file just created
  * @may_flags: NFSD_MAY_ settings for the file
+ * @file: cached, already-open file (may be NULL)
  * @pnf: OUT: new or found "struct nfsd_file" object
  *
- * The nfsd_file_object returned by this API is reference-counted
- * but not garbage-collected. The object is released immediately
- * one RCU grace period after the final nfsd_file_put().
+ * Acquire an nfsd_file object that is not GC'ed. If one doesn't already exist,
+ * and @file is non-NULL, use it to instantiate a new nfsd_file instead of
+ * opening a new one.
  *
  * Returns nfs_ok and sets @pnf on success; otherwise an nfsstat in
  * network byte order is returned.
  */
 __be32
-nfsd_file_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
-                unsigned int may_flags, struct nfsd_file **pnf)
+nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
+                        unsigned int may_flags, struct file *file,
+                        struct nfsd_file **pnf)
 {
-       return nfsd_file_do_acquire(rqstp, fhp, may_flags, pnf, false, false);
+       return nfsd_file_do_acquire(rqstp, fhp, may_flags, file, pnf, false);
 }
 
 /*
index b7efb2c..41516a4 100644
@@ -60,7 +60,8 @@ __be32 nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
                  unsigned int may_flags, struct nfsd_file **nfp);
 __be32 nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
                  unsigned int may_flags, struct nfsd_file **nfp);
-__be32 nfsd_file_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
-                 unsigned int may_flags, struct nfsd_file **nfp);
+__be32 nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
+                 unsigned int may_flags, struct file *file,
+                 struct nfsd_file **nfp);
 int nfsd_file_cache_stats_show(struct seq_file *m, void *v);
 #endif /* _FS_NFSD_FILECACHE_H */
index bd880d5..9b81d01 100644
@@ -937,7 +937,7 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
         * the client wants us to do more in this compound:
         */
        if (!nfsd4_last_compound_op(rqstp))
-               __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+               clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
 
        /* check stateid */
        status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
@@ -2607,12 +2607,11 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
        cstate->minorversion = args->minorversion;
        fh_init(current_fh, NFS4_FHSIZE);
        fh_init(save_fh, NFS4_FHSIZE);
-
        /*
         * Don't use the deferral mechanism for NFSv4; compounds make it
         * too hard to avoid non-idempotency problems.
         */
-       __clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+       clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
 
        /*
         * According to RFC3010, this takes precedence over all other errors.
@@ -2734,7 +2733,7 @@ encode_op:
 out:
        cstate->status = status;
        /* Reset deferral mechanism for RPC deferrals */
-       __set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+       set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
        return rpc_success;
 }
 
index 7b2ee53..4809ae0 100644
@@ -5262,18 +5262,10 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
        if (!fp->fi_fds[oflag]) {
                spin_unlock(&fp->fi_lock);
 
-               if (!open->op_filp) {
-                       status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
-                       if (status != nfs_ok)
-                               goto out_put_access;
-               } else {
-                       status = nfsd_file_create(rqstp, cur_fh, access, &nf);
-                       if (status != nfs_ok)
-                               goto out_put_access;
-                       nf->nf_file = open->op_filp;
-                       open->op_filp = NULL;
-                       trace_nfsd_file_create(rqstp, access, nf);
-               }
+               status = nfsd_file_acquire_opened(rqstp, cur_fh, access,
+                                                 open->op_filp, &nf);
+               if (status != nfs_ok)
+                       goto out_put_access;
 
                spin_lock(&fp->fi_lock);
                if (!fp->fi_fds[oflag]) {
index ebb4d02..97edb32 100644
@@ -2523,7 +2523,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
        argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
 
        if (readcount > 1 || max_reply > PAGE_SIZE - auth_slack)
-               __clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
+               clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
 
        return true;
 }
index a5570cf..9744443 100644
@@ -211,7 +211,7 @@ nfsd_proc_read(struct svc_rqst *rqstp)
        if (resp->status == nfs_ok)
                resp->status = fh_getattr(&resp->fh, &resp->stat);
        else if (resp->status == nfserr_jukebox)
-               __set_bit(RQ_DROPME, &rqstp->rq_flags);
+               set_bit(RQ_DROPME, &rqstp->rq_flags);
        return rpc_success;
 }
 
@@ -246,7 +246,7 @@ nfsd_proc_write(struct svc_rqst *rqstp)
        if (resp->status == nfs_ok)
                resp->status = fh_getattr(&resp->fh, &resp->stat);
        else if (resp->status == nfserr_jukebox)
-               __set_bit(RQ_DROPME, &rqstp->rq_flags);
+               set_bit(RQ_DROPME, &rqstp->rq_flags);
        return rpc_success;
 }
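
The __clear_bit()/__set_bit() calls being replaced throughout these nfsd hunks
are non-atomic read-modify-write operations: if two tasks update different
bits of the same rq_flags word concurrently, one update can be lost. The plain
clear_bit()/set_bit() forms are atomic per-bit operations. In miniature:

    #include <linux/bitops.h>

    static unsigned long flags_word;

    static void bitops_sketch(void)
    {
            set_bit(0, &flags_word);        /* atomic RMW, concurrency-safe */
            clear_bit(0, &flags_word);      /* atomic RMW, concurrency-safe */

            /* Non-atomic variants: only valid when nothing else can touch
             * the word, e.g. during init or under an exclusive lock. */
            __set_bit(1, &flags_word);
            __clear_bit(1, &flags_word);
    }
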
 
index c852ae8..8f9c82d 100644
@@ -981,43 +981,6 @@ TRACE_EVENT(nfsd_file_acquire,
        )
 );
 
-TRACE_EVENT(nfsd_file_create,
-       TP_PROTO(
-               const struct svc_rqst *rqstp,
-               unsigned int may_flags,
-               const struct nfsd_file *nf
-       ),
-
-       TP_ARGS(rqstp, may_flags, nf),
-
-       TP_STRUCT__entry(
-               __field(const void *, nf_inode)
-               __field(const void *, nf_file)
-               __field(unsigned long, may_flags)
-               __field(unsigned long, nf_flags)
-               __field(unsigned long, nf_may)
-               __field(unsigned int, nf_ref)
-               __field(u32, xid)
-       ),
-
-       TP_fast_assign(
-               __entry->nf_inode = nf->nf_inode;
-               __entry->nf_file = nf->nf_file;
-               __entry->may_flags = may_flags;
-               __entry->nf_flags = nf->nf_flags;
-               __entry->nf_may = nf->nf_may;
-               __entry->nf_ref = refcount_read(&nf->nf_ref);
-               __entry->xid = be32_to_cpu(rqstp->rq_xid);
-       ),
-
-       TP_printk("xid=0x%x inode=%p may_flags=%s ref=%u nf_flags=%s nf_may=%s nf_file=%p",
-               __entry->xid, __entry->nf_inode,
-               show_nfsd_may_flags(__entry->may_flags),
-               __entry->nf_ref, show_nf_flags(__entry->nf_flags),
-               show_nfsd_may_flags(__entry->nf_may), __entry->nf_file
-       )
-);
-
 TRACE_EVENT(nfsd_file_insert_err,
        TP_PROTO(
                const struct svc_rqst *rqstp,
@@ -1079,8 +1042,8 @@ TRACE_EVENT(nfsd_file_cons_err,
        )
 );
 
-TRACE_EVENT(nfsd_file_open,
-       TP_PROTO(struct nfsd_file *nf, __be32 status),
+DECLARE_EVENT_CLASS(nfsd_file_open_class,
+       TP_PROTO(const struct nfsd_file *nf, __be32 status),
        TP_ARGS(nf, status),
        TP_STRUCT__entry(
                __field(void *, nf_inode)       /* cannot be dereferenced */
@@ -1104,6 +1067,17 @@ TRACE_EVENT(nfsd_file_open,
                __entry->nf_file)
 )
 
+#define DEFINE_NFSD_FILE_OPEN_EVENT(name)                                      \
+DEFINE_EVENT(nfsd_file_open_class, name,                                       \
+       TP_PROTO(                                                       \
+               const struct nfsd_file *nf,                             \
+               __be32 status                                           \
+       ),                                                              \
+       TP_ARGS(nf, status))
+
+DEFINE_NFSD_FILE_OPEN_EVENT(nfsd_file_open);
+DEFINE_NFSD_FILE_OPEN_EVENT(nfsd_file_opened);
+
 TRACE_EVENT(nfsd_file_is_cached,
        TP_PROTO(
                const struct inode *inode,
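
Converting nfsd_file_open from a standalone TRACE_EVENT() into an event class,
as above, is the standard way to add a second tracepoint (nfsd_file_opened)
with an identical record layout without duplicating the TP_STRUCT__entry /
TP_fast_assign / TP_printk blocks. The pattern in general (foo_* names are
placeholders):

    struct foo;

    DECLARE_EVENT_CLASS(foo_class,
            TP_PROTO(const struct foo *f, int status),
            TP_ARGS(f, status),
            TP_STRUCT__entry(
                    __field(int, status)
            ),
            TP_fast_assign(
                    __entry->status = status;
            ),
            TP_printk("status=%d", __entry->status)
    );

    /* Each DEFINE_EVENT stamps out one tracepoint sharing that layout. */
    DEFINE_EVENT(foo_class, foo_done,
            TP_PROTO(const struct foo *f, int status),
            TP_ARGS(f, status));
    DEFINE_EVENT(foo_class, foo_retried,
            TP_PROTO(const struct foo *f, int status),
            TP_ARGS(f, status));
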
index 4c16c8c..35f5744 100644
@@ -4666,7 +4666,12 @@ xfs_btree_space_to_height(
        const unsigned int      *limits,
        unsigned long long      leaf_blocks)
 {
-       unsigned long long      node_blocks = limits[1];
+       /*
+        * The root btree block can have fewer than minrecs pointers in it
+        * because the tree might not be big enough to require that amount of
+        * fanout. Hence it has a minimum size of 2 pointers, not limits[1].
+        */
+       unsigned long long      node_blocks = 2;
        unsigned long long      blocks_left = leaf_blocks - 1;
        unsigned int            height = 1;
 
index ad22a00..f3d328e 100644
@@ -236,6 +236,7 @@ xfs_extent_busy_update_extent(
                 *
                 */
                busyp->bno = fend;
+               busyp->length = bend - fend;
        } else if (bbno < fbno) {
                /*
                 * Case 8:
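
The added busyp->length update fixes the trim arithmetic for the case where a
freed range overlaps the front of a busy extent. With hypothetical numbers:

    busy extent: bno = 20, length = 40    covers [20, 60)
    freed range: fbno = 10, fend = 40     overlaps the front

    after trim:  bno    = fend        = 40
                 length = bend - fend = 60 - 40 = 20

Without the new line, length stayed 40 and the trimmed extent appeared to
cover [40, 80), blocks that were never busy at all.
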
index f35e2ce..ddeaccc 100644
@@ -1853,12 +1853,20 @@ xfs_inodegc_worker(
                                                struct xfs_inodegc, work);
        struct llist_node       *node = llist_del_all(&gc->list);
        struct xfs_inode        *ip, *n;
+       unsigned int            nofs_flag;
 
        WRITE_ONCE(gc->items, 0);
 
        if (!node)
                return;
 
+       /*
+        * We can allocate memory here while doing writeback on behalf of
+        * memory reclaim.  To avoid memory allocation deadlocks set the
+        * task-wide nofs context for the following operations.
+        */
+       nofs_flag = memalloc_nofs_save();
+
        ip = llist_entry(node, struct xfs_inode, i_gclist);
        trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
 
@@ -1867,6 +1875,8 @@ xfs_inodegc_worker(
                xfs_iflags_set(ip, XFS_INACTIVATING);
                xfs_inodegc_inactivate(ip);
        }
+
+       memalloc_nofs_restore(nofs_flag);
 }
 
 /*
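
The memalloc_nofs_save()/restore() pair added above sets a task-wide
allocation context: every allocation made inside the window behaves as if it
had been passed GFP_NOFS, so direct reclaim cannot re-enter the filesystem
while this worker is itself running on behalf of memory reclaim. The scoping
idiom on its own:

    #include <linux/sched/mm.h>

    static void reclaim_safe_section(void)
    {
            unsigned int nofs_flag;

            /* All allocations in this window are implicitly GFP_NOFS. */
            nofs_flag = memalloc_nofs_save();

            /* ... allocate, write back, inactivate inodes ... */

            memalloc_nofs_restore(nofs_flag);
    }
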
index 13f1b2a..736510b 100644
@@ -754,7 +754,7 @@ xfs_bulkstat_fmt(
 static int
 xfs_bulk_ireq_setup(
        struct xfs_mount        *mp,
-       struct xfs_bulk_ireq    *hdr,
+       const struct xfs_bulk_ireq *hdr,
        struct xfs_ibulk        *breq,
        void __user             *ubuffer)
 {
@@ -780,7 +780,7 @@ xfs_bulk_ireq_setup(
 
                switch (hdr->ino) {
                case XFS_BULK_IREQ_SPECIAL_ROOT:
-                       hdr->ino = mp->m_sb.sb_rootino;
+                       breq->startino = mp->m_sb.sb_rootino;
                        break;
                default:
                        return -EINVAL;
index 669c1bc..fc1946f 100644
@@ -83,7 +83,7 @@ xfs_iomap_valid(
        return true;
 }
 
-const struct iomap_page_ops xfs_iomap_page_ops = {
+static const struct iomap_page_ops xfs_iomap_page_ops = {
        .iomap_valid            = xfs_iomap_valid,
 };
 
index ff53d40..e2c542f 100644
@@ -68,7 +68,7 @@ restart:
 
        while (1) {
                struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
-               int             error = 0;
+               int             error;
                int             i;
 
                mutex_lock(&qi->qi_tree_lock);
index fe46bce..5535778 100644
@@ -416,8 +416,6 @@ xfs_reflink_fill_cow_hole(
                goto convert;
        }
 
-       ASSERT(cmap->br_startoff > imap->br_startoff);
-
        /* Allocate the entire reservation as unwritten blocks. */
        nimaps = 1;
        error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
index cd3b75e..e44be31 100644
@@ -230,7 +230,8 @@ struct acpi_pnp_type {
        u32 hardware_id:1;
        u32 bus_address:1;
        u32 platform_id:1;
-       u32 reserved:29;
+       u32 backlight:1;
+       u32 reserved:28;
 };
 
 struct acpi_device_pnp {
index 9ec8129..bd55605 100644
@@ -105,14 +105,14 @@ int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
  * Dumping its extra ELF program headers includes all the other information
  * a debugger needs to easily find how the gate DSO was being used.
  */
-extern Elf_Half elf_core_extra_phdrs(void);
+extern Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm);
 extern int
 elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
 extern int
 elf_core_write_extra_data(struct coredump_params *cprm);
-extern size_t elf_core_extra_data_size(void);
+extern size_t elf_core_extra_data_size(struct coredump_params *cprm);
 #else
-static inline Elf_Half elf_core_extra_phdrs(void)
+static inline Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
        return 0;
 }
@@ -127,7 +127,7 @@ static inline int elf_core_write_extra_data(struct coredump_params *cprm)
        return 1;
 }
 
-static inline size_t elf_core_extra_data_size(void)
+static inline size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
        return 0;
 }
index c3eb896..b1b28af 100644
@@ -1266,6 +1266,9 @@ int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
 int ipi_send_single(unsigned int virq, unsigned int cpu);
 int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
 
+void ipi_mux_process(void);
+int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu));
+
 #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
 /*
  * Registers a generic IRQ handling function as the top-level IRQ handler in
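
These two declarations are the entry points of the new generic IPI mux: a
driver whose hardware provides only a single IPI per CPU creates nr_ipi
virtual IPIs multiplexed over it, then calls ipi_mux_process() from the
hardware IPI's handler to fan pending virtual IPIs back out. A usage sketch
(everything except the two mux functions is a placeholder; that
ipi_mux_create() returns the first virq on success is an assumption based on
how the call sites use it):

    #include <linux/init.h>
    #include <linux/interrupt.h>
    #include <linux/irq.h>

    /* Driver hook: ring the one hardware IPI doorbell on @cpu. */
    static void hw_ipi_send(unsigned int cpu)
    {
            /* ... write the per-cpu doorbell register ... */
    }

    /* Handler for the single hardware IPI. */
    static irqreturn_t hw_ipi_handler(int irq, void *dev_id)
    {
            ipi_mux_process();      /* dispatch pending virtual IPIs */
            return IRQ_HANDLED;
    }

    static int __init mux_init(void)
    {
            int virq = ipi_mux_create(8, hw_ipi_send);

            if (virq <= 0)
                    return virq ? virq : -ENODEV;
            /* virq .. virq + 7 can now be handed to the arch IPI code. */
            return 0;
    }
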
index a372086..d320d15 100644
@@ -125,6 +125,8 @@ struct irq_domain_chip_generic;
  *             core code.
  * @flags:     Per irq_domain flags
  * @mapcount:  The number of mapped interrupts
+ * @mutex:     Domain lock; hierarchical domains use the root domain's lock
+ * @root:      Pointer to root domain, or containing structure if non-hierarchical
  *
  * Optional elements:
  * @fwnode:    Pointer to firmware node associated with the irq_domain. Pretty easy
@@ -143,7 +145,6 @@ struct irq_domain_chip_generic;
  * Revmap data, used internally by the irq domain code:
  * @revmap_size:       Size of the linear map table @revmap[]
  * @revmap_tree:       Radix map tree for hwirqs that don't fit in the linear map
- * @revmap_mutex:      Lock for the revmap
  * @revmap:            Linear table of irq_data pointers
  */
 struct irq_domain {
@@ -153,6 +154,8 @@ struct irq_domain {
        void                            *host_data;
        unsigned int                    flags;
        unsigned int                    mapcount;
+       struct mutex                    mutex;
+       struct irq_domain               *root;
 
        /* Optional data */
        struct fwnode_handle            *fwnode;
@@ -171,7 +174,6 @@ struct irq_domain {
        irq_hw_number_t                 hwirq_max;
        unsigned int                    revmap_size;
        struct radix_tree_root          revmap_tree;
-       struct mutex                    revmap_mutex;
        struct irq_data __rcu           *revmap[];
 };
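
The mutex/root pair replaces the narrower revmap_mutex: every domain now
carries a mutex, hierarchical domains all point root at the hierarchy's root
domain, and locking goes through that root so an entire hierarchy is
serialized by a single lock (a standalone domain is its own root). The
invariant, sketched rather than quoting the actual helpers:

    #include <linux/irqdomain.h>
    #include <linux/mutex.h>

    /* Sketch: all domains in a hierarchy share the root's mutex. */
    static inline void domain_lock(struct irq_domain *d)
    {
            mutex_lock(&d->root->mutex);
    }

    static inline void domain_unlock(struct irq_domain *d)
    {
            mutex_unlock(&d->root->mutex);
    }
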
 
index d476255..76ef2e4 100644
@@ -315,7 +315,7 @@ struct mlx5_cmd {
        struct mlx5_cmd_debug dbg;
        struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
        int checksum_disabled;
-       struct mlx5_cmd_stats *stats;
+       struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
 };
 
 struct mlx5_cmd_mailbox {
index 2576555..a3f8cdc 100644
@@ -7,7 +7,6 @@
 #define __LINUX_MTD_SPI_NOR_H
 
 #include <linux/bitops.h>
-#include <linux/mtd/cfi.h>
 #include <linux/mtd/mtd.h>
 #include <linux/spi/spi-mem.h>
 
index 632320e..a48bb52 100644
@@ -32,7 +32,8 @@ enum simatic_ipc_station_ids {
        SIMATIC_IPC_IPC477E = 0x00000A02,
        SIMATIC_IPC_IPC127E = 0x00000D01,
        SIMATIC_IPC_IPC227G = 0x00000F01,
-       SIMATIC_IPC_IPC427G = 0x00001001,
+       SIMATIC_IPC_IPCBX_39A = 0x00001001,
+       SIMATIC_IPC_IPCPX_39A = 0x00001002,
 };
 
 static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
index 20c0ff5..7d68a5c 100644
@@ -198,8 +198,8 @@ static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *ev
         * The loop below will unmap these fields if the log is larger than
         * one page, so save them here for reference:
         */
-       count = READ_ONCE(event->count);
-       event_type = READ_ONCE(event->event_type);
+       count = event->count;
+       event_type = event->event_type;
 
        /* Verify that it's the log header */
        if (event_header->pcr_idx != 0 ||
index d5a5ae9..ba717ea 100644
@@ -15,6 +15,7 @@ struct key;
 struct sock;
 struct socket;
 struct rxrpc_call;
+enum rxrpc_abort_reason;
 
 enum rxrpc_interruptibility {
        RXRPC_INTERRUPTIBLE,    /* Call is interruptible */
@@ -55,7 +56,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
 int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
                           struct iov_iter *, size_t *, bool, u32 *, u16 *);
 bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
-                            u32, int, const char *);
+                            u32, int, enum rxrpc_abort_reason);
 void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
 void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
                           struct sockaddr_rxrpc *);
index cab52b0..34c0370 100644
@@ -236,6 +236,14 @@ enum {
        ISCSI_SESSION_FREE,
 };
 
+enum {
+       ISCSI_SESSION_TARGET_UNBOUND,
+       ISCSI_SESSION_TARGET_ALLOCATED,
+       ISCSI_SESSION_TARGET_SCANNED,
+       ISCSI_SESSION_TARGET_UNBINDING,
+       ISCSI_SESSION_TARGET_MAX,
+};
+
 #define ISCSI_MAX_TARGET -1
 
 struct iscsi_cls_session {
@@ -264,6 +272,7 @@ struct iscsi_cls_session {
         */
        pid_t creator;
        int state;
+       int target_state;                       /* session target bind state */
        int sid;                                /* session id */
        void *dd_data;                          /* LLD private data */
        struct device dev;      /* sysfs transport/container device */
index 5f9dd73..283db0e 100644
 /*
  * Declare tracing information enums and their string mappings for display.
  */
+#define rxrpc_abort_reasons \
+       /* AFS errors */                                                \
+       EM(afs_abort_general_error,             "afs-error")            \
+       EM(afs_abort_interrupted,               "afs-intr")             \
+       EM(afs_abort_oom,                       "afs-oom")              \
+       EM(afs_abort_op_not_supported,          "afs-op-notsupp")       \
+       EM(afs_abort_probeuuid_negative,        "afs-probeuuid-neg")    \
+       EM(afs_abort_send_data_error,           "afs-send-data")        \
+       EM(afs_abort_unmarshal_error,           "afs-unmarshal")        \
+       /* rxperf errors */                                             \
+       EM(rxperf_abort_general_error,          "rxperf-error")         \
+       EM(rxperf_abort_oom,                    "rxperf-oom")           \
+       EM(rxperf_abort_op_not_supported,       "rxperf-op-notsupp")    \
+       EM(rxperf_abort_unmarshal_error,        "rxperf-unmarshal")     \
+       /* RxKAD security errors */                                     \
+       EM(rxkad_abort_1_short_check,           "rxkad1-short-check")   \
+       EM(rxkad_abort_1_short_data,            "rxkad1-short-data")    \
+       EM(rxkad_abort_1_short_encdata,         "rxkad1-short-encdata") \
+       EM(rxkad_abort_1_short_header,          "rxkad1-short-hdr")     \
+       EM(rxkad_abort_2_short_check,           "rxkad2-short-check")   \
+       EM(rxkad_abort_2_short_data,            "rxkad2-short-data")    \
+       EM(rxkad_abort_2_short_header,          "rxkad2-short-hdr")     \
+       EM(rxkad_abort_2_short_len,             "rxkad2-short-len")     \
+       EM(rxkad_abort_bad_checksum,            "rxkad2-bad-cksum")     \
+       EM(rxkad_abort_chall_key_expired,       "rxkad-chall-key-exp")  \
+       EM(rxkad_abort_chall_level,             "rxkad-chall-level")    \
+       EM(rxkad_abort_chall_no_key,            "rxkad-chall-nokey")    \
+       EM(rxkad_abort_chall_short,             "rxkad-chall-short")    \
+       EM(rxkad_abort_chall_version,           "rxkad-chall-version")  \
+       EM(rxkad_abort_resp_bad_callid,         "rxkad-resp-bad-callid") \
+       EM(rxkad_abort_resp_bad_checksum,       "rxkad-resp-bad-cksum") \
+       EM(rxkad_abort_resp_bad_param,          "rxkad-resp-bad-param") \
+       EM(rxkad_abort_resp_call_ctr,           "rxkad-resp-call-ctr") \
+       EM(rxkad_abort_resp_call_state,         "rxkad-resp-call-state") \
+       EM(rxkad_abort_resp_key_expired,        "rxkad-resp-key-exp")   \
+       EM(rxkad_abort_resp_key_rejected,       "rxkad-resp-key-rej")   \
+       EM(rxkad_abort_resp_level,              "rxkad-resp-level")     \
+       EM(rxkad_abort_resp_nokey,              "rxkad-resp-nokey")     \
+       EM(rxkad_abort_resp_ooseq,              "rxkad-resp-ooseq")     \
+       EM(rxkad_abort_resp_short,              "rxkad-resp-short")     \
+       EM(rxkad_abort_resp_short_tkt,          "rxkad-resp-short-tkt") \
+       EM(rxkad_abort_resp_tkt_aname,          "rxkad-resp-tk-aname")  \
+       EM(rxkad_abort_resp_tkt_expired,        "rxkad-resp-tk-exp")    \
+       EM(rxkad_abort_resp_tkt_future,         "rxkad-resp-tk-future") \
+       EM(rxkad_abort_resp_tkt_inst,           "rxkad-resp-tk-inst")   \
+       EM(rxkad_abort_resp_tkt_len,            "rxkad-resp-tk-len")    \
+       EM(rxkad_abort_resp_tkt_realm,          "rxkad-resp-tk-realm")  \
+       EM(rxkad_abort_resp_tkt_short,          "rxkad-resp-tk-short")  \
+       EM(rxkad_abort_resp_tkt_sinst,          "rxkad-resp-tk-sinst")  \
+       EM(rxkad_abort_resp_tkt_sname,          "rxkad-resp-tk-sname")  \
+       EM(rxkad_abort_resp_unknown_tkt,        "rxkad-resp-unknown-tkt") \
+       EM(rxkad_abort_resp_version,            "rxkad-resp-version")   \
+       /* rxrpc errors */                                              \
+       EM(rxrpc_abort_call_improper_term,      "call-improper-term")   \
+       EM(rxrpc_abort_call_reset,              "call-reset")           \
+       EM(rxrpc_abort_call_sendmsg,            "call-sendmsg")         \
+       EM(rxrpc_abort_call_sock_release,       "call-sock-rel")        \
+       EM(rxrpc_abort_call_sock_release_tba,   "call-sock-rel-tba")    \
+       EM(rxrpc_abort_call_timeout,            "call-timeout")         \
+       EM(rxrpc_abort_no_service_key,          "no-serv-key")          \
+       EM(rxrpc_abort_nomem,                   "nomem")                \
+       EM(rxrpc_abort_service_not_offered,     "serv-not-offered")     \
+       EM(rxrpc_abort_shut_down,               "shut-down")            \
+       EM(rxrpc_abort_unsupported_security,    "unsup-sec")            \
+       EM(rxrpc_badmsg_bad_abort,              "bad-abort")            \
+       EM(rxrpc_badmsg_bad_jumbo,              "bad-jumbo")            \
+       EM(rxrpc_badmsg_short_ack,              "short-ack")            \
+       EM(rxrpc_badmsg_short_ack_info,         "short-ack-info")       \
+       EM(rxrpc_badmsg_short_hdr,              "short-hdr")            \
+       EM(rxrpc_badmsg_unsupported_packet,     "unsup-pkt")            \
+       EM(rxrpc_badmsg_zero_call,              "zero-call")            \
+       EM(rxrpc_badmsg_zero_seq,               "zero-seq")             \
+       EM(rxrpc_badmsg_zero_service,           "zero-service")         \
+       EM(rxrpc_eproto_ackr_outside_window,    "ackr-out-win")         \
+       EM(rxrpc_eproto_ackr_sack_overflow,     "ackr-sack-over")       \
+       EM(rxrpc_eproto_ackr_short_sack,        "ackr-short-sack")      \
+       EM(rxrpc_eproto_ackr_zero,              "ackr-zero")            \
+       EM(rxrpc_eproto_bad_upgrade,            "bad-upgrade")          \
+       EM(rxrpc_eproto_data_after_last,        "data-after-last")      \
+       EM(rxrpc_eproto_different_last,         "diff-last")            \
+       EM(rxrpc_eproto_early_reply,            "early-reply")          \
+       EM(rxrpc_eproto_improper_term,          "improper-term")        \
+       EM(rxrpc_eproto_no_client_call,         "no-cl-call")           \
+       EM(rxrpc_eproto_no_client_conn,         "no-cl-conn")           \
+       EM(rxrpc_eproto_no_service_call,        "no-sv-call")           \
+       EM(rxrpc_eproto_reupgrade,              "re-upgrade")           \
+       EM(rxrpc_eproto_rxnull_challenge,       "rxnull-chall")         \
+       EM(rxrpc_eproto_rxnull_response,        "rxnull-resp")          \
+       EM(rxrpc_eproto_tx_rot_last,            "tx-rot-last")          \
+       EM(rxrpc_eproto_unexpected_ack,         "unex-ack")             \
+       EM(rxrpc_eproto_unexpected_ackall,      "unex-ackall")          \
+       EM(rxrpc_eproto_unexpected_implicit_end, "unex-impl-end")       \
+       EM(rxrpc_eproto_unexpected_reply,       "unex-reply")           \
+       EM(rxrpc_eproto_wrong_security,         "wrong-sec")            \
+       EM(rxrpc_recvmsg_excess_data,           "recvmsg-excess")       \
+       EM(rxrpc_recvmsg_short_data,            "recvmsg-short")        \
+       E_(rxrpc_sendmsg_late_send,             "sendmsg-late")
+
 #define rxrpc_call_poke_traces \
+       EM(rxrpc_call_poke_abort,               "Abort")        \
+       EM(rxrpc_call_poke_complete,            "Compl")        \
        EM(rxrpc_call_poke_error,               "Error")        \
        EM(rxrpc_call_poke_idle,                "Idle")         \
        EM(rxrpc_call_poke_start,               "Start")        \
 #define rxrpc_skb_traces \
        EM(rxrpc_skb_eaten_by_unshare,          "ETN unshare  ") \
        EM(rxrpc_skb_eaten_by_unshare_nomem,    "ETN unshar-nm") \
+       EM(rxrpc_skb_get_conn_secured,          "GET conn-secd") \
        EM(rxrpc_skb_get_conn_work,             "GET conn-work") \
        EM(rxrpc_skb_get_local_work,            "GET locl-work") \
        EM(rxrpc_skb_get_reject_work,           "GET rej-work ") \
        EM(rxrpc_skb_new_error_report,          "NEW error-rpt") \
        EM(rxrpc_skb_new_jumbo_subpacket,       "NEW jumbo-sub") \
        EM(rxrpc_skb_new_unshared,              "NEW unshared ") \
+       EM(rxrpc_skb_put_conn_secured,          "PUT conn-secd") \
        EM(rxrpc_skb_put_conn_work,             "PUT conn-work") \
        EM(rxrpc_skb_put_error_report,          "PUT error-rep") \
        EM(rxrpc_skb_put_input,                 "PUT input    ") \
 #define rxrpc_peer_traces \
        EM(rxrpc_peer_free,                     "FREE        ") \
        EM(rxrpc_peer_get_accept,               "GET accept  ") \
-       EM(rxrpc_peer_get_activate_call,        "GET act-call") \
        EM(rxrpc_peer_get_bundle,               "GET bundle  ") \
        EM(rxrpc_peer_get_client_conn,          "GET cln-conn") \
        EM(rxrpc_peer_get_input,                "GET input   ") \
        EM(rxrpc_peer_put_bundle,               "PUT bundle  ") \
        EM(rxrpc_peer_put_call,                 "PUT call    ") \
        EM(rxrpc_peer_put_conn,                 "PUT conn    ") \
-       EM(rxrpc_peer_put_discard_tmp,          "PUT disc-tmp") \
        EM(rxrpc_peer_put_input,                "PUT input   ") \
        EM(rxrpc_peer_put_input_error,          "PUT inpt-err") \
        E_(rxrpc_peer_put_keepalive,            "PUT keepaliv")
        EM(rxrpc_bundle_get_client_call,        "GET clt-call") \
        EM(rxrpc_bundle_get_client_conn,        "GET clt-conn") \
        EM(rxrpc_bundle_get_service_conn,       "GET svc-conn") \
+       EM(rxrpc_bundle_put_call,               "PUT call    ") \
        EM(rxrpc_bundle_put_conn,               "PUT conn    ") \
        EM(rxrpc_bundle_put_discard,            "PUT discard ") \
        E_(rxrpc_bundle_new,                    "NEW         ")
        EM(rxrpc_conn_get_call_input,           "GET inp-call") \
        EM(rxrpc_conn_get_conn_input,           "GET inp-conn") \
        EM(rxrpc_conn_get_idle,                 "GET idle    ") \
-       EM(rxrpc_conn_get_poke,                 "GET poke    ") \
+       EM(rxrpc_conn_get_poke_abort,           "GET pk-abort") \
+       EM(rxrpc_conn_get_poke_timer,           "GET poke    ") \
        EM(rxrpc_conn_get_service_conn,         "GET svc-conn") \
        EM(rxrpc_conn_new_client,               "NEW client  ") \
        EM(rxrpc_conn_new_service,              "NEW service ") \
        EM(rxrpc_conn_put_call,                 "PUT call    ") \
        EM(rxrpc_conn_put_call_input,           "PUT inp-call") \
        EM(rxrpc_conn_put_conn_input,           "PUT inp-conn") \
-       EM(rxrpc_conn_put_discard,              "PUT discard ") \
        EM(rxrpc_conn_put_discard_idle,         "PUT disc-idl") \
        EM(rxrpc_conn_put_local_dead,           "PUT loc-dead") \
        EM(rxrpc_conn_put_noreuse,              "PUT noreuse ") \
        EM(rxrpc_conn_put_service_reaped,       "PUT svc-reap") \
        EM(rxrpc_conn_put_unbundle,             "PUT unbundle") \
        EM(rxrpc_conn_put_unidle,               "PUT unidle  ") \
+       EM(rxrpc_conn_put_work,                 "PUT work    ") \
        EM(rxrpc_conn_queue_challenge,          "QUE chall   ") \
        EM(rxrpc_conn_queue_retry_work,         "QUE retry-wk") \
        EM(rxrpc_conn_queue_rx_work,            "QUE rx-work ") \
-       EM(rxrpc_conn_queue_timer,              "QUE timer   ") \
        EM(rxrpc_conn_see_new_service_conn,     "SEE new-svc ") \
        EM(rxrpc_conn_see_reap_service,         "SEE reap-svc") \
        E_(rxrpc_conn_see_work,                 "SEE work    ")
        EM(rxrpc_client_chan_activate,          "ChActv") \
        EM(rxrpc_client_chan_disconnect,        "ChDisc") \
        EM(rxrpc_client_chan_pass,              "ChPass") \
-       EM(rxrpc_client_chan_wait_failed,       "ChWtFl") \
        EM(rxrpc_client_cleanup,                "Clean ") \
        EM(rxrpc_client_discard,                "Discar") \
-       EM(rxrpc_client_duplicate,              "Duplic") \
        EM(rxrpc_client_exposed,                "Expose") \
        EM(rxrpc_client_replace,                "Replac") \
+       EM(rxrpc_client_queue_new_call,         "Q-Call") \
        EM(rxrpc_client_to_active,              "->Actv") \
        E_(rxrpc_client_to_idle,                "->Idle")
 
 #define rxrpc_call_traces \
+       EM(rxrpc_call_get_io_thread,            "GET iothread") \
        EM(rxrpc_call_get_input,                "GET input   ") \
        EM(rxrpc_call_get_kernel_service,       "GET krnl-srv") \
        EM(rxrpc_call_get_notify_socket,        "GET notify  ") \
        EM(rxrpc_call_new_prealloc_service,     "NEW prealloc") \
        EM(rxrpc_call_put_discard_prealloc,     "PUT disc-pre") \
        EM(rxrpc_call_put_discard_error,        "PUT disc-err") \
+       EM(rxrpc_call_put_io_thread,            "PUT iothread") \
        EM(rxrpc_call_put_input,                "PUT input   ") \
        EM(rxrpc_call_put_kernel,               "PUT kernel  ") \
        EM(rxrpc_call_put_poke,                 "PUT poke    ") \
        EM(rxrpc_call_put_sendmsg,              "PUT sendmsg ") \
        EM(rxrpc_call_put_unnotify,             "PUT unnotify") \
        EM(rxrpc_call_put_userid_exists,        "PUT u-exists") \
+       EM(rxrpc_call_put_userid,               "PUT user-id ") \
        EM(rxrpc_call_see_accept,               "SEE accept  ") \
        EM(rxrpc_call_see_activate_client,      "SEE act-clnt") \
        EM(rxrpc_call_see_connect_failed,       "SEE con-fail") \
        EM(rxrpc_call_see_connected,            "SEE connect ") \
+       EM(rxrpc_call_see_disconnected,         "SEE disconn ") \
        EM(rxrpc_call_see_distribute_error,     "SEE dist-err") \
        EM(rxrpc_call_see_input,                "SEE input   ") \
        EM(rxrpc_call_see_release,              "SEE release ") \
 #define EM(a, b) a,
 #define E_(a, b) a
 
+enum rxrpc_abort_reason                { rxrpc_abort_reasons } __mode(byte);
 enum rxrpc_bundle_trace                { rxrpc_bundle_traces } __mode(byte);
 enum rxrpc_call_poke_trace     { rxrpc_call_poke_traces } __mode(byte);
 enum rxrpc_call_trace          { rxrpc_call_traces } __mode(byte);
@@ -404,9 +509,13 @@ enum rxrpc_txqueue_trace   { rxrpc_txqueue_traces } __mode(byte);
  */
 #undef EM
 #undef E_
+
+#ifndef RXRPC_TRACE_ONLY_DEFINE_ENUMS
+
 #define EM(a, b) TRACE_DEFINE_ENUM(a);
 #define E_(a, b) TRACE_DEFINE_ENUM(a);
 
+rxrpc_abort_reasons;
 rxrpc_bundle_traces;
 rxrpc_call_poke_traces;
 rxrpc_call_traces;
@@ -657,14 +766,14 @@ TRACE_EVENT(rxrpc_rx_done,
            );
 
 TRACE_EVENT(rxrpc_abort,
-           TP_PROTO(unsigned int call_nr, const char *why, u32 cid, u32 call_id,
-                    rxrpc_seq_t seq, int abort_code, int error),
+           TP_PROTO(unsigned int call_nr, enum rxrpc_abort_reason why,
+                    u32 cid, u32 call_id, rxrpc_seq_t seq, int abort_code, int error),
 
            TP_ARGS(call_nr, why, cid, call_id, seq, abort_code, error),
 
            TP_STRUCT__entry(
                    __field(unsigned int,               call_nr         )
-                   __array(char,                       why, 4          )
+                   __field(enum rxrpc_abort_reason,    why             )
                    __field(u32,                        cid             )
                    __field(u32,                        call_id         )
                    __field(rxrpc_seq_t,                seq             )
@@ -673,8 +782,8 @@ TRACE_EVENT(rxrpc_abort,
                             ),
 
            TP_fast_assign(
-                   memcpy(__entry->why, why, 4);
                    __entry->call_nr = call_nr;
+                   __entry->why = why;
                    __entry->cid = cid;
                    __entry->call_id = call_id;
                    __entry->abort_code = abort_code;
@@ -685,7 +794,8 @@ TRACE_EVENT(rxrpc_abort,
            TP_printk("c=%08x %08x:%08x s=%u a=%d e=%d %s",
                      __entry->call_nr,
                      __entry->cid, __entry->call_id, __entry->seq,
-                     __entry->abort_code, __entry->error, __entry->why)
+                     __entry->abort_code, __entry->error,
+                     __print_symbolic(__entry->why, rxrpc_abort_reasons))
            );
 
 TRACE_EVENT(rxrpc_call_complete,
@@ -1521,30 +1631,6 @@ TRACE_EVENT(rxrpc_improper_term,
                      __entry->abort_code)
            );
 
-TRACE_EVENT(rxrpc_rx_eproto,
-           TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
-                    const char *why),
-
-           TP_ARGS(call, serial, why),
-
-           TP_STRUCT__entry(
-                   __field(unsigned int,               call            )
-                   __field(rxrpc_serial_t,             serial          )
-                   __field(const char *,               why             )
-                            ),
-
-           TP_fast_assign(
-                   __entry->call = call ? call->debug_id : 0;
-                   __entry->serial = serial;
-                   __entry->why = why;
-                          ),
-
-           TP_printk("c=%08x EPROTO %08x %s",
-                     __entry->call,
-                     __entry->serial,
-                     __entry->why)
-           );
-
 TRACE_EVENT(rxrpc_connect_call,
            TP_PROTO(struct rxrpc_call *call),
 
@@ -1842,6 +1928,8 @@ TRACE_EVENT(rxrpc_call_poked,
 
 #undef EM
 #undef E_
+
+#endif /* RXRPC_TRACE_ONLY_DEFINE_ENUMS */
 #endif /* _TRACE_RXRPC_H */
 
 /* This part must be outside protection */
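The new RXRPC_TRACE_ONLY_DEFINE_ENUMS guard lets a non-tracing translation unit pull in just the enum definitions (notably enum rxrpc_abort_reason) without expanding any of the TRACE_EVENT() machinery. A minimal sketch of a consumer; the function here is hypothetical, only the guard macro and the enum come from the header above:

    #define RXRPC_TRACE_ONLY_DEFINE_ENUMS
    #include <trace/events/rxrpc.h>

    /* "why" is now a typed value rather than a 4-char string, so it
     * can be stored, compared and rendered symbolically at trace time. */
    static void hypothetical_set_abort(enum rxrpc_abort_reason why)
    {
            /* ... record why for the rxrpc_abort tracepoint ... */
    }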
index 3511095..42a40ad 100644 (file)
@@ -58,7 +58,7 @@
 
 #define PSCI_1_1_FN_SYSTEM_RESET2              PSCI_0_2_FN(18)
 #define PSCI_1_1_FN_MEM_PROTECT                        PSCI_0_2_FN(19)
-#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE    PSCI_0_2_FN(19)
+#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE    PSCI_0_2_FN(20)
 
 #define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND      PSCI_0_2_FN64(12)
 #define PSCI_1_0_FN64_NODE_HW_STATE            PSCI_0_2_FN64(13)
@@ -67,7 +67,7 @@
 #define PSCI_1_0_FN64_STAT_COUNT               PSCI_0_2_FN64(17)
 
 #define PSCI_1_1_FN64_SYSTEM_RESET2            PSCI_0_2_FN64(18)
-#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE  PSCI_0_2_FN64(19)
+#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE  PSCI_0_2_FN64(20)
 
 /* PSCI v0.2 power state encoding for CPU_SUSPEND function */
 #define PSCI_0_2_POWER_STATE_ID_MASK           0xffff
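Both MEM_PROTECT and MEM_PROTECT_CHECK_RANGE previously expanded to function ID 19; the fix moves CHECK_RANGE to its own ID 20, matching PSCI v1.1. A standalone sketch of how the collision manifests, assuming the usual PSCI bases (0x84000000 for 32-bit calls, 0xc4000000 for SMC64 calls); the FN() macro below is a stand-in for PSCI_0_2_FN():

    #include <assert.h>
    #include <stdint.h>

    #define FN_BASE 0x84000000u
    #define FN(n)   (FN_BASE + (n))        /* stand-in for PSCI_0_2_FN() */

    int main(void)
    {
            uint32_t mem_protect = FN(19);
            uint32_t check_range = FN(20); /* was FN(19): same ID as above */

            assert(mem_protect != check_range);
            return 0;
    }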
index eaa932b..ad4fb4e 100644 (file)
@@ -117,7 +117,7 @@ struct xenbus_driver {
                     const struct xenbus_device_id *id);
        void (*otherend_changed)(struct xenbus_device *dev,
                                 enum xenbus_state backend_state);
-       int (*remove)(struct xenbus_device *dev);
+       void (*remove)(struct xenbus_device *dev);
        int (*suspend)(struct xenbus_device *dev);
        int (*resume)(struct xenbus_device *dev);
        int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *);
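With the bus core no longer propagating a return value from ->remove(), drivers convert their callbacks to return void and drop the trailing "return 0;". A hedged driver-side sketch; "examplefront" and its private state are hypothetical, only the callback signature comes from the header change above:

    static void examplefront_remove(struct xenbus_device *dev)
    {
            struct examplefront_info *info = dev_get_drvdata(&dev->dev);

            /* tear down; there is no longer an error to report */
            kfree(info);
    }

    static struct xenbus_driver examplefront_driver = {
            /* .ids, .probe, .otherend_changed, ... */
            .remove = examplefront_remove,
    };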
index 7e5c3dd..0958846 100644 (file)
@@ -894,13 +894,17 @@ config CC_IMPLICIT_FALLTHROUGH
        default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
        default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
 
-# Currently, disable gcc-12 array-bounds globally.
+# Currently, disable gcc-11,12 array-bounds globally.
 # We may want to target only particular configurations some day.
+config GCC11_NO_ARRAY_BOUNDS
+       def_bool y
+
 config GCC12_NO_ARRAY_BOUNDS
        def_bool y
 
 config CC_NO_ARRAY_BOUNDS
        bool
+       default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC_VERSION < 120000 && GCC11_NO_ARRAY_BOUNDS
        default y if CC_IS_GCC && GCC_VERSION >= 120000 && GCC_VERSION < 130000 && GCC12_NO_ARRAY_BOUNDS
 
 #
index 8316c23..26de459 100644 (file)
@@ -59,3 +59,4 @@ include/generated/utsversion.h: FORCE
 
 $(obj)/version-timestamp.o: include/generated/utsversion.h
 CFLAGS_version-timestamp.o := -include include/generated/utsversion.h
+KASAN_SANITIZE_version-timestamp.o := n
index 2e04850..882bd56 100644 (file)
@@ -170,12 +170,11 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
                xa_for_each(&ctx->personalities, index, cred)
                        io_uring_show_cred(m, index, cred);
        }
-       if (has_lock)
-               mutex_unlock(&ctx->uring_lock);
 
        seq_puts(m, "PollList:\n");
        for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
                struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
+               struct io_hash_bucket *hbl = &ctx->cancel_table_locked.hbs[i];
                struct io_kiocb *req;
 
                spin_lock(&hb->lock);
@@ -183,8 +182,17 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
                        seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
                                        task_work_pending(req->task));
                spin_unlock(&hb->lock);
+
+               if (!has_lock)
+                       continue;
+               hlist_for_each_entry(req, &hbl->list, hash_node)
+                       seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
+                                       task_work_pending(req->task));
        }
 
+       if (has_lock)
+               mutex_unlock(&ctx->uring_lock);
+
        seq_puts(m, "CqOverflowList:\n");
        spin_lock(&ctx->completion_lock);
        list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
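The reordering keeps uring_lock, when the earlier trylock succeeded, held across the whole PollList walk, so the ->cancel_table_locked buckets, which are only stable under that mutex, can now be dumped too; without the lock they are simply skipped. A userspace analogue of the trylock-and-skip pattern, with illustrative names not taken from io_uring:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static void dump_state(void)
    {
            bool has_lock = pthread_mutex_trylock(&big_lock) == 0;
            int i;

            for (i = 0; i < 16; i++) {
                    /* ... dump data guarded by a per-bucket lock ... */
                    if (!has_lock)
                            continue;  /* skip data that needs big_lock */
                    /* ... dump data only stable under big_lock ... */
            }

            if (has_lock)
                    pthread_mutex_unlock(&big_lock);
    }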
index 992dcd9..411bb2d 100644 (file)
@@ -1230,7 +1230,12 @@ static void io_wq_cancel_tw_create(struct io_wq *wq)
 
                worker = container_of(cb, struct io_worker, create_work);
                io_worker_cancel_cb(worker);
-               kfree(worker);
+               /*
+                * Only the worker continuation helper has the worker
+                * allocated, and hence only that one needs freeing.
+                */
+               if (cb->func == create_worker_cont)
+                       kfree(worker);
        }
 }
 
index ee7da61..32e5fc8 100644 (file)
@@ -223,21 +223,22 @@ enum {
        IOU_POLL_DONE = 0,
        IOU_POLL_NO_ACTION = 1,
        IOU_POLL_REMOVE_POLL_USE_RES = 2,
+       IOU_POLL_REISSUE = 3,
 };
 
 /*
  * All poll tw should go through this. Checks for poll events, manages
  * references, does rewait, etc.
  *
- * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action require,
- * which is either spurious wakeup or multishot CQE is served.
- * IOU_POLL_DONE when it's done with the request, then the mask is stored in req->cqe.res.
- * IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot poll and that the result
- * is stored in req->cqe.
+ * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
+ * required, which means either a spurious wakeup occurred or a multishot CQE
+ * was served. IOU_POLL_DONE when it is done with the request, in which case
+ * the mask is stored in req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates
+ * that the multishot poll should be removed and that the result is stored in
+ * req->cqe. IOU_POLL_REISSUE when the request lost a wakeup race and should
+ * be resubmitted (see the vfs_poll() comment below).
  */
 static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 {
-       int v, ret;
+       int v;
 
        /* req->task == current here, checking PF_EXITING is safe */
        if (unlikely(req->task->flags & PF_EXITING))
@@ -276,10 +277,15 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                if (!req->cqe.res) {
                        struct poll_table_struct pt = { ._key = req->apoll_events };
                        req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
+                       /*
+                        * We got woken with a mask, but someone else got to
+                        * it first. The above vfs_poll() doesn't add us back
+                        * to the waitqueue, so if we get nothing back, we
+                        * should be safe and attempt a reissue.
+                        */
+                       if (unlikely(!req->cqe.res))
+                               return IOU_POLL_REISSUE;
                }
-
-               if ((unlikely(!req->cqe.res)))
-                       continue;
                if (req->apoll_events & EPOLLONESHOT)
                        return IOU_POLL_DONE;
 
@@ -294,7 +300,7 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                                return IOU_POLL_REMOVE_POLL_USE_RES;
                        }
                } else {
-                       ret = io_poll_issue(req, locked);
+                       int ret = io_poll_issue(req, locked);
                        if (ret == IOU_STOP_MULTISHOT)
                                return IOU_POLL_REMOVE_POLL_USE_RES;
                        if (ret < 0)
@@ -330,6 +336,9 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 
                        poll = io_kiocb_to_cmd(req, struct io_poll);
                        req->cqe.res = mangle_poll(req->cqe.res & poll->events);
+               } else if (ret == IOU_POLL_REISSUE) {
+                       io_req_task_submit(req, locked);
+                       return;
                } else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
                        req->cqe.res = ret;
                        req_set_fail(req);
@@ -342,7 +351,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 
                if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
                        io_req_task_complete(req, locked);
-               else if (ret == IOU_POLL_DONE)
+               else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
                        io_req_task_submit(req, locked);
                else
                        io_req_defer_failed(req, ret);
@@ -533,6 +542,14 @@ static bool io_poll_can_finish_inline(struct io_kiocb *req,
        return pt->owning || io_poll_get_ownership(req);
 }
 
+static void io_poll_add_hash(struct io_kiocb *req)
+{
+       if (req->flags & REQ_F_HASH_LOCKED)
+               io_poll_req_insert_locked(req);
+       else
+               io_poll_req_insert(req);
+}
+
 /*
  * Returns 0 when it's handed over for polling. The caller owns the requests if
  * it returns non-zero, but otherwise should not touch it. Negative values
@@ -591,18 +608,17 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 
        if (mask &&
           ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
-               if (!io_poll_can_finish_inline(req, ipt))
+               if (!io_poll_can_finish_inline(req, ipt)) {
+                       io_poll_add_hash(req);
                        return 0;
+               }
                io_poll_remove_entries(req);
                ipt->result_mask = mask;
                /* no one else has access to the req, forget about the ref */
                return 1;
        }
 
-       if (req->flags & REQ_F_HASH_LOCKED)
-               io_poll_req_insert_locked(req);
-       else
-               io_poll_req_insert(req);
+       io_poll_add_hash(req);
 
        if (mask && (poll->events & EPOLLET) &&
            io_poll_can_finish_inline(req, ipt)) {
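IOU_POLL_REISSUE covers the race where the waitqueue wakeup delivered a mask but another consumer drained the event before the task work ran: the re-poll comes back empty, and rather than completing with a zero mask the request is resubmitted via io_req_task_submit(). A userspace analogue of the "woken but empty, re-arm" check, using poll(2) in place of vfs_poll(); illustrative only:

    #include <poll.h>

    /* Returns 1 if the event is really pending, 0 if we lost the race
     * and the request should be reissued (re-armed). */
    static int still_pending_after_wakeup(int fd, short events)
    {
            struct pollfd pfd = { .fd = fd, .events = events };

            if (poll(&pfd, 1, 0) <= 0 || !(pfd.revents & events))
                    return 0;       /* someone else got to it first */
            return 1;
    }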
index 8227af2..9c3ddd4 100644 (file)
@@ -1062,7 +1062,11 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
                        continue;
 
                req->cqe.flags = io_put_kbuf(req, 0);
-               io_fill_cqe_req(req->ctx, req);
+               if (unlikely(!__io_fill_cqe_req(ctx, req))) {
+                       spin_lock(&ctx->completion_lock);
+                       io_req_cqe_overflow(req);
+                       spin_unlock(&ctx->completion_lock);
+               }
        }
 
        if (unlikely(!nr_events))
index b64c44a..2531f34 100644 (file)
@@ -86,6 +86,11 @@ config GENERIC_IRQ_IPI
        depends on SMP
        select IRQ_DOMAIN_HIERARCHY
 
+# Generic IRQ IPI Mux support
+config GENERIC_IRQ_IPI_MUX
+       bool
+       depends on SMP
+
 # Generic MSI hierarchical interrupt domain support
 config GENERIC_MSI_IRQ
        bool
index b4f5371..f19d308 100644 (file)
@@ -15,6 +15,7 @@ obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
 obj-$(CONFIG_PM_SLEEP) += pm.o
 obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
 obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
+obj-$(CONFIG_GENERIC_IRQ_IPI_MUX) += ipi-mux.o
 obj-$(CONFIG_SMP) += affinity.o
 obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o
 obj-$(CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR) += matrix.o
diff --git a/kernel/irq/ipi-mux.c b/kernel/irq/ipi-mux.c
new file mode 100644 (file)
index 0000000..fa4fc18
--- /dev/null
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Multiplex several virtual IPIs over a single HW IPI.
+ *
+ * Copyright The Asahi Linux Contributors
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "ipi-mux: " fmt
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/jump_label.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+
+struct ipi_mux_cpu {
+       atomic_t                        enable;
+       atomic_t                        bits;
+};
+
+static struct ipi_mux_cpu __percpu *ipi_mux_pcpu;
+static struct irq_domain *ipi_mux_domain;
+static void (*ipi_mux_send)(unsigned int cpu);
+
+static void ipi_mux_mask(struct irq_data *d)
+{
+       struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
+
+       atomic_andnot(BIT(irqd_to_hwirq(d)), &icpu->enable);
+}
+
+static void ipi_mux_unmask(struct irq_data *d)
+{
+       struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
+       u32 ibit = BIT(irqd_to_hwirq(d));
+
+       atomic_or(ibit, &icpu->enable);
+
+       /*
+        * The atomic_or() above must complete before the atomic_read()
+        * below to avoid racing ipi_mux_send_mask().
+        */
+       smp_mb__after_atomic();
+
+       /* If a pending IPI was unmasked, raise a parent IPI immediately. */
+       if (atomic_read(&icpu->bits) & ibit)
+               ipi_mux_send(smp_processor_id());
+}
+
+static void ipi_mux_send_mask(struct irq_data *d, const struct cpumask *mask)
+{
+       struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
+       u32 ibit = BIT(irqd_to_hwirq(d));
+       unsigned long pending;
+       int cpu;
+
+       for_each_cpu(cpu, mask) {
+               icpu = per_cpu_ptr(ipi_mux_pcpu, cpu);
+
+               /*
+                * This sequence is the mirror of the one in ipi_mux_unmask();
+                * see the comment there. Additionally, release semantics
+                * ensure that the vIPI flag set is ordered after any shared
+                * memory accesses that precede it. This therefore also pairs
+                * with the atomic_fetch_andnot in ipi_mux_process().
+                */
+               pending = atomic_fetch_or_release(ibit, &icpu->bits);
+
+               /*
+                * The atomic_fetch_or_release() above must complete
+                * before the atomic_read() below to avoid racing with
+                * ipi_mux_unmask().
+                */
+               smp_mb__after_atomic();
+
+               /*
+                * The flag writes must complete before the physical IPI is
+                * issued to another CPU. This is implied by the control
+                * dependency on the result of atomic_read() below, which is
+                * itself already ordered after the vIPI flag write.
+                */
+               if (!(pending & ibit) && (atomic_read(&icpu->enable) & ibit))
+                       ipi_mux_send(cpu);
+       }
+}
+
+static const struct irq_chip ipi_mux_chip = {
+       .name           = "IPI Mux",
+       .irq_mask       = ipi_mux_mask,
+       .irq_unmask     = ipi_mux_unmask,
+       .ipi_send_mask  = ipi_mux_send_mask,
+};
+
+static int ipi_mux_domain_alloc(struct irq_domain *d, unsigned int virq,
+                               unsigned int nr_irqs, void *arg)
+{
+       int i;
+
+       for (i = 0; i < nr_irqs; i++) {
+               irq_set_percpu_devid(virq + i);
+               irq_domain_set_info(d, virq + i, i, &ipi_mux_chip, NULL,
+                                   handle_percpu_devid_irq, NULL, NULL);
+       }
+
+       return 0;
+}
+
+static const struct irq_domain_ops ipi_mux_domain_ops = {
+       .alloc          = ipi_mux_domain_alloc,
+       .free           = irq_domain_free_irqs_top,
+};
+
+/**
+ * ipi_mux_process - Process multiplexed virtual IPIs
+ */
+void ipi_mux_process(void)
+{
+       struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
+       irq_hw_number_t hwirq;
+       unsigned long ipis;
+       unsigned int en;
+
+       /*
+        * Reading the enable mask does not need to be ordered, as long
+        * as this function is called from the interrupt handler, because
+        * only the CPU itself can change its own enable mask.
+        */
+       en = atomic_read(&icpu->enable);
+
+       /*
+        * Clear the IPIs we are about to handle. This pairs with the
+        * atomic_fetch_or_release() in ipi_mux_send_mask().
+        */
+       ipis = atomic_fetch_andnot(en, &icpu->bits) & en;
+
+       for_each_set_bit(hwirq, &ipis, BITS_PER_TYPE(int))
+               generic_handle_domain_irq(ipi_mux_domain, hwirq);
+}
+
+/**
+ * ipi_mux_create - Create virtual IPIs multiplexed on top of a single
+ * parent IPI.
+ * @nr_ipi:            number of virtual IPIs to create. This should
+ *                     be <= BITS_PER_TYPE(int)
+ * @mux_send:          callback to trigger parent IPI for a particular CPU
+ *
+ * Returns the first virq of the newly created virtual IPIs upon success,
+ * or <=0 upon failure
+ */
+int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu))
+{
+       struct fwnode_handle *fwnode;
+       struct irq_domain *domain;
+       int rc;
+
+       if (ipi_mux_domain)
+               return -EEXIST;
+
+       if (BITS_PER_TYPE(int) < nr_ipi || !mux_send)
+               return -EINVAL;
+
+       ipi_mux_pcpu = alloc_percpu(typeof(*ipi_mux_pcpu));
+       if (!ipi_mux_pcpu)
+               return -ENOMEM;
+
+       fwnode = irq_domain_alloc_named_fwnode("IPI-Mux");
+       if (!fwnode) {
+               pr_err("unable to create IPI Mux fwnode\n");
+               rc = -ENOMEM;
+               goto fail_free_cpu;
+       }
+
+       domain = irq_domain_create_linear(fwnode, nr_ipi,
+                                         &ipi_mux_domain_ops, NULL);
+       if (!domain) {
+               pr_err("unable to add IPI Mux domain\n");
+               rc = -ENOMEM;
+               goto fail_free_fwnode;
+       }
+
+       domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
+       irq_domain_update_bus_token(domain, DOMAIN_BUS_IPI);
+
+       rc = irq_domain_alloc_irqs(domain, nr_ipi, NUMA_NO_NODE, NULL);
+       if (rc <= 0) {
+               pr_err("unable to alloc IRQs from IPI Mux domain\n");
+               goto fail_free_domain;
+       }
+
+       ipi_mux_domain = domain;
+       ipi_mux_send = mux_send;
+
+       return rc;
+
+fail_free_domain:
+       irq_domain_remove(domain);
+fail_free_fwnode:
+       irq_domain_free_fwnode(fwnode);
+fail_free_cpu:
+       free_percpu(ipi_mux_pcpu);
+       return rc;
+}
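The intended usage, following the Apple AIC code this was lifted from: an irqchip driver creates the virtual IPIs once, passing a callback that raises its hardware IPI, then calls ipi_mux_process() from the hardware IPI's handler to demultiplex. A hedged sketch; hw_ipi_raise(), the handler wiring and the count of 8 are hypothetical driver details, only ipi_mux_create() and ipi_mux_process() come from the file above:

    static void hw_ipi_raise(unsigned int cpu)
    {
            /* poke the controller register that IPIs @cpu */
    }

    static irqreturn_t hw_ipi_handler(int irq, void *dev_id)
    {
            ipi_mux_process();      /* demux all pending virtual IPIs */
            return IRQ_HANDLED;
    }

    static int __init hw_ipi_init(void)
    {
            int base_virq = ipi_mux_create(8, hw_ipi_raise);

            if (base_virq <= 0)
                    return base_virq ? base_virq : -ENODEV;
            /* on arm64, e.g.: set_smp_ipi_range(base_virq, 8); */
            return 0;
    }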
index 8fe1da9..1983f1b 100644 (file)
@@ -25,6 +25,9 @@ static DEFINE_MUTEX(irq_domain_mutex);
 
 static struct irq_domain *irq_default_domain;
 
+static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
+                                       unsigned int nr_irqs, int node, void *arg,
+                                       bool realloc, const struct irq_affinity_desc *affinity);
 static void irq_domain_check_hierarchy(struct irq_domain *domain);
 
 struct irqchip_fwid {
@@ -123,23 +126,12 @@ void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
 }
 EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
 
-/**
- * __irq_domain_add() - Allocate a new irq_domain data structure
- * @fwnode: firmware node for the interrupt controller
- * @size: Size of linear map; 0 for radix mapping only
- * @hwirq_max: Maximum number of interrupts supported by controller
- * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
- *              direct mapping
- * @ops: domain callbacks
- * @host_data: Controller private data pointer
- *
- * Allocates and initializes an irq_domain structure.
- * Returns pointer to IRQ domain, or NULL on failure.
- */
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
-                                   irq_hw_number_t hwirq_max, int direct_max,
-                                   const struct irq_domain_ops *ops,
-                                   void *host_data)
+static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
+                                             unsigned int size,
+                                             irq_hw_number_t hwirq_max,
+                                             int direct_max,
+                                             const struct irq_domain_ops *ops,
+                                             void *host_data)
 {
        struct irqchip_fwid *fwid;
        struct irq_domain *domain;
@@ -214,25 +206,66 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int s
 
        /* Fill structure */
        INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
-       mutex_init(&domain->revmap_mutex);
        domain->ops = ops;
        domain->host_data = host_data;
        domain->hwirq_max = hwirq_max;
 
-       if (direct_max) {
+       if (direct_max)
                domain->flags |= IRQ_DOMAIN_FLAG_NO_MAP;
-       }
 
        domain->revmap_size = size;
 
+       /*
+        * Hierarchical domains use the domain lock of the root domain
+        * (innermost domain).
+        *
+        * For non-hierarchical domains (as for root domains), the root
+        * pointer is set to the domain itself so that &domain->root->mutex
+        * always points to the right lock.
+        */
+       mutex_init(&domain->mutex);
+       domain->root = domain;
+
        irq_domain_check_hierarchy(domain);
 
+       return domain;
+}
+
+static void __irq_domain_publish(struct irq_domain *domain)
+{
        mutex_lock(&irq_domain_mutex);
        debugfs_add_domain_dir(domain);
        list_add(&domain->link, &irq_domain_list);
        mutex_unlock(&irq_domain_mutex);
 
        pr_debug("Added domain %s\n", domain->name);
+}
+
+/**
+ * __irq_domain_add() - Allocate a new irq_domain data structure
+ * @fwnode: firmware node for the interrupt controller
+ * @size: Size of linear map; 0 for radix mapping only
+ * @hwirq_max: Maximum number of interrupts supported by controller
+ * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
+ *              direct mapping
+ * @ops: domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Allocates and initializes an irq_domain structure.
+ * Returns pointer to IRQ domain, or NULL on failure.
+ */
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
+                                   irq_hw_number_t hwirq_max, int direct_max,
+                                   const struct irq_domain_ops *ops,
+                                   void *host_data)
+{
+       struct irq_domain *domain;
+
+       domain = __irq_domain_create(fwnode, size, hwirq_max, direct_max,
+                                    ops, host_data);
+       if (domain)
+               __irq_domain_publish(domain);
+
        return domain;
 }
 EXPORT_SYMBOL_GPL(__irq_domain_add);
@@ -502,30 +535,34 @@ static bool irq_domain_is_nomap(struct irq_domain *domain)
 static void irq_domain_clear_mapping(struct irq_domain *domain,
                                     irq_hw_number_t hwirq)
 {
+       lockdep_assert_held(&domain->root->mutex);
+
        if (irq_domain_is_nomap(domain))
                return;
 
-       mutex_lock(&domain->revmap_mutex);
        if (hwirq < domain->revmap_size)
                rcu_assign_pointer(domain->revmap[hwirq], NULL);
        else
                radix_tree_delete(&domain->revmap_tree, hwirq);
-       mutex_unlock(&domain->revmap_mutex);
 }
 
 static void irq_domain_set_mapping(struct irq_domain *domain,
                                   irq_hw_number_t hwirq,
                                   struct irq_data *irq_data)
 {
+       /*
+        * This also makes sure that all domains point to the same root when
+        * called from irq_domain_insert_irq() for each domain in a hierarchy.
+        */
+       lockdep_assert_held(&domain->root->mutex);
+
        if (irq_domain_is_nomap(domain))
                return;
 
-       mutex_lock(&domain->revmap_mutex);
        if (hwirq < domain->revmap_size)
                rcu_assign_pointer(domain->revmap[hwirq], irq_data);
        else
                radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
-       mutex_unlock(&domain->revmap_mutex);
 }
 
 static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
@@ -538,6 +575,9 @@ static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
                return;
 
        hwirq = irq_data->hwirq;
+
+       mutex_lock(&domain->root->mutex);
+
        irq_set_status_flags(irq, IRQ_NOREQUEST);
 
        /* remove chip and handler */
@@ -557,10 +597,12 @@ static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 
        /* Clear reverse map for this hwirq */
        irq_domain_clear_mapping(domain, hwirq);
+
+       mutex_unlock(&domain->root->mutex);
 }
 
-int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
-                        irq_hw_number_t hwirq)
+static int irq_domain_associate_locked(struct irq_domain *domain, unsigned int virq,
+                                      irq_hw_number_t hwirq)
 {
        struct irq_data *irq_data = irq_get_irq_data(virq);
        int ret;
@@ -573,7 +615,6 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
        if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
                return -EINVAL;
 
-       mutex_lock(&irq_domain_mutex);
        irq_data->hwirq = hwirq;
        irq_data->domain = domain;
        if (domain->ops->map) {
@@ -590,23 +631,29 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
                        }
                        irq_data->domain = NULL;
                        irq_data->hwirq = 0;
-                       mutex_unlock(&irq_domain_mutex);
                        return ret;
                }
-
-               /* If not already assigned, give the domain the chip's name */
-               if (!domain->name && irq_data->chip)
-                       domain->name = irq_data->chip->name;
        }
 
        domain->mapcount++;
        irq_domain_set_mapping(domain, hwirq, irq_data);
-       mutex_unlock(&irq_domain_mutex);
 
        irq_clear_status_flags(virq, IRQ_NOREQUEST);
 
        return 0;
 }
+
+int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+                        irq_hw_number_t hwirq)
+{
+       int ret;
+
+       mutex_lock(&domain->root->mutex);
+       ret = irq_domain_associate_locked(domain, virq, hwirq);
+       mutex_unlock(&domain->root->mutex);
+
+       return ret;
+}
 EXPORT_SYMBOL_GPL(irq_domain_associate);
 
 void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
@@ -619,9 +666,8 @@ void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
        pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
                of_node_full_name(of_node), irq_base, (int)hwirq_base, count);
 
-       for (i = 0; i < count; i++) {
+       for (i = 0; i < count; i++)
                irq_domain_associate(domain, irq_base + i, hwirq_base + i);
-       }
 }
 EXPORT_SYMBOL_GPL(irq_domain_associate_many);
 
@@ -668,6 +714,34 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
 #endif
 
+static unsigned int irq_create_mapping_affinity_locked(struct irq_domain *domain,
+                                                      irq_hw_number_t hwirq,
+                                                      const struct irq_affinity_desc *affinity)
+{
+       struct device_node *of_node = irq_domain_get_of_node(domain);
+       int virq;
+
+       pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
+
+       /* Allocate a virtual interrupt number */
+       virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
+                                     affinity);
+       if (virq <= 0) {
+               pr_debug("-> virq allocation failed\n");
+               return 0;
+       }
+
+       if (irq_domain_associate_locked(domain, virq, hwirq)) {
+               irq_free_desc(virq);
+               return 0;
+       }
+
+       pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
+               hwirq, of_node_full_name(of_node), virq);
+
+       return virq;
+}
+
 /**
  * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space
  * @domain: domain owning this hardware interrupt or NULL for default domain
@@ -680,14 +754,11 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
  * on the number returned from that call.
  */
 unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
-                                      irq_hw_number_t hwirq,
-                                      const struct irq_affinity_desc *affinity)
+                                        irq_hw_number_t hwirq,
+                                        const struct irq_affinity_desc *affinity)
 {
-       struct device_node *of_node;
        int virq;
 
-       pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
-
        /* Look for default domain if necessary */
        if (domain == NULL)
                domain = irq_default_domain;
@@ -695,32 +766,19 @@ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
                WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
                return 0;
        }
-       pr_debug("-> using domain @%p\n", domain);
 
-       of_node = irq_domain_get_of_node(domain);
+       mutex_lock(&domain->root->mutex);
 
        /* Check if mapping already exists */
        virq = irq_find_mapping(domain, hwirq);
        if (virq) {
-               pr_debug("-> existing mapping on virq %d\n", virq);
-               return virq;
-       }
-
-       /* Allocate a virtual interrupt number */
-       virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
-                                     affinity);
-       if (virq <= 0) {
-               pr_debug("-> virq allocation failed\n");
-               return 0;
-       }
-
-       if (irq_domain_associate(domain, virq, hwirq)) {
-               irq_free_desc(virq);
-               return 0;
+               pr_debug("existing mapping on virq %d\n", virq);
+               goto out;
        }
 
-       pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
-               hwirq, of_node_full_name(of_node), virq);
+       virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity);
+out:
+       mutex_unlock(&domain->root->mutex);
 
        return virq;
 }
@@ -789,6 +847,8 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
        if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
                type &= IRQ_TYPE_SENSE_MASK;
 
+       mutex_lock(&domain->root->mutex);
+
        /*
         * If we've already configured this interrupt,
         * don't do it again, or hell will break loose.
@@ -801,7 +861,7 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
                 * interrupt number.
                 */
                if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
-                       return virq;
+                       goto out;
 
                /*
                 * If the trigger type has not been set yet, then set
@@ -809,40 +869,45 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
                 */
                if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
                        irq_data = irq_get_irq_data(virq);
-                       if (!irq_data)
-                               return 0;
+                       if (!irq_data) {
+                               virq = 0;
+                               goto out;
+                       }
 
                        irqd_set_trigger_type(irq_data, type);
-                       return virq;
+                       goto out;
                }
 
                pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
                        hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
-               return 0;
+               virq = 0;
+               goto out;
        }
 
        if (irq_domain_is_hierarchy(domain)) {
-               virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
-               if (virq <= 0)
-                       return 0;
+               virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE,
+                                                   fwspec, false, NULL);
+               if (virq <= 0) {
+                       virq = 0;
+                       goto out;
+               }
        } else {
                /* Create mapping */
-               virq = irq_create_mapping(domain, hwirq);
+               virq = irq_create_mapping_affinity_locked(domain, hwirq, NULL);
                if (!virq)
-                       return virq;
+                       goto out;
        }
 
        irq_data = irq_get_irq_data(virq);
-       if (!irq_data) {
-               if (irq_domain_is_hierarchy(domain))
-                       irq_domain_free_irqs(virq, 1);
-               else
-                       irq_dispose_mapping(virq);
-               return 0;
+       if (WARN_ON(!irq_data)) {
+               virq = 0;
+               goto out;
        }
 
        /* Store trigger type */
        irqd_set_trigger_type(irq_data, type);
+out:
+       mutex_unlock(&domain->root->mutex);
 
        return virq;
 }
@@ -1102,12 +1167,16 @@ struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
        struct irq_domain *domain;
 
        if (size)
-               domain = irq_domain_create_linear(fwnode, size, ops, host_data);
+               domain = __irq_domain_create(fwnode, size, size, 0, ops, host_data);
        else
-               domain = irq_domain_create_tree(fwnode, ops, host_data);
+               domain = __irq_domain_create(fwnode, 0, ~0, 0, ops, host_data);
+
        if (domain) {
+               domain->root = parent->root;
                domain->parent = parent;
                domain->flags |= flags;
+
+               __irq_domain_publish(domain);
        }
 
        return domain;
@@ -1123,10 +1192,6 @@ static void irq_domain_insert_irq(int virq)
 
                domain->mapcount++;
                irq_domain_set_mapping(domain, data->hwirq, data);
-
-               /* If not already assigned, give the domain the chip's name */
-               if (!domain->name && data->chip)
-                       domain->name = data->chip->name;
        }
 
        irq_clear_status_flags(virq, IRQ_NOREQUEST);
@@ -1426,40 +1491,12 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
        return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
 }
 
-/**
- * __irq_domain_alloc_irqs - Allocate IRQs from domain
- * @domain:    domain to allocate from
- * @irq_base:  allocate specified IRQ number if irq_base >= 0
- * @nr_irqs:   number of IRQs to allocate
- * @node:      NUMA node id for memory allocation
- * @arg:       domain specific argument
- * @realloc:   IRQ descriptors have already been allocated if true
- * @affinity:  Optional irq affinity mask for multiqueue devices
- *
- * Allocate IRQ numbers and initialized all data structures to support
- * hierarchy IRQ domains.
- * Parameter @realloc is mainly to support legacy IRQs.
- * Returns error code or allocated IRQ number
- *
- * The whole process to setup an IRQ has been split into two steps.
- * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
- * descriptor and required hardware resources. The second step,
- * irq_domain_activate_irq(), is to program the hardware with preallocated
- * resources. In this way, it's easier to rollback when failing to
- * allocate resources.
- */
-int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
-                           unsigned int nr_irqs, int node, void *arg,
-                           bool realloc, const struct irq_affinity_desc *affinity)
+static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
+                                       unsigned int nr_irqs, int node, void *arg,
+                                       bool realloc, const struct irq_affinity_desc *affinity)
 {
        int i, ret, virq;
 
-       if (domain == NULL) {
-               domain = irq_default_domain;
-               if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
-                       return -EINVAL;
-       }
-
        if (realloc && irq_base >= 0) {
                virq = irq_base;
        } else {
@@ -1478,24 +1515,18 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
                goto out_free_desc;
        }
 
-       mutex_lock(&irq_domain_mutex);
        ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
-       if (ret < 0) {
-               mutex_unlock(&irq_domain_mutex);
+       if (ret < 0)
                goto out_free_irq_data;
-       }
 
        for (i = 0; i < nr_irqs; i++) {
                ret = irq_domain_trim_hierarchy(virq + i);
-               if (ret) {
-                       mutex_unlock(&irq_domain_mutex);
+               if (ret)
                        goto out_free_irq_data;
-               }
        }
-       
+
        for (i = 0; i < nr_irqs; i++)
                irq_domain_insert_irq(virq + i);
-       mutex_unlock(&irq_domain_mutex);
 
        return virq;
 
@@ -1505,6 +1536,48 @@ out_free_desc:
        irq_free_descs(virq, nr_irqs);
        return ret;
 }
+
+/**
+ * __irq_domain_alloc_irqs - Allocate IRQs from domain
+ * @domain:    domain to allocate from
+ * @irq_base:  allocate specified IRQ number if irq_base >= 0
+ * @nr_irqs:   number of IRQs to allocate
+ * @node:      NUMA node id for memory allocation
+ * @arg:       domain specific argument
+ * @realloc:   IRQ descriptors have already been allocated if true
+ * @affinity:  Optional irq affinity mask for multiqueue devices
+ *
+ * Allocate IRQ numbers and initialize all data structures to support
+ * hierarchy IRQ domains.
+ * Parameter @realloc is mainly to support legacy IRQs.
+ * Returns error code or allocated IRQ number
+ *
+ * The whole process to setup an IRQ has been split into two steps.
+ * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
+ * descriptor and required hardware resources. The second step,
+ * irq_domain_activate_irq(), is to program the hardware with preallocated
+ * resources. In this way, it's easier to rollback when failing to
+ * allocate resources.
+ */
+int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
+                           unsigned int nr_irqs, int node, void *arg,
+                           bool realloc, const struct irq_affinity_desc *affinity)
+{
+       int ret;
+
+       if (domain == NULL) {
+               domain = irq_default_domain;
+               if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
+                       return -EINVAL;
+       }
+
+       mutex_lock(&domain->root->mutex);
+       ret = irq_domain_alloc_irqs_locked(domain, irq_base, nr_irqs, node, arg,
+                                          realloc, affinity);
+       mutex_unlock(&domain->root->mutex);
+
+       return ret;
+}
 EXPORT_SYMBOL_GPL(__irq_domain_alloc_irqs);
 
 /* The irq_data was moved, fix the revmap to refer to the new location */
@@ -1512,11 +1585,12 @@ static void irq_domain_fix_revmap(struct irq_data *d)
 {
        void __rcu **slot;
 
+       lockdep_assert_held(&d->domain->root->mutex);
+
        if (irq_domain_is_nomap(d->domain))
                return;
 
        /* Fix up the revmap. */
-       mutex_lock(&d->domain->revmap_mutex);
        if (d->hwirq < d->domain->revmap_size) {
                /* Not using radix tree */
                rcu_assign_pointer(d->domain->revmap[d->hwirq], d);
@@ -1525,7 +1599,6 @@ static void irq_domain_fix_revmap(struct irq_data *d)
                if (slot)
                        radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
        }
-       mutex_unlock(&d->domain->revmap_mutex);
 }
 
 /**
@@ -1541,8 +1614,8 @@ static void irq_domain_fix_revmap(struct irq_data *d)
  */
 int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
 {
-       struct irq_data *child_irq_data;
-       struct irq_data *root_irq_data = irq_get_irq_data(virq);
+       struct irq_data *irq_data = irq_get_irq_data(virq);
+       struct irq_data *parent_irq_data;
        struct irq_desc *desc;
        int rv = 0;
 
@@ -1567,47 +1640,46 @@ int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
        if (WARN_ON(!irq_domain_is_hierarchy(domain)))
                return -EINVAL;
 
-       if (!root_irq_data)
+       if (!irq_data)
                return -EINVAL;
 
-       if (domain->parent != root_irq_data->domain)
+       if (domain->parent != irq_data->domain)
                return -EINVAL;
 
-       child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
-                                     irq_data_get_node(root_irq_data));
-       if (!child_irq_data)
+       parent_irq_data = kzalloc_node(sizeof(*parent_irq_data), GFP_KERNEL,
+                                      irq_data_get_node(irq_data));
+       if (!parent_irq_data)
                return -ENOMEM;
 
-       mutex_lock(&irq_domain_mutex);
+       mutex_lock(&domain->root->mutex);
 
        /* Copy the original irq_data. */
-       *child_irq_data = *root_irq_data;
+       *parent_irq_data = *irq_data;
 
        /*
-        * Overwrite the root_irq_data, which is embedded in struct
-        * irq_desc, with values for this domain.
+        * Overwrite the irq_data, which is embedded in struct irq_desc, with
+        * values for this domain.
         */
-       root_irq_data->parent_data = child_irq_data;
-       root_irq_data->domain = domain;
-       root_irq_data->mask = 0;
-       root_irq_data->hwirq = 0;
-       root_irq_data->chip = NULL;
-       root_irq_data->chip_data = NULL;
+       irq_data->parent_data = parent_irq_data;
+       irq_data->domain = domain;
+       irq_data->mask = 0;
+       irq_data->hwirq = 0;
+       irq_data->chip = NULL;
+       irq_data->chip_data = NULL;
 
        /* May (probably does) set hwirq, chip, etc. */
        rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
        if (rv) {
                /* Restore the original irq_data. */
-               *root_irq_data = *child_irq_data;
-               kfree(child_irq_data);
+               *irq_data = *parent_irq_data;
+               kfree(parent_irq_data);
                goto error;
        }
 
-       irq_domain_fix_revmap(child_irq_data);
-       irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);
-
+       irq_domain_fix_revmap(parent_irq_data);
+       irq_domain_set_mapping(domain, irq_data->hwirq, irq_data);
 error:
-       mutex_unlock(&irq_domain_mutex);
+       mutex_unlock(&domain->root->mutex);
 
        return rv;
 }
@@ -1623,8 +1695,8 @@ EXPORT_SYMBOL_GPL(irq_domain_push_irq);
  */
 int irq_domain_pop_irq(struct irq_domain *domain, int virq)
 {
-       struct irq_data *root_irq_data = irq_get_irq_data(virq);
-       struct irq_data *child_irq_data;
+       struct irq_data *irq_data = irq_get_irq_data(virq);
+       struct irq_data *parent_irq_data;
        struct irq_data *tmp_irq_data;
        struct irq_desc *desc;
 
@@ -1646,37 +1718,37 @@ int irq_domain_pop_irq(struct irq_domain *domain, int virq)
        if (domain == NULL)
                return -EINVAL;
 
-       if (!root_irq_data)
+       if (!irq_data)
                return -EINVAL;
 
        tmp_irq_data = irq_domain_get_irq_data(domain, virq);
 
        /* We can only "pop" if this domain is at the top of the list */
-       if (WARN_ON(root_irq_data != tmp_irq_data))
+       if (WARN_ON(irq_data != tmp_irq_data))
                return -EINVAL;
 
-       if (WARN_ON(root_irq_data->domain != domain))
+       if (WARN_ON(irq_data->domain != domain))
                return -EINVAL;
 
-       child_irq_data = root_irq_data->parent_data;
-       if (WARN_ON(!child_irq_data))
+       parent_irq_data = irq_data->parent_data;
+       if (WARN_ON(!parent_irq_data))
                return -EINVAL;
 
-       mutex_lock(&irq_domain_mutex);
+       mutex_lock(&domain->root->mutex);
 
-       root_irq_data->parent_data = NULL;
+       irq_data->parent_data = NULL;
 
-       irq_domain_clear_mapping(domain, root_irq_data->hwirq);
+       irq_domain_clear_mapping(domain, irq_data->hwirq);
        irq_domain_free_irqs_hierarchy(domain, virq, 1);
 
        /* Restore the original irq_data. */
-       *root_irq_data = *child_irq_data;
+       *irq_data = *parent_irq_data;
 
-       irq_domain_fix_revmap(root_irq_data);
+       irq_domain_fix_revmap(irq_data);
 
-       mutex_unlock(&irq_domain_mutex);
+       mutex_unlock(&domain->root->mutex);
 
-       kfree(child_irq_data);
+       kfree(parent_irq_data);
 
        return 0;
 }
@@ -1690,17 +1762,20 @@ EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
 void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
 {
        struct irq_data *data = irq_get_irq_data(virq);
+       struct irq_domain *domain;
        int i;
 
        if (WARN(!data || !data->domain || !data->domain->ops->free,
                 "NULL pointer, cannot free irq\n"))
                return;
 
-       mutex_lock(&irq_domain_mutex);
+       domain = data->domain;
+
+       mutex_lock(&domain->root->mutex);
        for (i = 0; i < nr_irqs; i++)
                irq_domain_remove_irq(virq + i);
-       irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
-       mutex_unlock(&irq_domain_mutex);
+       irq_domain_free_irqs_hierarchy(domain, virq, nr_irqs);
+       mutex_unlock(&domain->root->mutex);
 
        irq_domain_free_irq_data(virq, nr_irqs);
        irq_free_descs(virq, nr_irqs);
@@ -1865,6 +1940,13 @@ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
        irq_set_handler_data(virq, handler_data);
 }
 
+static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
+                                       unsigned int nr_irqs, int node, void *arg,
+                                       bool realloc, const struct irq_affinity_desc *affinity)
+{
+       return -EINVAL;
+}
+
 static void irq_domain_check_hierarchy(struct irq_domain *domain)
 {
 }
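The net effect of the conversion: every domain in a hierarchy now serializes on the root domain's mutex, since irq_domain_create_hierarchy() copies domain->root from the parent and standalone domains point root at themselves, so &domain->root->mutex names the same lock at every level. A hedged sketch of a stacked driver; my_domain_alloc, fwnode and host_data are placeholders, the creation call itself is the real API:

    static const struct irq_domain_ops my_ops = {
            .alloc = my_domain_alloc,              /* hypothetical */
            .free  = irq_domain_free_irqs_common,
    };

    struct irq_domain *child;

    child = irq_domain_create_hierarchy(parent, 0, 32, fwnode,
                                        &my_ops, host_data);
    /* child->root == parent->root, so allocations anywhere in the
     * stack contend on the one root mutex instead of a global lock. */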
index f35d9cc..bfbc12d 100644 (file)
@@ -157,14 +157,11 @@ static void test_kallsyms_compression_ratio(void)
 static int lookup_name(void *data, const char *name, struct module *mod, unsigned long addr)
 {
        u64 t0, t1, t;
-       unsigned long flags;
        struct test_stat *stat = (struct test_stat *)data;
 
-       local_irq_save(flags);
-       t0 = sched_clock();
+       t0 = ktime_get_ns();
        (void)kallsyms_lookup_name(name);
-       t1 = sched_clock();
-       local_irq_restore(flags);
+       t1 = ktime_get_ns();
 
        t = t1 - t0;
        if (t < stat->min)
@@ -234,18 +231,15 @@ static int find_symbol(void *data, const char *name, struct module *mod, unsigne
 static void test_perf_kallsyms_on_each_symbol(void)
 {
        u64 t0, t1;
-       unsigned long flags;
        struct test_stat stat;
 
        memset(&stat, 0, sizeof(stat));
        stat.max = INT_MAX;
        stat.name = stub_name;
        stat.perf = 1;
-       local_irq_save(flags);
-       t0 = sched_clock();
+       t0 = ktime_get_ns();
        kallsyms_on_each_symbol(find_symbol, &stat);
-       t1 = sched_clock();
-       local_irq_restore(flags);
+       t1 = ktime_get_ns();
        pr_info("kallsyms_on_each_symbol() traverse all: %lld ns\n", t1 - t0);
 }
 
@@ -270,17 +264,14 @@ static int match_symbol(void *data, unsigned long addr)
 static void test_perf_kallsyms_on_each_match_symbol(void)
 {
        u64 t0, t1;
-       unsigned long flags;
        struct test_stat stat;
 
        memset(&stat, 0, sizeof(stat));
        stat.max = INT_MAX;
        stat.name = stub_name;
-       local_irq_save(flags);
-       t0 = sched_clock();
+       t0 = ktime_get_ns();
        kallsyms_on_each_match_symbol(match_symbol, stat.name, &stat);
-       t1 = sched_clock();
-       local_irq_restore(flags);
+       t1 = ktime_get_ns();
        pr_info("kallsyms_on_each_match_symbol() traverse all: %lld ns\n", t1 - t0);
 }
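
All three hunks above make the same substitution: ktime_get_ns() is monotonic and safe to call with interrupts enabled, so the benchmark no longer needs the local_irq_save()/local_irq_restore() pair that the sched_clock() sampling used, and the measured section stops perturbing the rest of the system. A minimal sketch of the resulting pattern (the helper is hypothetical):

#include <linux/kallsyms.h>
#include <linux/timekeeping.h>

/* Time one lookup without touching the CPU's interrupt state. */
static u64 time_one_lookup(const char *name)
{
        u64 t0 = ktime_get_ns();

        (void)kallsyms_lookup_name(name);
        return ktime_get_ns() - t0;
}
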
 
index dcec1b7..a60c561 100644
@@ -159,7 +159,7 @@ static bool __report_matches(const struct expect_report *r)
        const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT;
        bool ret = false;
        unsigned long flags;
-       typeof(observed.lines) expect;
+       typeof(*observed.lines) *expect;
        const char *end;
        char *cur;
        int i;
@@ -168,6 +168,10 @@ static bool __report_matches(const struct expect_report *r)
        if (!report_available())
                return false;
 
+       expect = kmalloc(sizeof(observed.lines), GFP_KERNEL);
+       if (WARN_ON(!expect))
+               return false;
+
        /* Generate expected report contents. */
 
        /* Title */
@@ -253,6 +257,7 @@ static bool __report_matches(const struct expect_report *r)
                strstr(observed.lines[2], expect[1])));
 out:
        spin_unlock_irqrestore(&observed.lock, flags);
+       kfree(expect);
        return ret;
 }
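
The hunk above moves the expect buffer off the kernel stack: typeof(*observed.lines) * preserves the pointer-to-array type, so expect[0], expect[1], ... index exactly as the on-stack array did, while kmalloc(sizeof(observed.lines), ...) sizes the heap copy to the whole 2D array. A self-contained sketch of the same pattern, with assumed dimensions:

/* Stand-in for observed.lines; the real dimensions live in the test. */
static char lines[3][256];

static bool demo_heap_buffer(void)
{
        typeof(*lines) *expect;         /* char (*)[256] */

        expect = kmalloc(sizeof(lines), GFP_KERNEL);    /* whole 2D array */
        if (WARN_ON(!expect))
                return false;

        strscpy(expect[0], "BUG: KCSAN: data-race in ...", sizeof(expect[0]));
        /* ... build and compare the remaining lines ... */
        kfree(expect);
        return true;
}
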
 
index 25b582b..bb1ee6d 100644
@@ -2604,27 +2604,71 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
                .user_mask = NULL,
                .flags     = SCA_USER,  /* clear the user requested mask */
        };
+       union cpumask_rcuhead {
+               cpumask_t cpumask;
+               struct rcu_head rcu;
+       };
 
        __do_set_cpus_allowed(p, &ac);
-       kfree(ac.user_mask);
+
+       /*
+        * Because this is called with p->pi_lock held, it is not possible
+        * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
+        * kfree_rcu().
+        */
+       kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
+}
+
+static cpumask_t *alloc_user_cpus_ptr(int node)
+{
+       /*
+        * See do_set_cpus_allowed() above for the rcu_head usage.
+        */
+       int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
+
+       return kmalloc_node(size, GFP_KERNEL, node);
 }
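
The union above is the crux: alloc_user_cpus_ptr() sizes the buffer to the larger of cpumask_size() and sizeof(struct rcu_head), so when the mask has to be retired under p->pi_lock (where kfree() is unsafe on PREEMPT_RT) the same memory can be reinterpreted as the rcu_head that kfree_rcu() needs. Condensed, with a hypothetical helper name:

union cpumask_rcuhead {
        cpumask_t       cpumask;
        struct rcu_head rcu;
};

/* Defer freeing a mask that came from alloc_user_cpus_ptr(). */
static void free_user_mask_deferred(cpumask_t *mask)
{
        /* The allocation was sized for the larger of the two members. */
        kfree_rcu((union cpumask_rcuhead *)mask, rcu);
}
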
 
 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
                      int node)
 {
+       cpumask_t *user_mask;
        unsigned long flags;
 
-       if (!src->user_cpus_ptr)
+       /*
+        * Always clear dst->user_cpus_ptr first, as the source's and the
+        * destination's user_cpus_ptr may already differ due to racing.
+        */
+       dst->user_cpus_ptr = NULL;
+
+       /*
+        * This check is racy and losing the race is a valid situation.
+        * It is not worth the extra overhead of taking the pi_lock on
+        * every fork/clone.
+        */
+       if (data_race(!src->user_cpus_ptr))
                return 0;
 
-       dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
-       if (!dst->user_cpus_ptr)
+       user_mask = alloc_user_cpus_ptr(node);
+       if (!user_mask)
                return -ENOMEM;
 
-       /* Use pi_lock to protect content of user_cpus_ptr */
+       /*
+        * Use pi_lock to protect content of user_cpus_ptr
+        *
+        * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
+        * do_set_cpus_allowed().
+        */
        raw_spin_lock_irqsave(&src->pi_lock, flags);
-       cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
+       if (src->user_cpus_ptr) {
+               swap(dst->user_cpus_ptr, user_mask);
+               cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
+       }
        raw_spin_unlock_irqrestore(&src->pi_lock, flags);
+
+       if (unlikely(user_mask))
+               kfree(user_mask);
+
        return 0;
 }
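
The copy path above is an ownership dance: allocate outside the lock, publish the buffer under src->pi_lock only if the source mask is still set, and let swap() leave either NULL or the unused buffer behind for the final kfree(). In outline (allocation-failure handling elided):

cpumask_t *buf = alloc_user_cpus_ptr(node);

raw_spin_lock_irqsave(&src->pi_lock, flags);
if (src->user_cpus_ptr) {
        swap(dst->user_cpus_ptr, buf);  /* dst takes the buffer, buf becomes NULL */
        cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
}
raw_spin_unlock_irqrestore(&src->pi_lock, flags);

kfree(buf);     /* no-op when the buffer was published */
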
 
@@ -3581,6 +3625,11 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
        return false;
 }
 
+static inline cpumask_t *alloc_user_cpus_ptr(int node)
+{
+       return NULL;
+}
+
 #endif /* !CONFIG_SMP */
 
 static void
@@ -5504,7 +5553,9 @@ void scheduler_tick(void)
        unsigned long thermal_pressure;
        u64 resched_latency;
 
-       arch_scale_freq_tick();
+       if (housekeeping_cpu(cpu, HK_TYPE_TICK))
+               arch_scale_freq_tick();
+
        sched_clock_tick();
 
        rq_lock(rq, &rf);
@@ -8239,8 +8290,8 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
        if (retval)
                goto out_put_task;
 
-       user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
-       if (!user_mask) {
+       user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
+       if (IS_ENABLED(CONFIG_SMP) && !user_mask) {
                retval = -ENOMEM;
                goto out_put_task;
        }
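
sched_setaffinity() now goes through the same allocator, which is why the -ENOMEM check is gated on IS_ENABLED(CONFIG_SMP): the !CONFIG_SMP stub added earlier in this diff deliberately returns NULL, since a user-requested mask is never stored on UP. In sketch form:

#ifndef CONFIG_SMP
static inline cpumask_t *alloc_user_cpus_ptr(int node)
{
        return NULL;    /* UP: no user mask is ever stored */
}
#endif

user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
if (IS_ENABLED(CONFIG_SMP) && !user_mask)
        return -ENOMEM; /* NULL is only a failure when SMP is configured */
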
index 475ecce..5e2c2c2 100644
@@ -18,7 +18,7 @@
 #include "tick-internal.h"
 
 /**
- * tick_program_event
+ * tick_program_event - program the CPU local timer device for the next event
  */
 int tick_program_event(ktime_t expires, int force)
 {
@@ -99,7 +99,7 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
 }
 
 /**
- * tick_check_oneshot_mode - check whether the system is in oneshot mode
+ * tick_oneshot_mode_active - check whether the system is in oneshot mode
  *
  * Returns 1 when either nohz or highres are enabled, otherwise 0.
  */
index 526257b..f4198af 100644
@@ -462,7 +462,7 @@ struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec)
 EXPORT_SYMBOL(ns_to_kernel_old_timeval);
 
 /**
- * set_normalized_timespec - set timespec sec and nsec parts and normalize
+ * set_normalized_timespec64 - set timespec sec and nsec parts and normalize
  *
  * @ts:                pointer to timespec variable to be set
  * @sec:       seconds to set
@@ -526,7 +526,7 @@ struct timespec64 ns_to_timespec64(s64 nsec)
 EXPORT_SYMBOL(ns_to_timespec64);
 
 /**
- * msecs_to_jiffies: - convert milliseconds to jiffies
+ * __msecs_to_jiffies: - convert milliseconds to jiffies
  * @m: time in milliseconds
  *
  * conversion is done as follows:
@@ -541,12 +541,12 @@ EXPORT_SYMBOL(ns_to_timespec64);
  *   handling any 32-bit overflows.
  *   for the details see __msecs_to_jiffies()
  *
- * msecs_to_jiffies() checks for the passed in value being a constant
+ * __msecs_to_jiffies() checks for the passed in value being a constant
  * via __builtin_constant_p() allowing gcc to eliminate most of the
  * code, __msecs_to_jiffies() is called if the value passed does not
  * allow constant folding and the actual conversion must be done at
  * runtime.
- * the _msecs_to_jiffies helpers are the HZ dependent conversion
+ * The _msecs_to_jiffies helpers are the HZ dependent conversion
  * routines found in include/linux/jiffies.h
  */
 unsigned long __msecs_to_jiffies(const unsigned int m)
index f72b9f1..5579ead 100644
@@ -1590,10 +1590,10 @@ void __weak read_persistent_clock64(struct timespec64 *ts)
 /**
  * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
  *                                        from the boot.
+ * @wall_time:   current time as returned by persistent clock
+ * @boot_offset:  offset that is defined as wall_time - boot_time
  *
  * Weak dummy function for arches that do not yet support it.
- * @wall_time: - current time as returned by persistent clock
- * @boot_offset: - offset that is defined as wall_time - boot_time
  *
  * The default function calculates offset based on the current value of
  * local_clock(). This way architectures that support sched_clock() but don't
@@ -1701,7 +1701,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
 }
 
 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
-/**
+/*
  * We have three kinds of time sources to use for sleep time
  * injection; the preference order is:
  * 1) non-stop clocksource
@@ -1722,7 +1722,7 @@ bool timekeeping_rtc_skipresume(void)
        return !suspend_timing_needed;
 }
 
-/**
+/*
  * Whether 1) can be used is only determinable during timekeeping_resume(),
  * which is invoked after rtc_suspend(), so we cannot be certain about
  * skipping rtc_suspend() if the system has 1).
index 45e93ec..2afe4c5 100644
@@ -23,7 +23,6 @@
                }                                                               \
                if (!--retry)                                                   \
                        break;                                                  \
-               cpu_relax();                                                    \
        }                                                                       \
 } while (0)
 
index d036c78..685e30e 100644
@@ -1640,7 +1640,13 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
        end = PFN_DOWN(base + size);
 
        for (; cursor < end; cursor++) {
-               memblock_free_pages(pfn_to_page(cursor), cursor, 0);
+               /*
+                * Reserved pages are always initialized by the end of
+                * memblock_free_all() (by memmap_init() and, if deferred
+                * initialization is enabled, memmap_init_reserved_pages()), so
+                * these pages can be released directly to the buddy allocator.
+                */
+               __free_pages_core(pfn_to_page(cursor), 0);
                totalram_pages_inc();
        }
 }
index 9630b12..82c7005 100644
@@ -305,13 +305,12 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
        kfree(priv);
 }
 
-static int xen_9pfs_front_remove(struct xenbus_device *dev)
+static void xen_9pfs_front_remove(struct xenbus_device *dev)
 {
        struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);
 
        dev_set_drvdata(&dev->dev, NULL);
        xen_9pfs_front_free(priv);
-       return 0;
 }
 
 static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
index fd8c6a7..506f83d 100644
@@ -505,8 +505,9 @@ found_ptype:
        NAPI_GRO_CB(skb)->count = 1;
        if (unlikely(skb_is_gso(skb))) {
                NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
-               /* Only support TCP at the moment. */
-               if (!skb_is_gso_tcp(skb))
+               /* Only support TCP and non-DODGY users. */
+               if (!skb_is_gso_tcp(skb) ||
+                   (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
                        NAPI_GRO_CB(skb)->flush = 1;
        }
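
The point of the extra test: gso_segs is supplied by an untrusted source on SKB_GSO_DODGY paths (e.g. packets injected from a guest), so GRO must not seed NAPI_GRO_CB(skb)->count from it and instead flushes such packets. The condition reads naturally as a predicate (hypothetical helper, not in the patch):

/* May this GSO skb's segment count be trusted by GRO? */
static bool gro_gso_count_is_trusted(const struct sk_buff *skb)
{
        return skb_is_gso_tcp(skb) &&
               !(skb_shinfo(skb)->gso_type & SKB_GSO_DODGY);
}
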
 
index a06a9f8..ada087b 100644
@@ -505,6 +505,7 @@ csum_copy_err:
 static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
                                     struct raw6_sock *rp)
 {
+       struct ipv6_txoptions *opt;
        struct sk_buff *skb;
        int err = 0;
        int offset;
@@ -522,6 +523,9 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
 
        offset = rp->offset;
        total_len = inet_sk(sk)->cork.base.length;
+       opt = inet6_sk(sk)->cork.opt;
+       total_len -= opt ? opt->opt_flen : 0;
+
        if (offset >= total_len - 1) {
                err = -EINVAL;
                ip6_flush_pending_frames(sk);
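
The reasoning behind the new subtraction, as far as the hunk shows: cork.base.length counts everything queued on the cork, including any IPv6 destination options appended via ancillary data, whereas rp->offset (the checksum offset) is relative to the transport payload alone, so opt->opt_flen must be deducted before the bounds check. In isolation (a fragment, variables as in the function above):

struct ipv6_txoptions *opt = inet6_sk(sk)->cork.opt;
unsigned int payload_len = inet_sk(sk)->cork.base.length -
                           (opt ? opt->opt_flen : 0);

if (rp->offset >= payload_len - 1)      /* checksum field must fit */
        err = -EINVAL;
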
index e76d345..ac5caf5 100644
@@ -10,6 +10,7 @@ rxrpc-y := \
        call_accept.o \
        call_event.o \
        call_object.o \
+       call_state.o \
        conn_client.o \
        conn_event.o \
        conn_object.o \
index 7ea576f..ebbd4a1 100644
@@ -155,10 +155,10 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 
                if (service_id) {
                        write_lock(&local->services_lock);
-                       if (rcu_access_pointer(local->service))
+                       if (local->service)
                                goto service_in_use;
                        rx->local = local;
-                       rcu_assign_pointer(local->service, rx);
+                       local->service = rx;
                        write_unlock(&local->services_lock);
 
                        rx->sk.sk_state = RXRPC_SERVER_BOUND;
@@ -328,7 +328,6 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
                mutex_unlock(&call->user_mutex);
        }
 
-       rxrpc_put_peer(cp.peer, rxrpc_peer_put_discard_tmp);
        _leave(" = %p", call);
        return call;
 }
@@ -374,13 +373,17 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
  * @sock: The socket the call is on
  * @call: The call to check
  *
- * Allow a kernel service to find out whether a call is still alive -
- * ie. whether it has completed.
+ * Allow a kernel service to find out whether a call is still alive - that is,
+ * whether it has not yet completed or, having completed successfully, still
+ * has received data waiting to be consumed.
  */
 bool rxrpc_kernel_check_life(const struct socket *sock,
                             const struct rxrpc_call *call)
 {
-       return call->state != RXRPC_CALL_COMPLETE;
+       if (!rxrpc_call_is_complete(call))
+               return true;
+       if (call->completion != RXRPC_CALL_SUCCEEDED)
+               return false;
+       return !skb_queue_empty(&call->recvmsg_queue);
 }
 EXPORT_SYMBOL(rxrpc_kernel_check_life);
 
@@ -872,9 +875,9 @@ static int rxrpc_release_sock(struct sock *sk)
 
        sk->sk_state = RXRPC_CLOSE;
 
-       if (rx->local && rcu_access_pointer(rx->local->service) == rx) {
+       if (rx->local && rx->local->service == rx) {
                write_lock(&rx->local->services_lock);
-               rcu_assign_pointer(rx->local->service, NULL);
+               rx->local->service = NULL;
                write_unlock(&rx->local->services_lock);
        }
 
@@ -957,16 +960,9 @@ static const struct net_proto_family rxrpc_family_ops = {
 static int __init af_rxrpc_init(void)
 {
        int ret = -1;
-       unsigned int tmp;
 
        BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb));
 
-       get_random_bytes(&tmp, sizeof(tmp));
-       tmp &= 0x3fffffff;
-       if (tmp == 0)
-               tmp = 1;
-       idr_set_cursor(&rxrpc_client_conn_ids, tmp);
-
        ret = -ENOMEM;
        rxrpc_call_jar = kmem_cache_create(
                "rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
@@ -1062,7 +1058,6 @@ static void __exit af_rxrpc_exit(void)
         * are released.
         */
        rcu_barrier();
-       rxrpc_destroy_client_conn_ids();
 
        destroy_workqueue(rxrpc_workqueue);
        rxrpc_exit_security();
index 1809252..433060c 100644
@@ -38,6 +38,7 @@ struct rxrpc_txbuf;
 enum rxrpc_skb_mark {
        RXRPC_SKB_MARK_PACKET,          /* Received packet */
        RXRPC_SKB_MARK_ERROR,           /* Error notification */
+       RXRPC_SKB_MARK_SERVICE_CONN_SECURED, /* Service connection response has been verified */
        RXRPC_SKB_MARK_REJECT_BUSY,     /* Reject with BUSY */
        RXRPC_SKB_MARK_REJECT_ABORT,    /* Reject with ABORT (code in skb->priority) */
 };
@@ -75,13 +76,7 @@ struct rxrpc_net {
 
        bool                    live;
 
-       bool                    kill_all_client_conns;
        atomic_t                nr_client_conns;
-       spinlock_t              client_conn_cache_lock; /* Lock for ->*_client_conns */
-       struct mutex            client_conn_discard_lock; /* Prevent multiple discarders */
-       struct list_head        idle_client_conns;
-       struct work_struct      client_conn_reaper;
-       struct timer_list       client_conn_reap_timer;
 
        struct hlist_head       local_endpoints;
        struct mutex            local_mutex;    /* Lock for ->local_endpoints */
@@ -202,6 +197,7 @@ struct rxrpc_host_header {
  * - max 48 bytes (struct sk_buff::cb)
  */
 struct rxrpc_skb_priv {
+       struct rxrpc_connection *conn;  /* Connection referred to (poke packet) */
        u16             offset;         /* Offset of data */
        u16             len;            /* Length of data */
        u8              flags;
@@ -262,13 +258,11 @@ struct rxrpc_security {
 
        /* respond to a challenge */
        int (*respond_to_challenge)(struct rxrpc_connection *,
-                                   struct sk_buff *,
-                                   u32 *);
+                                   struct sk_buff *);
 
        /* verify a response */
        int (*verify_response)(struct rxrpc_connection *,
-                              struct sk_buff *,
-                              u32 *);
+                              struct sk_buff *);
 
        /* clear connection security */
        void (*clear)(struct rxrpc_connection *);
@@ -283,22 +277,34 @@ struct rxrpc_local {
        struct rcu_head         rcu;
        atomic_t                active_users;   /* Number of users of the local endpoint */
        refcount_t              ref;            /* Number of references to the structure */
-       struct rxrpc_net        *rxnet;         /* The network ns in which this resides */
+       struct net              *net;           /* The network namespace */
+       struct rxrpc_net        *rxnet;         /* Our bits in the network namespace */
        struct hlist_node       link;
        struct socket           *socket;        /* my UDP socket */
        struct task_struct      *io_thread;
        struct completion       io_thread_ready; /* Indication that the I/O thread started */
-       struct rxrpc_sock __rcu *service;       /* Service(s) listening on this endpoint */
+       struct rxrpc_sock       *service;       /* Service(s) listening on this endpoint */
        struct rw_semaphore     defrag_sem;     /* control re-enablement of IP DF bit */
        struct sk_buff_head     rx_queue;       /* Received packets */
+       struct list_head        conn_attend_q;  /* Conns requiring immediate attention */
        struct list_head        call_attend_q;  /* Calls requiring immediate attention */
+
        struct rb_root          client_bundles; /* Client connection bundles by socket params */
        spinlock_t              client_bundles_lock; /* Lock for client_bundles */
+       bool                    kill_all_client_conns;
+       struct list_head        idle_client_conns;
+       struct timer_list       client_conn_reap_timer;
+       unsigned long           client_conn_flags;
+#define RXRPC_CLIENT_CONN_REAP_TIMER   0       /* The client conn reap timer expired */
+
        spinlock_t              lock;           /* access lock */
        rwlock_t                services_lock;  /* lock for services list */
        int                     debug_id;       /* debug ID for printks */
        bool                    dead;
        bool                    service_closed; /* Service socket closed */
+       struct idr              conn_ids;       /* List of connection IDs */
+       struct list_head        new_client_calls; /* Newly created client calls need connection */
+       spinlock_t              client_call_lock; /* Lock for ->new_client_calls */
        struct sockaddr_rxrpc   srx;            /* local address */
 };
 
@@ -356,7 +362,6 @@ struct rxrpc_conn_proto {
 
 struct rxrpc_conn_parameters {
        struct rxrpc_local      *local;         /* Representation of local endpoint */
-       struct rxrpc_peer       *peer;          /* Remote endpoint */
        struct key              *key;           /* Security details */
        bool                    exclusive;      /* T if conn is exclusive */
        bool                    upgrade;        /* T if service ID can be upgraded */
@@ -365,10 +370,21 @@ struct rxrpc_conn_parameters {
 };
 
 /*
+ * Call completion condition (state == RXRPC_CALL_COMPLETE).
+ */
+enum rxrpc_call_completion {
+       RXRPC_CALL_SUCCEEDED,           /* - Normal termination */
+       RXRPC_CALL_REMOTELY_ABORTED,    /* - call aborted by peer */
+       RXRPC_CALL_LOCALLY_ABORTED,     /* - call aborted locally on error or close */
+       RXRPC_CALL_LOCAL_ERROR,         /* - call failed due to local error */
+       RXRPC_CALL_NETWORK_ERROR,       /* - call terminated by network error */
+       NR__RXRPC_CALL_COMPLETIONS
+};
+
+/*
  * Bits in the connection flags.
  */
 enum rxrpc_conn_flag {
-       RXRPC_CONN_HAS_IDR,             /* Has a client conn ID assigned */
        RXRPC_CONN_IN_SERVICE_CONNS,    /* Conn is in peer->service_conns */
        RXRPC_CONN_DONT_REUSE,          /* Don't reuse this connection */
        RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */
@@ -388,6 +404,7 @@ enum rxrpc_conn_flag {
  */
 enum rxrpc_conn_event {
        RXRPC_CONN_EV_CHALLENGE,        /* Send challenge packet */
+       RXRPC_CONN_EV_ABORT_CALLS,      /* Abort attached calls */
 };
 
 /*
@@ -395,13 +412,13 @@ enum rxrpc_conn_event {
  */
 enum rxrpc_conn_proto_state {
        RXRPC_CONN_UNUSED,              /* Connection not yet attempted */
+       RXRPC_CONN_CLIENT_UNSECURED,    /* Client connection needs security init */
        RXRPC_CONN_CLIENT,              /* Client connection */
        RXRPC_CONN_SERVICE_PREALLOC,    /* Service connection preallocation */
        RXRPC_CONN_SERVICE_UNSECURED,   /* Service unsecured connection */
        RXRPC_CONN_SERVICE_CHALLENGING, /* Service challenging for security */
        RXRPC_CONN_SERVICE,             /* Service secured connection */
-       RXRPC_CONN_REMOTELY_ABORTED,    /* Conn aborted by peer */
-       RXRPC_CONN_LOCALLY_ABORTED,     /* Conn aborted locally */
+       RXRPC_CONN_ABORTED,             /* Conn aborted */
        RXRPC_CONN__NR_STATES
 };
 
@@ -412,17 +429,16 @@ struct rxrpc_bundle {
        struct rxrpc_local      *local;         /* Representation of local endpoint */
        struct rxrpc_peer       *peer;          /* Remote endpoint */
        struct key              *key;           /* Security details */
+       const struct rxrpc_security *security;  /* applied security module */
        refcount_t              ref;
        atomic_t                active;         /* Number of active users */
        unsigned int            debug_id;
        u32                     security_level; /* Security level selected */
        u16                     service_id;     /* Service ID for this connection */
        bool                    try_upgrade;    /* True if the bundle is attempting upgrade */
-       bool                    alloc_conn;     /* True if someone's getting a conn */
        bool                    exclusive;      /* T if conn is exclusive */
        bool                    upgrade;        /* T if service ID can be upgraded */
-       short                   alloc_error;    /* Error from last conn allocation */
-       spinlock_t              channel_lock;
+       unsigned short          alloc_error;    /* Error from last conn allocation */
        struct rb_node          local_node;     /* Node in local->client_conns */
        struct list_head        waiting_calls;  /* Calls waiting for channels */
        unsigned long           avail_chans;    /* Mask of available channels */
@@ -440,6 +456,7 @@ struct rxrpc_connection {
        struct rxrpc_peer       *peer;          /* Remote endpoint */
        struct rxrpc_net        *rxnet;         /* Network namespace to which call belongs */
        struct key              *key;           /* Security details */
+       struct list_head        attend_link;    /* Link in local->conn_attend_q */
 
        refcount_t              ref;
        atomic_t                active;         /* Active count for service conns */
@@ -449,7 +466,7 @@ struct rxrpc_connection {
        unsigned char           act_chans;      /* Mask of active channels */
        struct rxrpc_channel {
                unsigned long           final_ack_at;   /* Time at which to issue final ACK */
-               struct rxrpc_call __rcu *call;          /* Active call */
+               struct rxrpc_call       *call;          /* Active call */
                unsigned int            call_debug_id;  /* call->debug_id */
                u32                     call_id;        /* ID of current call */
                u32                     call_counter;   /* Call ID counter */
@@ -470,6 +487,7 @@ struct rxrpc_connection {
        struct list_head        link;           /* link in master connection list */
        struct sk_buff_head     rx_queue;       /* received conn-level packets */
 
+       struct mutex            security_lock;  /* Lock for security management */
        const struct rxrpc_security *security;  /* applied security module */
        union {
                struct {
@@ -483,7 +501,8 @@ struct rxrpc_connection {
        unsigned long           idle_timestamp; /* Time at which last became idle */
        spinlock_t              state_lock;     /* state-change lock */
        enum rxrpc_conn_proto_state state;      /* current state of connection */
-       u32                     abort_code;     /* Abort code of connection abort */
+       enum rxrpc_call_completion completion;  /* Completion condition */
+       s32                     abort_code;     /* Abort code of connection abort */
        int                     debug_id;       /* debug ID for printks */
        atomic_t                serial;         /* packet serial number counter */
        unsigned int            hi_serial;      /* highest serial number received */
@@ -527,7 +546,8 @@ enum rxrpc_call_flag {
        RXRPC_CALL_KERNEL,              /* The call was made by the kernel */
        RXRPC_CALL_UPGRADE,             /* Service upgrade was requested for the call */
        RXRPC_CALL_EXCLUSIVE,           /* The call uses a once-only connection */
-       RXRPC_CALL_RX_IS_IDLE,          /* Reception is idle - send an ACK */
+       RXRPC_CALL_RX_IS_IDLE,          /* recvmsg() is idle - send an ACK */
+       RXRPC_CALL_RECVMSG_READ_ALL,    /* recvmsg() read all of the received data */
 };
 
 /*
@@ -558,18 +578,6 @@ enum rxrpc_call_state {
 };
 
 /*
- * Call completion condition (state == RXRPC_CALL_COMPLETE).
- */
-enum rxrpc_call_completion {
-       RXRPC_CALL_SUCCEEDED,           /* - Normal termination */
-       RXRPC_CALL_REMOTELY_ABORTED,    /* - call aborted by peer */
-       RXRPC_CALL_LOCALLY_ABORTED,     /* - call aborted locally on error or close */
-       RXRPC_CALL_LOCAL_ERROR,         /* - call failed due to local error */
-       RXRPC_CALL_NETWORK_ERROR,       /* - call terminated by network error */
-       NR__RXRPC_CALL_COMPLETIONS
-};
-
-/*
  * Call Tx congestion management modes.
  */
 enum rxrpc_congest_mode {
@@ -587,6 +595,7 @@ enum rxrpc_congest_mode {
 struct rxrpc_call {
        struct rcu_head         rcu;
        struct rxrpc_connection *conn;          /* connection carrying call */
+       struct rxrpc_bundle     *bundle;        /* Connection bundle to use */
        struct rxrpc_peer       *peer;          /* Peer record for remote address */
        struct rxrpc_local      *local;         /* Representation of local endpoint */
        struct rxrpc_sock __rcu *socket;        /* socket responsible */
@@ -609,7 +618,7 @@ struct rxrpc_call {
        struct work_struct      destroyer;      /* In-process-context destroyer */
        rxrpc_notify_rx_t       notify_rx;      /* kernel service Rx notification function */
        struct list_head        link;           /* link in master call list */
-       struct list_head        chan_wait_link; /* Link in conn->bundle->waiting_calls */
+       struct list_head        wait_link;      /* Link in local->new_client_calls */
        struct hlist_node       error_link;     /* link in error distribution list */
        struct list_head        accept_link;    /* Link in rx->acceptq */
        struct list_head        recvmsg_link;   /* Link in rx->recvmsg_q */
@@ -623,10 +632,13 @@ struct rxrpc_call {
        unsigned long           flags;
        unsigned long           events;
        spinlock_t              notify_lock;    /* Kernel notification lock */
-       rwlock_t                state_lock;     /* lock for state transition */
-       u32                     abort_code;     /* Local/remote abort code */
+       unsigned int            send_abort_why; /* Why the abort [enum rxrpc_abort_reason] */
+       s32                     send_abort;     /* Abort code to be sent */
+       short                   send_abort_err; /* Error to be associated with the abort */
+       rxrpc_seq_t             send_abort_seq; /* DATA packet that incurred the abort (or 0) */
+       s32                     abort_code;     /* Local/remote abort code */
        int                     error;          /* Local error incurred */
-       enum rxrpc_call_state   state;          /* current state of call */
+       enum rxrpc_call_state   _state;         /* Current state of call (needs barrier) */
        enum rxrpc_call_completion completion;  /* Call completion condition */
        refcount_t              ref;
        u8                      security_ix;    /* Security type */
@@ -812,9 +824,11 @@ extern struct workqueue_struct *rxrpc_workqueue;
  */
 int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
 void rxrpc_discard_prealloc(struct rxrpc_sock *);
-int rxrpc_new_incoming_call(struct rxrpc_local *, struct rxrpc_peer *,
-                           struct rxrpc_connection *, struct sockaddr_rxrpc *,
-                           struct sk_buff *);
+bool rxrpc_new_incoming_call(struct rxrpc_local *local,
+                            struct rxrpc_peer *peer,
+                            struct rxrpc_connection *conn,
+                            struct sockaddr_rxrpc *peer_srx,
+                            struct sk_buff *skb);
 void rxrpc_accept_incoming_calls(struct rxrpc_local *);
 int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
 
@@ -834,7 +848,7 @@ void rxrpc_reduce_call_timer(struct rxrpc_call *call,
                             unsigned long now,
                             enum rxrpc_timer_trace why);
 
-void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);
+bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);
 
 /*
  * call_object.c
@@ -851,6 +865,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
                                         struct sockaddr_rxrpc *,
                                         struct rxrpc_call_params *, gfp_t,
                                         unsigned int);
+void rxrpc_start_call_timer(struct rxrpc_call *call);
 void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
                         struct sk_buff *);
 void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
@@ -873,32 +888,88 @@ static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
 }
 
 /*
+ * call_state.c
+ */
+bool rxrpc_set_call_completion(struct rxrpc_call *call,
+                              enum rxrpc_call_completion compl,
+                              u32 abort_code,
+                              int error);
+bool rxrpc_call_completed(struct rxrpc_call *call);
+bool rxrpc_abort_call(struct rxrpc_call *call, rxrpc_seq_t seq,
+                     u32 abort_code, int error, enum rxrpc_abort_reason why);
+void rxrpc_prefail_call(struct rxrpc_call *call, enum rxrpc_call_completion compl,
+                       int error);
+
+static inline void rxrpc_set_call_state(struct rxrpc_call *call,
+                                       enum rxrpc_call_state state)
+{
+       /* Order write of completion info before write of ->state. */
+       smp_store_release(&call->_state, state);
+       wake_up(&call->waitq);
+}
+
+static inline enum rxrpc_call_state __rxrpc_call_state(const struct rxrpc_call *call)
+{
+       return call->_state; /* Only inside I/O thread */
+}
+
+static inline bool __rxrpc_call_is_complete(const struct rxrpc_call *call)
+{
+       return __rxrpc_call_state(call) == RXRPC_CALL_COMPLETE;
+}
+
+static inline enum rxrpc_call_state rxrpc_call_state(const struct rxrpc_call *call)
+{
+       /* Order read ->state before read of completion info. */
+       return smp_load_acquire(&call->_state);
+}
+
+static inline bool rxrpc_call_is_complete(const struct rxrpc_call *call)
+{
+       return rxrpc_call_state(call) == RXRPC_CALL_COMPLETE;
+}
+
+static inline bool rxrpc_call_has_failed(const struct rxrpc_call *call)
+{
+       return rxrpc_call_is_complete(call) && call->completion != RXRPC_CALL_SUCCEEDED;
+}
+
+/*
  * conn_client.c
  */
 extern unsigned int rxrpc_reap_client_connections;
 extern unsigned long rxrpc_conn_idle_client_expiry;
 extern unsigned long rxrpc_conn_idle_client_fast_expiry;
-extern struct idr rxrpc_client_conn_ids;
 
-void rxrpc_destroy_client_conn_ids(void);
+void rxrpc_purge_client_connections(struct rxrpc_local *local);
 struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace);
 void rxrpc_put_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace);
-int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
-                      struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
-                      gfp_t);
+int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp);
+void rxrpc_connect_client_calls(struct rxrpc_local *local);
 void rxrpc_expose_client_call(struct rxrpc_call *);
 void rxrpc_disconnect_client_call(struct rxrpc_bundle *, struct rxrpc_call *);
+void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle);
 void rxrpc_put_client_conn(struct rxrpc_connection *, enum rxrpc_conn_trace);
-void rxrpc_discard_expired_client_conns(struct work_struct *);
-void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
+void rxrpc_discard_expired_client_conns(struct rxrpc_local *local);
 void rxrpc_clean_up_local_conns(struct rxrpc_local *);
 
 /*
  * conn_event.c
  */
+void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, struct sk_buff *skb,
+                               unsigned int channel);
+int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
+                    s32 abort_code, int err, enum rxrpc_abort_reason why);
 void rxrpc_process_connection(struct work_struct *);
 void rxrpc_process_delayed_final_acks(struct rxrpc_connection *, bool);
-int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);
+bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);
+void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb);
+
+static inline bool rxrpc_is_conn_aborted(const struct rxrpc_connection *conn)
+{
+       /* Order reading the abort info after the state check. */
+       return smp_load_acquire(&conn->state) == RXRPC_CONN_ABORTED;
+}
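
The accessors above, together with rxrpc_is_conn_aborted() here, form a textbook release/acquire publication pairing: a writer fills in the completion fields and then publishes the state with smp_store_release(); any reader that observes the published state through smp_load_acquire() is guaranteed to also see those fields. Schematically, using the helpers defined above:

/* Writer (completion path): completion info first, then the state. */
call->completion = RXRPC_CALL_SUCCEEDED;
call->error = 0;
rxrpc_set_call_state(call, RXRPC_CALL_COMPLETE);        /* smp_store_release() */

/* Reader (e.g. recvmsg): load the state, then the info it guards. */
if (rxrpc_call_is_complete(call))                       /* smp_load_acquire() */
        ret = call->error;
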
 
 /*
  * conn_object.c
@@ -906,6 +977,7 @@ int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);
 extern unsigned int rxrpc_connection_expiry;
 extern unsigned int rxrpc_closed_conn_expiry;
 
+void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why);
 struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *, gfp_t);
 struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *,
                                                          struct sockaddr_rxrpc *,
@@ -961,12 +1033,19 @@ void rxrpc_implicit_end_call(struct rxrpc_call *, struct sk_buff *);
  */
 int rxrpc_encap_rcv(struct sock *, struct sk_buff *);
 void rxrpc_error_report(struct sock *);
+bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
+                       s32 abort_code, int err);
 int rxrpc_io_thread(void *data);
 static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
 {
        wake_up_process(local->io_thread);
 }
 
+static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why)
+{
+       return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EPROTO);
+}
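
rxrpc_direct_abort() marks the skb for rejection (packet type, abort code, error) and returns false, so input-path checks can reject and bail out in a single expression; rxrpc_protocol_error() is simply the common RX_PROTOCOL_ERROR/-EPROTO case. The rxrpc_new_incoming_call() hunk later in this diff uses it exactly that way:

/* From the input path: reject anything that is not a DATA packet. */
if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
        return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call);
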
+
 /*
  * insecure.c
  */
@@ -1048,6 +1127,7 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
 int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
 int rxrpc_send_abort_packet(struct rxrpc_call *);
 int rxrpc_send_data_packet(struct rxrpc_call *, struct rxrpc_txbuf *);
+void rxrpc_send_conn_abort(struct rxrpc_connection *conn);
 void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb);
 void rxrpc_send_keepalive(struct rxrpc_peer *);
 void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
@@ -1063,12 +1143,11 @@ void rxrpc_peer_keepalive_worker(struct work_struct *);
  */
 struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
                                         const struct sockaddr_rxrpc *);
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
-                                    struct sockaddr_rxrpc *, gfp_t);
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
+                                    struct sockaddr_rxrpc *srx, gfp_t gfp);
 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t,
                                    enum rxrpc_peer_trace);
-void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
-                            struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer);
 void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *, enum rxrpc_peer_trace);
@@ -1086,33 +1165,22 @@ extern const struct seq_operations rxrpc_local_seq_ops;
  * recvmsg.c
  */
 void rxrpc_notify_socket(struct rxrpc_call *);
-bool __rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int);
-bool rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int);
-bool __rxrpc_call_completed(struct rxrpc_call *);
-bool rxrpc_call_completed(struct rxrpc_call *);
-bool __rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int);
-bool rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int);
 int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
 
 /*
  * Abort a call due to a protocol error.
  */
-static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
-                                       struct sk_buff *skb,
-                                       const char *eproto_why,
-                                       const char *why,
-                                       u32 abort_code)
+static inline int rxrpc_abort_eproto(struct rxrpc_call *call,
+                                    struct sk_buff *skb,
+                                    s32 abort_code,
+                                    enum rxrpc_abort_reason why)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 
-       trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
-       return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
+       rxrpc_abort_call(call, sp->hdr.seq, abort_code, -EPROTO, why);
+       return -EPROTO;
 }
 
-#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
-       __rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
-                            (abort_why), (abort_code))
-
 /*
  * rtt.c
  */
@@ -1144,6 +1212,8 @@ struct key *rxrpc_look_up_server_security(struct rxrpc_connection *,
 /*
  * sendmsg.c
  */
+bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
+                        enum rxrpc_abort_reason why);
 int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
 
 /*
index c024016..3e8689f 100644
@@ -99,7 +99,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
        if (!call)
                return -ENOMEM;
        call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
-       call->state = RXRPC_CALL_SERVER_PREALLOC;
+       rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC);
        __set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events);
 
        trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
@@ -280,7 +280,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                                          (peer_tail + 1) &
                                          (RXRPC_BACKLOG_MAX - 1));
 
-                       rxrpc_new_incoming_peer(rx, local, peer);
+                       rxrpc_new_incoming_peer(local, peer);
                }
 
                /* Now allocate and set up the connection */
@@ -326,11 +326,11 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
  * If we want to report an error, we mark the skb with the packet type and
  * abort code and return false.
  */
-int rxrpc_new_incoming_call(struct rxrpc_local *local,
-                           struct rxrpc_peer *peer,
-                           struct rxrpc_connection *conn,
-                           struct sockaddr_rxrpc *peer_srx,
-                           struct sk_buff *skb)
+bool rxrpc_new_incoming_call(struct rxrpc_local *local,
+                            struct rxrpc_peer *peer,
+                            struct rxrpc_connection *conn,
+                            struct sockaddr_rxrpc *peer_srx,
+                            struct sk_buff *skb)
 {
        const struct rxrpc_security *sec = NULL;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -339,18 +339,17 @@ int rxrpc_new_incoming_call(struct rxrpc_local *local,
 
        _enter("");
 
-       /* Don't set up a call for anything other than the first DATA packet. */
-       if (sp->hdr.seq != 1 ||
-           sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
-               return 0; /* Just discard */
+       /* Don't set up a call for anything other than a DATA packet. */
+       if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
+               return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call);
 
-       rcu_read_lock();
+       read_lock(&local->services_lock);
 
        /* Weed out packets to services we're not offering.  Packets that would
         * begin a call are explicitly rejected and the rest are just
         * discarded.
         */
-       rx = rcu_dereference(local->service);
+       rx = local->service;
        if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
                    sp->hdr.serviceId != rx->second_service)
            ) {
@@ -363,16 +362,14 @@ int rxrpc_new_incoming_call(struct rxrpc_local *local,
        if (!conn) {
                sec = rxrpc_get_incoming_security(rx, skb);
                if (!sec)
-                       goto reject;
+                       goto unsupported_security;
        }
 
        spin_lock(&rx->incoming_lock);
        if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
            rx->sk.sk_state == RXRPC_CLOSE) {
-               trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
-                                 sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
-               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
-               skb->priority = RX_INVALID_OPERATION;
+               rxrpc_direct_abort(skb, rxrpc_abort_shut_down,
+                                  RX_INVALID_OPERATION, -ESHUTDOWN);
                goto no_call;
        }
 
@@ -402,7 +399,7 @@ int rxrpc_new_incoming_call(struct rxrpc_local *local,
        spin_unlock(&conn->state_lock);
 
        spin_unlock(&rx->incoming_lock);
-       rcu_read_unlock();
+       read_unlock(&local->services_lock);
 
        if (hlist_unhashed(&call->error_link)) {
                spin_lock(&call->peer->lock);
@@ -413,22 +410,24 @@ int rxrpc_new_incoming_call(struct rxrpc_local *local,
        _leave(" = %p{%d}", call, call->debug_id);
        rxrpc_input_call_event(call, skb);
        rxrpc_put_call(call, rxrpc_call_put_input);
-       return 0;
+       return true;
 
 unsupported_service:
-       trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                         RX_INVALID_OPERATION, EOPNOTSUPP);
-       skb->priority = RX_INVALID_OPERATION;
-       goto reject;
+       read_unlock(&local->services_lock);
+       return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
+                                 RX_INVALID_OPERATION, -EOPNOTSUPP);
+unsupported_security:
+       read_unlock(&local->services_lock);
+       return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
+                                 RX_INVALID_OPERATION, -EKEYREJECTED);
 no_call:
        spin_unlock(&rx->incoming_lock);
-reject:
-       rcu_read_unlock();
+       read_unlock(&local->services_lock);
        _leave(" = f [%u]", skb->mark);
-       return -EPROTO;
+       return false;
 discard:
-       rcu_read_unlock();
-       return 0;
+       read_unlock(&local->services_lock);
+       return true;
 }
 
 /*
index b2cf448..1abdef1 100644
@@ -251,6 +251,41 @@ out:
        _leave("");
 }
 
+/*
+ * Start transmitting the reply to a service.  This cancels the need to ACK the
+ * request if we haven't yet done so.
+ */
+static void rxrpc_begin_service_reply(struct rxrpc_call *call)
+{
+       unsigned long now = jiffies;
+
+       rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SEND_REPLY);
+       WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET);
+       if (call->ackr_reason == RXRPC_ACK_DELAY)
+               call->ackr_reason = 0;
+       trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
+}
+
+/*
+ * Close the transmission phase.  After this point there is no more data to be
+ * transmitted in the call.
+ */
+static void rxrpc_close_tx_phase(struct rxrpc_call *call)
+{
+       _debug("________awaiting reply/ACK__________");
+
+       switch (__rxrpc_call_state(call)) {
+       case RXRPC_CALL_CLIENT_SEND_REQUEST:
+               rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY);
+               break;
+       case RXRPC_CALL_SERVER_SEND_REPLY:
+               rxrpc_set_call_state(call, RXRPC_CALL_SERVER_AWAIT_ACK);
+               break;
+       default:
+               break;
+       }
+}
+
 static bool rxrpc_tx_window_has_space(struct rxrpc_call *call)
 {
        unsigned int winsize = min_t(unsigned int, call->tx_winsize,
@@ -270,9 +305,11 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
 {
        struct rxrpc_txbuf *txb;
 
-       if (rxrpc_is_client_call(call) &&
-           !test_bit(RXRPC_CALL_EXPOSED, &call->flags))
+       if (!test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
+               if (list_empty(&call->tx_sendmsg))
+                       return;
                rxrpc_expose_client_call(call);
+       }
 
        while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
                                               struct rxrpc_txbuf, call_link))) {
@@ -283,6 +320,9 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
                call->tx_top = txb->seq;
                list_add_tail(&txb->call_link, &call->tx_buffer);
 
+               if (txb->wire.flags & RXRPC_LAST_PACKET)
+                       rxrpc_close_tx_phase(call);
+
                rxrpc_transmit_one(call, txb);
 
                if (!rxrpc_tx_window_has_space(call))
@@ -292,16 +332,15 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
 
 static void rxrpc_transmit_some_data(struct rxrpc_call *call)
 {
-       switch (call->state) {
+       switch (__rxrpc_call_state(call)) {
        case RXRPC_CALL_SERVER_ACK_REQUEST:
                if (list_empty(&call->tx_sendmsg))
                        return;
+               rxrpc_begin_service_reply(call);
                fallthrough;
 
        case RXRPC_CALL_SERVER_SEND_REPLY:
-       case RXRPC_CALL_SERVER_AWAIT_ACK:
        case RXRPC_CALL_CLIENT_SEND_REQUEST:
-       case RXRPC_CALL_CLIENT_AWAIT_REPLY:
                if (!rxrpc_tx_window_has_space(call))
                        return;
                if (list_empty(&call->tx_sendmsg)) {
@@ -331,21 +370,31 @@ static void rxrpc_send_initial_ping(struct rxrpc_call *call)
 /*
  * Handle retransmission and deferred ACK/abort generation.
  */
-void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
+bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
 {
        unsigned long now, next, t;
        rxrpc_serial_t ackr_serial;
        bool resend = false, expired = false;
+       s32 abort_code;
 
        rxrpc_see_call(call, rxrpc_call_see_input);
 
        //printk("\n--------------------\n");
        _enter("{%d,%s,%lx}",
-              call->debug_id, rxrpc_call_states[call->state], call->events);
+              call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)],
+              call->events);
 
-       if (call->state == RXRPC_CALL_COMPLETE)
+       if (__rxrpc_call_is_complete(call))
                goto out;
 
+       /* Handle abort request locklessly, vs rxrpc_propose_abort(). */
+       abort_code = smp_load_acquire(&call->send_abort);
+       if (abort_code) {
+               rxrpc_abort_call(call, 0, call->send_abort, call->send_abort_err,
+                                call->send_abort_why);
+               goto out;
+       }
+
        if (skb && skb->mark == RXRPC_SKB_MARK_ERROR)
                goto out;
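
This lockless check pairs with the proposing side (rxrpc_propose_abort(), declared in the header hunk above): the proposer records why/err/seq and only then publishes the abort code with smp_store_release(), which is what the smp_load_acquire() here synchronises with. A condensed sketch of the proposer, following the pattern this series introduces:

/* Proposer (e.g. sendmsg): fill in the details, then publish the code. */
call->send_abort_why = why;
call->send_abort_err = error;
call->send_abort_seq = 0;
smp_store_release(&call->send_abort, abort_code);
rxrpc_poke_call(call, rxrpc_call_poke_abort);   /* wake the I/O thread */
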
 
@@ -358,7 +407,7 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
        }
 
        t = READ_ONCE(call->expect_req_by);
-       if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
+       if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST &&
            time_after_eq(now, t)) {
                trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
                expired = true;
@@ -429,11 +478,12 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
                if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
                    (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
                        trace_rxrpc_call_reset(call);
-                       rxrpc_abort_call("EXP", call, 0, RX_CALL_DEAD, -ECONNRESET);
+                       rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET,
+                                        rxrpc_abort_call_reset);
                } else {
-                       rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME);
+                       rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME,
+                                        rxrpc_abort_call_timeout);
                }
-               rxrpc_send_abort_packet(call);
                goto out;
        }
 
@@ -441,7 +491,7 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
                rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
                               rxrpc_propose_ack_ping_for_lost_ack);
 
-       if (resend && call->state != RXRPC_CALL_CLIENT_RECV_REPLY)
+       if (resend && __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY)
                rxrpc_resend(call, NULL);
 
        if (test_and_clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
@@ -453,7 +503,7 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
                               rxrpc_propose_ack_input_data);
 
        /* Make sure the timer is restarted */
-       if (call->state != RXRPC_CALL_COMPLETE) {
+       if (!__rxrpc_call_is_complete(call)) {
                next = call->expect_rx_by;
 
 #define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
@@ -474,9 +524,15 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
        }
 
 out:
-       if (call->state == RXRPC_CALL_COMPLETE)
+       if (__rxrpc_call_is_complete(call)) {
                del_timer_sync(&call->timer);
+               if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
+                       rxrpc_disconnect_call(call);
+               if (call->security)
+                       call->security->free_call_crypto(call);
+       }
        if (call->acks_hard_ack != call->tx_bottom)
                rxrpc_shrink_call_tx_buffer(call);
        _leave("");
+       return true;
 }
index 89dcf60..3ded5a2 100644
@@ -50,7 +50,7 @@ void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what)
        struct rxrpc_local *local = call->local;
        bool busy;
 
-       if (call->state < RXRPC_CALL_COMPLETE) {
+       if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) {
                spin_lock_bh(&local->lock);
                busy = !list_empty(&call->attend_link);
                trace_rxrpc_poke_call(call, busy, what);
@@ -69,7 +69,7 @@ static void rxrpc_call_timer_expired(struct timer_list *t)
 
        _enter("%d", call->debug_id);
 
-       if (call->state < RXRPC_CALL_COMPLETE) {
+       if (!__rxrpc_call_is_complete(call)) {
                trace_rxrpc_timer_expired(call, jiffies);
                rxrpc_poke_call(call, rxrpc_call_poke_timer);
        }
@@ -150,7 +150,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
        timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
        INIT_WORK(&call->destroyer, rxrpc_destroy_call);
        INIT_LIST_HEAD(&call->link);
-       INIT_LIST_HEAD(&call->chan_wait_link);
+       INIT_LIST_HEAD(&call->wait_link);
        INIT_LIST_HEAD(&call->accept_link);
        INIT_LIST_HEAD(&call->recvmsg_link);
        INIT_LIST_HEAD(&call->sock_link);
@@ -162,7 +162,6 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
        init_waitqueue_head(&call->waitq);
        spin_lock_init(&call->notify_lock);
        spin_lock_init(&call->tx_lock);
-       rwlock_init(&call->state_lock);
        refcount_set(&call->ref, 1);
        call->debug_id = debug_id;
        call->tx_total_len = -1;
@@ -211,7 +210,6 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
        now = ktime_get_real();
        call->acks_latest_ts    = now;
        call->cong_tstamp       = now;
-       call->state             = RXRPC_CALL_CLIENT_AWAIT_CONN;
        call->dest_srx          = *srx;
        call->interruptibility  = p->interruptibility;
        call->tx_total_len      = p->tx_total_len;
@@ -227,11 +225,13 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
 
        ret = rxrpc_init_client_call_security(call);
        if (ret < 0) {
-               __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
+               rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, ret);
                rxrpc_put_call(call, rxrpc_call_put_discard_error);
                return ERR_PTR(ret);
        }
 
+       rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_CONN);
+
        trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
                         p->user_call_ID, rxrpc_call_new_client);
 
@@ -242,7 +242,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
 /*
  * Initiate the call ack/resend/expiry timer.
  */
-static void rxrpc_start_call_timer(struct rxrpc_call *call)
+void rxrpc_start_call_timer(struct rxrpc_call *call)
 {
        unsigned long now = jiffies;
        unsigned long j = now + MAX_JIFFY_OFFSET;
@@ -287,6 +287,39 @@ static void rxrpc_put_call_slot(struct rxrpc_call *call)
 }
 
 /*
+ * Start the process of connecting a call.  We obtain a peer and a connection
+ * bundle, but the actual association of a call with a connection is offloaded
+ * to the I/O thread to simplify locking.
+ */
+static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp)
+{
+       struct rxrpc_local *local = call->local;
+       int ret = 0;
+
+       _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
+
+       call->peer = rxrpc_lookup_peer(local, &call->dest_srx, gfp);
+       if (!call->peer)
+               goto error;
+
+       ret = rxrpc_look_up_bundle(call, gfp);
+       if (ret < 0)
+               goto error;
+
+       trace_rxrpc_client(NULL, -1, rxrpc_client_queue_new_call);
+       rxrpc_get_call(call, rxrpc_call_get_io_thread);
+       spin_lock(&local->client_call_lock);
+       list_add_tail(&call->wait_link, &local->new_client_calls);
+       spin_unlock(&local->client_call_lock);
+       rxrpc_wake_up_io_thread(local);
+       return 0;
+
+error:
+       __set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
+       return ret;
+}
+
+/*
  * Set up a call for the given parameters.
  * - Called with the socket lock held, which it must release.
  * - If it returns a call, the call's lock will need releasing by the caller.
@@ -365,14 +398,10 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
-       ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
+       ret = rxrpc_connect_call(call, gfp);
        if (ret < 0)
                goto error_attached_to_socket;
 
-       rxrpc_see_call(call, rxrpc_call_see_connected);
-
-       rxrpc_start_call_timer(call);
-
        _leave(" = %p [new]", call);
        return call;
 
@@ -384,27 +413,23 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 error_dup_user_ID:
        write_unlock(&rx->call_lock);
        release_sock(&rx->sk);
-       __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
-                                   RX_CALL_DEAD, -EEXIST);
+       rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EEXIST);
        trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), 0,
                         rxrpc_call_see_userid_exists);
-       rxrpc_release_call(rx, call);
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put_userid_exists);
        _leave(" = -EEXIST");
        return ERR_PTR(-EEXIST);
 
        /* We got an error, but the call is attached to the socket and is in
-        * need of release.  However, we might now race with recvmsg() when
-        * completing the call queues it.  Return 0 from sys_sendmsg() and
+        * need of release.  However, we might now race with recvmsg() when its
+        * completion notifies the socket.  Return 0 from sys_sendmsg() and
         * leave the error to recvmsg() to deal with.
         */
 error_attached_to_socket:
        trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), ret,
                         rxrpc_call_see_connect_failed);
-       set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
-       __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
-                                   RX_CALL_DEAD, ret);
+       rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
        _leave(" = c=%08x [err]", call->debug_id);
        return call;
 }
@@ -427,32 +452,32 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
        call->call_id           = sp->hdr.callNumber;
        call->dest_srx.srx_service = sp->hdr.serviceId;
        call->cid               = sp->hdr.cid;
-       call->state             = RXRPC_CALL_SERVER_SECURING;
        call->cong_tstamp       = skb->tstamp;
 
+       __set_bit(RXRPC_CALL_EXPOSED, &call->flags);
+       rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
+
        spin_lock(&conn->state_lock);
 
        switch (conn->state) {
        case RXRPC_CONN_SERVICE_UNSECURED:
        case RXRPC_CONN_SERVICE_CHALLENGING:
-               call->state = RXRPC_CALL_SERVER_SECURING;
+               rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
                break;
        case RXRPC_CONN_SERVICE:
-               call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
+               rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
                break;
 
-       case RXRPC_CONN_REMOTELY_ABORTED:
-               __rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
-                                           conn->abort_code, conn->error);
-               break;
-       case RXRPC_CONN_LOCALLY_ABORTED:
-               __rxrpc_abort_call("CON", call, 1,
-                                  conn->abort_code, conn->error);
+       case RXRPC_CONN_ABORTED:
+               rxrpc_set_call_completion(call, conn->completion,
+                                         conn->abort_code, conn->error);
                break;
        default:
                BUG();
        }
 
+       rxrpc_get_call(call, rxrpc_call_get_io_thread);
+
        /* Set the channel for this call.  We don't get channel_lock as we're
         * only defending against the data_ready handler (which we're called
         * from) and the RESPONSE packet parser (which is only really
@@ -462,7 +487,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
        chan = sp->hdr.cid & RXRPC_CHANNELMASK;
        conn->channels[chan].call_counter = call->call_id;
        conn->channels[chan].call_id = call->call_id;
-       rcu_assign_pointer(conn->channels[chan].call, call);
+       conn->channels[chan].call = call;
        spin_unlock(&conn->state_lock);
 
        spin_lock(&conn->peer->lock);
@@ -522,20 +547,17 @@ static void rxrpc_cleanup_ring(struct rxrpc_call *call)
 void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 {
        struct rxrpc_connection *conn = call->conn;
-       bool put = false;
+       bool put = false, putu = false;
 
        _enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));
 
        trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
                         call->flags, rxrpc_call_see_release);
 
-       ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
-
        if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();
 
        rxrpc_put_call_slot(call);
-       del_timer_sync(&call->timer);
 
        /* Make sure we don't get any more notifications */
        write_lock(&rx->recvmsg_lock);
@@ -560,7 +582,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
        if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                rb_erase(&call->sock_node, &rx->calls);
                memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
-               rxrpc_put_call(call, rxrpc_call_put_userid_exists);
+               putu = true;
        }
 
        list_del(&call->sock_link);
@@ -568,10 +590,9 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 
        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
 
-       if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
-               rxrpc_disconnect_call(call);
-       if (call->security)
-               call->security->free_call_crypto(call);
+       if (putu)
+               rxrpc_put_call(call, rxrpc_call_put_userid);
+
        _leave("");
 }
 
@@ -588,7 +609,8 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
                call = list_entry(rx->to_be_accepted.next,
                                  struct rxrpc_call, accept_link);
                list_del(&call->accept_link);
-               rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
+               rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
+                                   rxrpc_abort_call_sock_release_tba);
                rxrpc_put_call(call, rxrpc_call_put_release_sock_tba);
        }
 
@@ -596,8 +618,8 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
                call = list_entry(rx->sock_calls.next,
                                  struct rxrpc_call, sock_link);
                rxrpc_get_call(call, rxrpc_call_get_release_sock);
-               rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
-               rxrpc_send_abort_packet(call);
+               rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
+                                   rxrpc_abort_call_sock_release);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put_release_sock);
        }
@@ -620,7 +642,7 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
        dead = __refcount_dec_and_test(&call->ref, &r);
        trace_rxrpc_call(debug_id, r - 1, 0, why);
        if (dead) {
-               ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
+               ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
 
                if (!list_empty(&call->link)) {
                        spin_lock(&rxnet->call_lock);
@@ -669,6 +691,8 @@ static void rxrpc_destroy_call(struct work_struct *work)
 
        rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);
        rxrpc_put_connection(call->conn, rxrpc_conn_put_call);
+       rxrpc_deactivate_bundle(call->bundle);
+       rxrpc_put_bundle(call->bundle, rxrpc_bundle_put_call);
        rxrpc_put_peer(call->peer, rxrpc_peer_put_call);
        rxrpc_put_local(call->local, rxrpc_local_put_call);
        call_rcu(&call->rcu, rxrpc_rcu_free_call);
@@ -681,7 +705,7 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
 {
        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
 
-       ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
+       ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
 
        del_timer(&call->timer);
@@ -719,7 +743,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 
                        pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
                               call, refcount_read(&call->ref),
-                              rxrpc_call_states[call->state],
+                              rxrpc_call_states[__rxrpc_call_state(call)],
                               call->flags, call->events);
 
                        spin_unlock(&rxnet->call_lock);
diff --git a/net/rxrpc/call_state.c b/net/rxrpc/call_state.c
new file mode 100644
index 0000000..6afb543
--- /dev/null
+++ b/net/rxrpc/call_state.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Call state changing functions.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include "ar-internal.h"
+
+/*
+ * Transition a call to the complete state.
+ */
+bool rxrpc_set_call_completion(struct rxrpc_call *call,
+                                enum rxrpc_call_completion compl,
+                                u32 abort_code,
+                                int error)
+{
+       if (__rxrpc_call_state(call) == RXRPC_CALL_COMPLETE)
+               return false;
+
+       call->abort_code = abort_code;
+       call->error = error;
+       call->completion = compl;
+       /* Allow reader of completion state to operate locklessly */
+       rxrpc_set_call_state(call, RXRPC_CALL_COMPLETE);
+       trace_rxrpc_call_complete(call);
+       wake_up(&call->waitq);
+       rxrpc_notify_socket(call);
+       return true;
+}
+
+/*
+ * Record that a call successfully completed.
+ */
+bool rxrpc_call_completed(struct rxrpc_call *call)
+{
+       return rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
+}
+
+/*
+ * Record that a call is locally aborted.
+ */
+bool rxrpc_abort_call(struct rxrpc_call *call, rxrpc_seq_t seq,
+                     u32 abort_code, int error, enum rxrpc_abort_reason why)
+{
+       trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
+                         abort_code, error);
+       if (!rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
+                                      abort_code, error))
+               return false;
+       if (test_bit(RXRPC_CALL_EXPOSED, &call->flags))
+               rxrpc_send_abort_packet(call);
+       return true;
+}
+
+/*
+ * Record that a call errored out before even getting off the ground, thereby
+ * setting the state to allow it to be destroyed.
+ */
+void rxrpc_prefail_call(struct rxrpc_call *call, enum rxrpc_call_completion compl,
+                       int error)
+{
+       call->abort_code        = RX_CALL_DEAD;
+       call->error             = error;
+       call->completion        = compl;
+       call->_state            = RXRPC_CALL_COMPLETE;
+       trace_rxrpc_call_complete(call);
+       WARN_ON_ONCE(__test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags));
+}
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 87efa03..981ca5b 100644
@@ -34,104 +34,59 @@ __read_mostly unsigned int rxrpc_reap_client_connections = 900;
 __read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
 __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
 
-/*
- * We use machine-unique IDs for our client connections.
- */
-DEFINE_IDR(rxrpc_client_conn_ids);
-static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
-
-static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle);
-
-/*
- * Get a connection ID and epoch for a client connection from the global pool.
- * The connection struct pointer is then recorded in the idr radix tree.  The
- * epoch doesn't change until the client is rebooted (or, at least, unless the
- * module is unloaded).
- */
-static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
-                                         gfp_t gfp)
+static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle)
 {
-       struct rxrpc_net *rxnet = conn->rxnet;
-       int id;
-
-       _enter("");
-
-       idr_preload(gfp);
-       spin_lock(&rxrpc_conn_id_lock);
-
-       id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
-                             1, 0x40000000, GFP_NOWAIT);
-       if (id < 0)
-               goto error;
-
-       spin_unlock(&rxrpc_conn_id_lock);
-       idr_preload_end();
-
-       conn->proto.epoch = rxnet->epoch;
-       conn->proto.cid = id << RXRPC_CIDSHIFT;
-       set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
-       _leave(" [CID %x]", conn->proto.cid);
-       return 0;
-
-error:
-       spin_unlock(&rxrpc_conn_id_lock);
-       idr_preload_end();
-       _leave(" = %d", id);
-       return id;
+       atomic_inc(&bundle->active);
 }
 
 /*
- * Release a connection ID for a client connection from the global pool.
+ * Release a connection ID for a client connection.
  */
-static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
+static void rxrpc_put_client_connection_id(struct rxrpc_local *local,
+                                          struct rxrpc_connection *conn)
 {
-       if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
-               spin_lock(&rxrpc_conn_id_lock);
-               idr_remove(&rxrpc_client_conn_ids,
-                          conn->proto.cid >> RXRPC_CIDSHIFT);
-               spin_unlock(&rxrpc_conn_id_lock);
-       }
+       idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT);
 }
 
 /*
  * Destroy the client connection ID tree.
  */
-void rxrpc_destroy_client_conn_ids(void)
+static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local)
 {
        struct rxrpc_connection *conn;
        int id;
 
-       if (!idr_is_empty(&rxrpc_client_conn_ids)) {
-               idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
+       if (!idr_is_empty(&local->conn_ids)) {
+               idr_for_each_entry(&local->conn_ids, conn, id) {
                        pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
                               conn, refcount_read(&conn->ref));
                }
                BUG();
        }
 
-       idr_destroy(&rxrpc_client_conn_ids);
+       idr_destroy(&local->conn_ids);
 }
 
 /*
  * Allocate a connection bundle.
  */
-static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
+static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
                                               gfp_t gfp)
 {
        struct rxrpc_bundle *bundle;
 
        bundle = kzalloc(sizeof(*bundle), gfp);
        if (bundle) {
-               bundle->local           = cp->local;
-               bundle->peer            = rxrpc_get_peer(cp->peer, rxrpc_peer_get_bundle);
-               bundle->key             = cp->key;
-               bundle->exclusive       = cp->exclusive;
-               bundle->upgrade         = cp->upgrade;
-               bundle->service_id      = cp->service_id;
-               bundle->security_level  = cp->security_level;
+               bundle->local           = call->local;
+               bundle->peer            = rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle);
+               bundle->key             = key_get(call->key);
+               bundle->security        = call->security;
+               bundle->exclusive       = test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
+               bundle->upgrade         = test_bit(RXRPC_CALL_UPGRADE, &call->flags);
+               bundle->service_id      = call->dest_srx.srx_service;
+               bundle->security_level  = call->security_level;
                refcount_set(&bundle->ref, 1);
                atomic_set(&bundle->active, 1);
-               spin_lock_init(&bundle->channel_lock);
                INIT_LIST_HEAD(&bundle->waiting_calls);
                trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new);
        }
@@ -152,84 +107,87 @@ static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
 {
        trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_free);
        rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
+       key_put(bundle->key);
        kfree(bundle);
 }
 
 void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why)
 {
-       unsigned int id = bundle->debug_id;
+       unsigned int id;
        bool dead;
        int r;
 
-       dead = __refcount_dec_and_test(&bundle->ref, &r);
-       trace_rxrpc_bundle(id, r - 1, why);
-       if (dead)
-               rxrpc_free_bundle(bundle);
+       if (bundle) {
+               id = bundle->debug_id;
+               dead = __refcount_dec_and_test(&bundle->ref, &r);
+               trace_rxrpc_bundle(id, r - 1, why);
+               if (dead)
+                       rxrpc_free_bundle(bundle);
+       }
+}
+
+/*
+ * Get rid of outstanding client connection preallocations when a local
+ * endpoint is destroyed.
+ */
+void rxrpc_purge_client_connections(struct rxrpc_local *local)
+{
+       rxrpc_destroy_client_conn_ids(local);
 }
 
 /*
  * Allocate a client connection.
  */
 static struct rxrpc_connection *
-rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
+rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle)
 {
        struct rxrpc_connection *conn;
-       struct rxrpc_net *rxnet = bundle->local->rxnet;
-       int ret;
+       struct rxrpc_local *local = bundle->local;
+       struct rxrpc_net *rxnet = local->rxnet;
+       int id;
 
        _enter("");
 
-       conn = rxrpc_alloc_connection(rxnet, gfp);
-       if (!conn) {
-               _leave(" = -ENOMEM");
+       conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN);
+       if (!conn)
                return ERR_PTR(-ENOMEM);
+
+       id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000,
+                             GFP_ATOMIC | __GFP_NOWARN);
+       if (id < 0) {
+               kfree(conn);
+               return ERR_PTR(id);
        }
 
        refcount_set(&conn->ref, 1);
-       conn->bundle            = bundle;
-       conn->local             = bundle->local;
-       conn->peer              = bundle->peer;
-       conn->key               = bundle->key;
+       conn->proto.cid         = id << RXRPC_CIDSHIFT;
+       conn->proto.epoch       = local->rxnet->epoch;
+       conn->out_clientflag    = RXRPC_CLIENT_INITIATED;
+       conn->bundle            = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
+       conn->local             = rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn);
+       conn->peer              = rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn);
+       conn->key               = key_get(bundle->key);
+       conn->security          = bundle->security;
        conn->exclusive         = bundle->exclusive;
        conn->upgrade           = bundle->upgrade;
        conn->orig_service_id   = bundle->service_id;
        conn->security_level    = bundle->security_level;
-       conn->out_clientflag    = RXRPC_CLIENT_INITIATED;
-       conn->state             = RXRPC_CONN_CLIENT;
+       conn->state             = RXRPC_CONN_CLIENT_UNSECURED;
        conn->service_id        = conn->orig_service_id;
 
-       ret = rxrpc_get_client_connection_id(conn, gfp);
-       if (ret < 0)
-               goto error_0;
-
-       ret = rxrpc_init_client_conn_security(conn);
-       if (ret < 0)
-               goto error_1;
+       if (conn->security == &rxrpc_no_security)
+               conn->state     = RXRPC_CONN_CLIENT;
 
        atomic_inc(&rxnet->nr_conns);
        write_lock(&rxnet->conn_lock);
        list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
        write_unlock(&rxnet->conn_lock);
 
-       rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
-       rxrpc_get_peer(conn->peer, rxrpc_peer_get_client_conn);
-       rxrpc_get_local(conn->local, rxrpc_local_get_client_conn);
-       key_get(conn->key);
-
-       trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref),
-                        rxrpc_conn_new_client);
+       rxrpc_see_connection(conn, rxrpc_conn_new_client);
 
        atomic_inc(&rxnet->nr_client_conns);
        trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
-       _leave(" = %p", conn);
        return conn;
-
-error_1:
-       rxrpc_put_client_connection_id(conn);
-error_0:
-       kfree(conn);
-       _leave(" = %d", ret);
-       return ERR_PTR(ret);
 }
 
 /*
@@ -247,7 +205,8 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
        if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
                goto dont_reuse;
 
-       if (conn->state != RXRPC_CONN_CLIENT ||
+       if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED &&
+            conn->state != RXRPC_CONN_CLIENT) ||
            conn->proto.epoch != rxnet->epoch)
                goto mark_dont_reuse;
 
@@ -257,7 +216,7 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
         * times the maximum number of client conns away from the current
         * allocation point to try and keep the IDs concentrated.
         */
-       id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
+       id_cursor = idr_get_cursor(&conn->local->conn_ids);
        id = conn->proto.cid >> RXRPC_CIDSHIFT;
        distance = id - id_cursor;
        if (distance < 0)
@@ -278,20 +237,23 @@ dont_reuse:
  * Look up the conn bundle that matches the connection parameters, adding it if
  * it doesn't yet exist.
  */
-static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *cp,
-                                                gfp_t gfp)
+int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
 {
        static atomic_t rxrpc_bundle_id;
        struct rxrpc_bundle *bundle, *candidate;
-       struct rxrpc_local *local = cp->local;
+       struct rxrpc_local *local = call->local;
        struct rb_node *p, **pp, *parent;
        long diff;
+       bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);
 
        _enter("{%px,%x,%u,%u}",
-              cp->peer, key_serial(cp->key), cp->security_level, cp->upgrade);
+              call->peer, key_serial(call->key), call->security_level,
+              upgrade);
 
-       if (cp->exclusive)
-               return rxrpc_alloc_bundle(cp, gfp);
+       if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) {
+               call->bundle = rxrpc_alloc_bundle(call, gfp);
+               return call->bundle ? 0 : -ENOMEM;
+       }
 
        /* First, see if the bundle is already there. */
        _debug("search 1");
@@ -300,11 +262,11 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
        while (p) {
                bundle = rb_entry(p, struct rxrpc_bundle, local_node);
 
-#define cmp(X) ((long)bundle->X - (long)cp->X)
-               diff = (cmp(peer) ?:
-                       cmp(key) ?:
-                       cmp(security_level) ?:
-                       cmp(upgrade));
+#define cmp(X, Y) ((long)(X) - (long)(Y))
+               diff = (cmp(bundle->peer, call->peer) ?:
+                       cmp(bundle->key, call->key) ?:
+                       cmp(bundle->security_level, call->security_level) ?:
+                       cmp(bundle->upgrade, upgrade));
 #undef cmp
                if (diff < 0)
                        p = p->rb_left;
@@ -317,9 +279,9 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
        _debug("not found");
 
        /* It wasn't.  We need to add one. */
-       candidate = rxrpc_alloc_bundle(cp, gfp);
+       candidate = rxrpc_alloc_bundle(call, gfp);
        if (!candidate)
-               return NULL;
+               return -ENOMEM;
 
        _debug("search 2");
        spin_lock(&local->client_bundles_lock);
@@ -329,11 +291,11 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
                parent = *pp;
                bundle = rb_entry(parent, struct rxrpc_bundle, local_node);
 
-#define cmp(X) ((long)bundle->X - (long)cp->X)
-               diff = (cmp(peer) ?:
-                       cmp(key) ?:
-                       cmp(security_level) ?:
-                       cmp(upgrade));
+#define cmp(X, Y) ((long)(X) - (long)(Y))
+               diff = (cmp(bundle->peer, call->peer) ?:
+                       cmp(bundle->key, call->key) ?:
+                       cmp(bundle->security_level, call->security_level) ?:
+                       cmp(bundle->upgrade, upgrade));
 #undef cmp
                if (diff < 0)
                        pp = &(*pp)->rb_left;
@@ -347,178 +309,89 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
        candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id);
        rb_link_node(&candidate->local_node, parent, pp);
        rb_insert_color(&candidate->local_node, &local->client_bundles);
-       rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
+       call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
        spin_unlock(&local->client_bundles_lock);
-       _leave(" = %u [new]", candidate->debug_id);
-       return candidate;
+       _leave(" = B=%u [new]", call->bundle->debug_id);
+       return 0;
 
 found_bundle_free:
        rxrpc_free_bundle(candidate);
 found_bundle:
-       rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
-       atomic_inc(&bundle->active);
+       call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
+       rxrpc_activate_bundle(bundle);
        spin_unlock(&local->client_bundles_lock);
-       _leave(" = %u [found]", bundle->debug_id);
-       return bundle;
-}
-
-/*
- * Create or find a client bundle to use for a call.
- *
- * If we return with a connection, the call will be on its waiting list.  It's
- * left to the caller to assign a channel and wake up the call.
- */
-static struct rxrpc_bundle *rxrpc_prep_call(struct rxrpc_sock *rx,
-                                           struct rxrpc_call *call,
-                                           struct rxrpc_conn_parameters *cp,
-                                           struct sockaddr_rxrpc *srx,
-                                           gfp_t gfp)
-{
-       struct rxrpc_bundle *bundle;
-
-       _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
-
-       cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
-       if (!cp->peer)
-               goto error;
-
-       call->tx_last_sent = ktime_get_real();
-       call->cong_ssthresh = cp->peer->cong_ssthresh;
-       if (call->cong_cwnd >= call->cong_ssthresh)
-               call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
-       else
-               call->cong_mode = RXRPC_CALL_SLOW_START;
-       if (cp->upgrade)
-               __set_bit(RXRPC_CALL_UPGRADE, &call->flags);
-
-       /* Find the client connection bundle. */
-       bundle = rxrpc_look_up_bundle(cp, gfp);
-       if (!bundle)
-               goto error;
-
-       /* Get this call queued.  Someone else may activate it whilst we're
-        * lining up a new connection, but that's fine.
-        */
-       spin_lock(&bundle->channel_lock);
-       list_add_tail(&call->chan_wait_link, &bundle->waiting_calls);
-       spin_unlock(&bundle->channel_lock);
-
-       _leave(" = [B=%x]", bundle->debug_id);
-       return bundle;
-
-error:
-       _leave(" = -ENOMEM");
-       return ERR_PTR(-ENOMEM);
+       _leave(" = B=%u [found]", call->bundle->debug_id);
+       return 0;
 }
 
 /*
  * Allocate a new connection and add it into a bundle.
  */
-static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp)
-       __releases(bundle->channel_lock)
+static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle,
+                                    unsigned int slot)
 {
-       struct rxrpc_connection *candidate = NULL, *old = NULL;
-       bool conflict;
-       int i;
-
-       _enter("");
-
-       conflict = bundle->alloc_conn;
-       if (!conflict)
-               bundle->alloc_conn = true;
-       spin_unlock(&bundle->channel_lock);
-       if (conflict) {
-               _leave(" [conf]");
-               return;
-       }
-
-       candidate = rxrpc_alloc_client_connection(bundle, gfp);
-
-       spin_lock(&bundle->channel_lock);
-       bundle->alloc_conn = false;
-
-       if (IS_ERR(candidate)) {
-               bundle->alloc_error = PTR_ERR(candidate);
-               spin_unlock(&bundle->channel_lock);
-               _leave(" [err %ld]", PTR_ERR(candidate));
-               return;
-       }
-
-       bundle->alloc_error = 0;
-
-       for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
-               unsigned int shift = i * RXRPC_MAXCALLS;
-               int j;
-
-               old = bundle->conns[i];
-               if (!rxrpc_may_reuse_conn(old)) {
-                       if (old)
-                               trace_rxrpc_client(old, -1, rxrpc_client_replace);
-                       candidate->bundle_shift = shift;
-                       atomic_inc(&bundle->active);
-                       bundle->conns[i] = candidate;
-                       for (j = 0; j < RXRPC_MAXCALLS; j++)
-                               set_bit(shift + j, &bundle->avail_chans);
-                       candidate = NULL;
-                       break;
-               }
+       struct rxrpc_connection *conn, *old;
+       unsigned int shift = slot * RXRPC_MAXCALLS;
+       unsigned int i;
 
-               old = NULL;
+       old = bundle->conns[slot];
+       if (old) {
+               bundle->conns[slot] = NULL;
+               trace_rxrpc_client(old, -1, rxrpc_client_replace);
+               rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
        }
 
-       spin_unlock(&bundle->channel_lock);
-
-       if (candidate) {
-               _debug("discard C=%x", candidate->debug_id);
-               trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
-               rxrpc_put_connection(candidate, rxrpc_conn_put_discard);
+       conn = rxrpc_alloc_client_connection(bundle);
+       if (IS_ERR(conn)) {
+               bundle->alloc_error = PTR_ERR(conn);
+               return false;
        }
 
-       rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
-       _leave("");
+       rxrpc_activate_bundle(bundle);
+       conn->bundle_shift = shift;
+       bundle->conns[slot] = conn;
+       for (i = 0; i < RXRPC_MAXCALLS; i++)
+               set_bit(shift + i, &bundle->avail_chans);
+       return true;
 }
 
 /*
  * Add a connection to a bundle if there are no usable connections or we have
  * connections waiting for extra capacity.
  */
-static void rxrpc_maybe_add_conn(struct rxrpc_bundle *bundle, gfp_t gfp)
+static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle)
 {
-       struct rxrpc_call *call;
-       int i, usable;
+       int slot = -1, i, usable;
 
        _enter("");
 
-       spin_lock(&bundle->channel_lock);
+       bundle->alloc_error = 0;
 
        /* See if there are any usable connections. */
        usable = 0;
-       for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
+       for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
                if (rxrpc_may_reuse_conn(bundle->conns[i]))
                        usable++;
-
-       if (!usable && !list_empty(&bundle->waiting_calls)) {
-               call = list_first_entry(&bundle->waiting_calls,
-                                       struct rxrpc_call, chan_wait_link);
-               if (test_bit(RXRPC_CALL_UPGRADE, &call->flags))
-                       bundle->try_upgrade = true;
+               else if (slot == -1)
+                       slot = i;
        }
 
+       if (!usable && bundle->upgrade)
+               bundle->try_upgrade = true;
+
        if (!usable)
                goto alloc_conn;
 
        if (!bundle->avail_chans &&
            !bundle->try_upgrade &&
-           !list_empty(&bundle->waiting_calls) &&
            usable < ARRAY_SIZE(bundle->conns))
                goto alloc_conn;
 
-       spin_unlock(&bundle->channel_lock);
        _leave("");
-       return;
+       return usable;
 
 alloc_conn:
-       return rxrpc_add_conn_to_bundle(bundle, gfp);
+       return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false;
 }
 
 /*
@@ -532,11 +405,13 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
        struct rxrpc_channel *chan = &conn->channels[channel];
        struct rxrpc_bundle *bundle = conn->bundle;
        struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
-                                            struct rxrpc_call, chan_wait_link);
+                                            struct rxrpc_call, wait_link);
        u32 call_id = chan->call_counter + 1;
 
        _enter("C=%x,%u", conn->debug_id, channel);
 
+       list_del_init(&call->wait_link);
+
        trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
 
        /* Cancel the final ACK on the previous call if it hasn't been sent yet
@@ -546,68 +421,50 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
        clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);
 
        rxrpc_see_call(call, rxrpc_call_see_activate_client);
-       list_del_init(&call->chan_wait_link);
-       call->peer      = rxrpc_get_peer(conn->peer, rxrpc_peer_get_activate_call);
        call->conn      = rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
        call->cid       = conn->proto.cid | channel;
        call->call_id   = call_id;
        call->dest_srx.srx_service = conn->service_id;
-
-       trace_rxrpc_connect_call(call);
-
-       write_lock(&call->state_lock);
-       call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
-       write_unlock(&call->state_lock);
-
-       /* Paired with the read barrier in rxrpc_connect_call().  This orders
-        * cid and epoch in the connection wrt to call_id without the need to
-        * take the channel_lock.
-        *
-        * We provisionally assign a callNumber at this point, but we don't
-        * confirm it until the call is about to be exposed.
-        *
-        * TODO: Pair with a barrier in the data_ready handler when that looks
-        * at the call ID through a connection channel.
-        */
-       smp_wmb();
+       call->cong_ssthresh = call->peer->cong_ssthresh;
+       if (call->cong_cwnd >= call->cong_ssthresh)
+               call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+       else
+               call->cong_mode = RXRPC_CALL_SLOW_START;
 
        chan->call_id           = call_id;
        chan->call_debug_id     = call->debug_id;
-       rcu_assign_pointer(chan->call, call);
+       chan->call              = call;
+
+       rxrpc_see_call(call, rxrpc_call_see_connected);
+       trace_rxrpc_connect_call(call);
+       call->tx_last_sent = ktime_get_real();
+       rxrpc_start_call_timer(call);
+       rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST);
        wake_up(&call->waitq);
 }
 
 /*
  * Remove a connection from the idle list if it's on it.
  */
-static void rxrpc_unidle_conn(struct rxrpc_bundle *bundle, struct rxrpc_connection *conn)
+static void rxrpc_unidle_conn(struct rxrpc_connection *conn)
 {
-       struct rxrpc_net *rxnet = bundle->local->rxnet;
-       bool drop_ref;
-
        if (!list_empty(&conn->cache_link)) {
-               drop_ref = false;
-               spin_lock(&rxnet->client_conn_cache_lock);
-               if (!list_empty(&conn->cache_link)) {
-                       list_del_init(&conn->cache_link);
-                       drop_ref = true;
-               }
-               spin_unlock(&rxnet->client_conn_cache_lock);
-               if (drop_ref)
-                       rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
+               list_del_init(&conn->cache_link);
+               rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
        }
 }
 
 /*
- * Assign channels and callNumbers to waiting calls with channel_lock
- * held by caller.
+ * Assign channels and callNumbers to waiting calls.
  */
-static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
+static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
 {
        struct rxrpc_connection *conn;
        unsigned long avail, mask;
        unsigned int channel, slot;
 
+       trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);
+
        if (bundle->try_upgrade)
                mask = 1;
        else
@@ -627,7 +484,7 @@ static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
 
                if (bundle->try_upgrade)
                        set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
-               rxrpc_unidle_conn(bundle, conn);
+               rxrpc_unidle_conn(conn);
 
                channel &= (RXRPC_MAXCALLS - 1);
                conn->act_chans |= 1 << channel;
@@ -636,132 +493,24 @@ static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
 }
 
 /*
- * Assign channels and callNumbers to waiting calls.
- */
-static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
-{
-       _enter("B=%x", bundle->debug_id);
-
-       trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);
-
-       if (!bundle->avail_chans)
-               return;
-
-       spin_lock(&bundle->channel_lock);
-       rxrpc_activate_channels_locked(bundle);
-       spin_unlock(&bundle->channel_lock);
-       _leave("");
-}
-
-/*
- * Wait for a callNumber and a channel to be granted to a call.
- */
-static int rxrpc_wait_for_channel(struct rxrpc_bundle *bundle,
-                                 struct rxrpc_call *call, gfp_t gfp)
-{
-       DECLARE_WAITQUEUE(myself, current);
-       int ret = 0;
-
-       _enter("%d", call->debug_id);
-
-       if (!gfpflags_allow_blocking(gfp)) {
-               rxrpc_maybe_add_conn(bundle, gfp);
-               rxrpc_activate_channels(bundle);
-               ret = bundle->alloc_error ?: -EAGAIN;
-               goto out;
-       }
-
-       add_wait_queue_exclusive(&call->waitq, &myself);
-       for (;;) {
-               rxrpc_maybe_add_conn(bundle, gfp);
-               rxrpc_activate_channels(bundle);
-               ret = bundle->alloc_error;
-               if (ret < 0)
-                       break;
-
-               switch (call->interruptibility) {
-               case RXRPC_INTERRUPTIBLE:
-               case RXRPC_PREINTERRUPTIBLE:
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       break;
-               case RXRPC_UNINTERRUPTIBLE:
-               default:
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       break;
-               }
-               if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_AWAIT_CONN)
-                       break;
-               if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
-                    call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
-                   signal_pending(current)) {
-                       ret = -ERESTARTSYS;
-                       break;
-               }
-               schedule();
-       }
-       remove_wait_queue(&call->waitq, &myself);
-       __set_current_state(TASK_RUNNING);
-
-out:
-       _leave(" = %d", ret);
-       return ret;
-}
-
-/*
- * find a connection for a call
- * - called in process context with IRQs enabled
+ * Connect waiting channels (called from the I/O thread).
  */
-int rxrpc_connect_call(struct rxrpc_sock *rx,
-                      struct rxrpc_call *call,
-                      struct rxrpc_conn_parameters *cp,
-                      struct sockaddr_rxrpc *srx,
-                      gfp_t gfp)
+void rxrpc_connect_client_calls(struct rxrpc_local *local)
 {
-       struct rxrpc_bundle *bundle;
-       struct rxrpc_net *rxnet = cp->local->rxnet;
-       int ret = 0;
-
-       _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
-
-       rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
+       struct rxrpc_call *call;
 
-       bundle = rxrpc_prep_call(rx, call, cp, srx, gfp);
-       if (IS_ERR(bundle)) {
-               ret = PTR_ERR(bundle);
-               goto out;
-       }
+       while ((call = list_first_entry_or_null(&local->new_client_calls,
+                                               struct rxrpc_call, wait_link))
+              ) {
+               struct rxrpc_bundle *bundle = call->bundle;
 
-       if (call->state == RXRPC_CALL_CLIENT_AWAIT_CONN) {
-               ret = rxrpc_wait_for_channel(bundle, call, gfp);
-               if (ret < 0)
-                       goto wait_failed;
-       }
+               spin_lock(&local->client_call_lock);
+               list_move_tail(&call->wait_link, &bundle->waiting_calls);
+               spin_unlock(&local->client_call_lock);
 
-granted_channel:
-       /* Paired with the write barrier in rxrpc_activate_one_channel(). */
-       smp_rmb();
-
-out_put_bundle:
-       rxrpc_deactivate_bundle(bundle);
-       rxrpc_put_bundle(bundle, rxrpc_bundle_get_client_call);
-out:
-       _leave(" = %d", ret);
-       return ret;
-
-wait_failed:
-       spin_lock(&bundle->channel_lock);
-       list_del_init(&call->chan_wait_link);
-       spin_unlock(&bundle->channel_lock);
-
-       if (call->state != RXRPC_CALL_CLIENT_AWAIT_CONN) {
-               ret = 0;
-               goto granted_channel;
+               if (rxrpc_bundle_has_space(bundle))
+                       rxrpc_activate_channels(bundle);
        }
-
-       trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
-       rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
-       rxrpc_disconnect_client_call(bundle, call);
-       goto out_put_bundle;
 }
 
 /*
@@ -794,14 +543,14 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
 /*
  * Set the reap timer.
  */
-static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
+static void rxrpc_set_client_reap_timer(struct rxrpc_local *local)
 {
-       if (!rxnet->kill_all_client_conns) {
+       if (!local->kill_all_client_conns) {
                unsigned long now = jiffies;
                unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
 
-               if (rxnet->live)
-                       timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
+               if (local->rxnet->live)
+                       timer_reduce(&local->client_conn_reap_timer, reap_at);
        }
 }
 
@@ -812,16 +561,13 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
 {
        struct rxrpc_connection *conn;
        struct rxrpc_channel *chan = NULL;
-       struct rxrpc_net *rxnet = bundle->local->rxnet;
+       struct rxrpc_local *local = bundle->local;
        unsigned int channel;
        bool may_reuse;
        u32 cid;
 
        _enter("c=%x", call->debug_id);
 
-       spin_lock(&bundle->channel_lock);
-       set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
-
        /* Calls that have never actually been assigned a channel can simply be
         * discarded.
         */
@@ -830,8 +576,8 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
                _debug("call is waiting");
                ASSERTCMP(call->call_id, ==, 0);
                ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
-               list_del_init(&call->chan_wait_link);
-               goto out;
+               list_del_init(&call->wait_link);
+               return;
        }
 
        cid = call->cid;
@@ -839,10 +585,8 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
        chan = &conn->channels[channel];
        trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
 
-       if (rcu_access_pointer(chan->call) != call) {
-               spin_unlock(&bundle->channel_lock);
-               BUG();
-       }
+       if (WARN_ON(chan->call != call))
+               return;
 
        may_reuse = rxrpc_may_reuse_conn(conn);
 
@@ -863,16 +607,15 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
                        trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
                        bundle->try_upgrade = false;
                        if (may_reuse)
-                               rxrpc_activate_channels_locked(bundle);
+                               rxrpc_activate_channels(bundle);
                }
-
        }
 
        /* See if we can pass the channel directly to another call. */
        if (may_reuse && !list_empty(&bundle->waiting_calls)) {
                trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
                rxrpc_activate_one_channel(conn, channel);
-               goto out;
+               return;
        }
 
        /* Schedule the final ACK to be transmitted in a short while so that it
@@ -890,7 +633,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
        }
 
        /* Deactivate the channel. */
-       rcu_assign_pointer(chan->call, NULL);
+       chan->call = NULL;
        set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
        conn->act_chans &= ~(1 << channel);
 
@@ -903,17 +646,10 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
                conn->idle_timestamp = jiffies;
 
                rxrpc_get_connection(conn, rxrpc_conn_get_idle);
-               spin_lock(&rxnet->client_conn_cache_lock);
-               list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
-               spin_unlock(&rxnet->client_conn_cache_lock);
+               list_move_tail(&conn->cache_link, &local->idle_client_conns);
 
-               rxrpc_set_client_reap_timer(rxnet);
+               rxrpc_set_client_reap_timer(local);
        }
-
-out:
-       spin_unlock(&bundle->channel_lock);
-       _leave("");
-       return;
 }
 
 /*
@@ -923,7 +659,6 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
 {
        struct rxrpc_bundle *bundle = conn->bundle;
        unsigned int bindex;
-       bool need_drop = false;
        int i;
 
        _enter("C=%x", conn->debug_id);
@@ -931,18 +666,13 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
        if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
                rxrpc_process_delayed_final_acks(conn, true);
 
-       spin_lock(&bundle->channel_lock);
        bindex = conn->bundle_shift / RXRPC_MAXCALLS;
        if (bundle->conns[bindex] == conn) {
                _debug("clear slot %u", bindex);
                bundle->conns[bindex] = NULL;
                for (i = 0; i < RXRPC_MAXCALLS; i++)
                        clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
-               need_drop = true;
-       }
-       spin_unlock(&bundle->channel_lock);
-
-       if (need_drop) {
+               rxrpc_put_client_connection_id(bundle->local, conn);
                rxrpc_deactivate_bundle(bundle);
                rxrpc_put_connection(conn, rxrpc_conn_put_unbundle);
        }
@@ -951,11 +681,15 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
 /*
  * Drop the active count on a bundle.
  */
-static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
+void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
 {
-       struct rxrpc_local *local = bundle->local;
+       struct rxrpc_local *local;
        bool need_put = false;
 
+       if (!bundle)
+               return;
+
+       local = bundle->local;
        if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
                if (!bundle->exclusive) {
                        _debug("erase bundle");
@@ -982,7 +716,7 @@ void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
        trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
        atomic_dec(&rxnet->nr_client_conns);
 
-       rxrpc_put_client_connection_id(conn);
+       rxrpc_put_client_connection_id(local, conn);
 }
 
 /*
@@ -992,42 +726,26 @@ void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
  * This may be called from conn setup or from a work item, so it cannot be
  * assumed to be non-reentrant.
  */
-void rxrpc_discard_expired_client_conns(struct work_struct *work)
+void rxrpc_discard_expired_client_conns(struct rxrpc_local *local)
 {
        struct rxrpc_connection *conn;
-       struct rxrpc_net *rxnet =
-               container_of(work, struct rxrpc_net, client_conn_reaper);
        unsigned long expiry, conn_expires_at, now;
        unsigned int nr_conns;
 
        _enter("");
 
-       if (list_empty(&rxnet->idle_client_conns)) {
-               _leave(" [empty]");
-               return;
-       }
-
-       /* Don't double up on the discarding */
-       if (!mutex_trylock(&rxnet->client_conn_discard_lock)) {
-               _leave(" [already]");
-               return;
-       }
-
        /* We keep an estimate of what the number of conns ought to be after
         * we've discarded some so that we don't overdo the discarding.
         */
-       nr_conns = atomic_read(&rxnet->nr_client_conns);
+       nr_conns = atomic_read(&local->rxnet->nr_client_conns);
 
 next:
-       spin_lock(&rxnet->client_conn_cache_lock);
-
-       if (list_empty(&rxnet->idle_client_conns))
-               goto out;
-
-       conn = list_entry(rxnet->idle_client_conns.next,
-                         struct rxrpc_connection, cache_link);
+       conn = list_first_entry_or_null(&local->idle_client_conns,
+                                       struct rxrpc_connection, cache_link);
+       if (!conn)
+               return;
 
-       if (!rxnet->kill_all_client_conns) {
+       if (!local->kill_all_client_conns) {
                /* If the number of connections is over the reap limit, we
                 * expedite discard by reducing the expiry timeout.  We must,
                 * however, have at least a short grace period to be able to do
@@ -1050,8 +768,6 @@ next:
        trace_rxrpc_client(conn, -1, rxrpc_client_discard);
        list_del_init(&conn->cache_link);
 
-       spin_unlock(&rxnet->client_conn_cache_lock);
-
        rxrpc_unbundle_conn(conn);
        /* Drop the ->cache_link ref */
        rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle);
@@ -1068,31 +784,8 @@ not_yet_expired:
         * then things get messier.
         */
        _debug("not yet");
-       if (!rxnet->kill_all_client_conns)
-               timer_reduce(&rxnet->client_conn_reap_timer, conn_expires_at);
-
-out:
-       spin_unlock(&rxnet->client_conn_cache_lock);
-       mutex_unlock(&rxnet->client_conn_discard_lock);
-       _leave("");
-}
-
-/*
- * Preemptively destroy all the client connection records rather than waiting
- * for them to time out
- */
-void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
-{
-       _enter("");
-
-       spin_lock(&rxnet->client_conn_cache_lock);
-       rxnet->kill_all_client_conns = true;
-       spin_unlock(&rxnet->client_conn_cache_lock);
-
-       del_timer_sync(&rxnet->client_conn_reap_timer);
-
-       if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
-               _debug("destroy: queue failed");
+       if (!local->kill_all_client_conns)
+               timer_reduce(&local->client_conn_reap_timer, conn_expires_at);
 
        _leave("");
 }
@@ -1102,29 +795,19 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
  */
 void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
 {
-       struct rxrpc_connection *conn, *tmp;
-       struct rxrpc_net *rxnet = local->rxnet;
-       LIST_HEAD(graveyard);
+       struct rxrpc_connection *conn;
 
        _enter("");
 
-       spin_lock(&rxnet->client_conn_cache_lock);
-
-       list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
-                                cache_link) {
-               if (conn->local == local) {
-                       atomic_dec(&conn->active);
-                       trace_rxrpc_client(conn, -1, rxrpc_client_discard);
-                       list_move(&conn->cache_link, &graveyard);
-               }
-       }
+       local->kill_all_client_conns = true;
 
-       spin_unlock(&rxnet->client_conn_cache_lock);
+       del_timer_sync(&local->client_conn_reap_timer);
 
-       while (!list_empty(&graveyard)) {
-               conn = list_entry(graveyard.next,
-                                 struct rxrpc_connection, cache_link);
+       while ((conn = list_first_entry_or_null(&local->idle_client_conns,
+                                               struct rxrpc_connection, cache_link))) {
                list_del_init(&conn->cache_link);
+               atomic_dec(&conn->active);
+               trace_rxrpc_client(conn, -1, rxrpc_client_discard);
                rxrpc_unbundle_conn(conn);
                rxrpc_put_connection(conn, rxrpc_conn_put_local_dead);
        }
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 480364b..44414e7 100644
 #include "ar-internal.h"
 
 /*
+ * Set the completion state on an aborted connection.
+ */
+static bool rxrpc_set_conn_aborted(struct rxrpc_connection *conn, struct sk_buff *skb,
+                                  s32 abort_code, int err,
+                                  enum rxrpc_call_completion compl)
+{
+       bool aborted = false;
+
+       if (conn->state != RXRPC_CONN_ABORTED) {
+               spin_lock(&conn->state_lock);
+               if (conn->state != RXRPC_CONN_ABORTED) {
+                       conn->abort_code = abort_code;
+                       conn->error      = err;
+                       conn->completion = compl;
+                       /* Order the abort info before the state change. */
+                       smp_store_release(&conn->state, RXRPC_CONN_ABORTED);
+                       set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
+                       set_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events);
+                       aborted = true;
+               }
+               spin_unlock(&conn->state_lock);
+       }
+
+       return aborted;
+}
+
+/*
+ * Abort a connection in response to the given packet, poking the I/O thread to abort its calls.
+ */
+int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
+                    s32 abort_code, int err, enum rxrpc_abort_reason why)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+       if (rxrpc_set_conn_aborted(conn, skb, abort_code, err,
+                                  RXRPC_CALL_LOCALLY_ABORTED)) {
+               trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber,
+                                 sp->hdr.seq, abort_code, err);
+               rxrpc_poke_conn(conn, rxrpc_conn_get_poke_abort);
+       }
+       return -EPROTO;
+}
+
+/*
+ * Mark a connection as being remotely aborted.
+ */
+static bool rxrpc_input_conn_abort(struct rxrpc_connection *conn,
+                                  struct sk_buff *skb)
+{
+       return rxrpc_set_conn_aborted(conn, skb, skb->priority, -ECONNABORTED,
+                                     RXRPC_CALL_REMOTELY_ABORTED);
+}
+
+/*
  * Retransmit terminal ACK or ABORT of the previous call.
  */
-static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
-                                      struct sk_buff *skb,
-                                      unsigned int channel)
+void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
+                               struct sk_buff *skb,
+                               unsigned int channel)
 {
        struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
        struct rxrpc_channel *chan;
@@ -46,9 +100,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        /* If the last call got moved on whilst we were waiting to run, just
         * ignore this packet.
         */
-       call_id = READ_ONCE(chan->last_call);
-       /* Sync with __rxrpc_disconnect_call() */
-       smp_rmb();
+       call_id = chan->last_call;
        if (skb && call_id != sp->hdr.callNumber)
                return;
 
@@ -65,9 +117,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        iov[2].iov_base = &ack_info;
        iov[2].iov_len  = sizeof(ack_info);
 
+       serial = atomic_inc_return(&conn->serial);
+
        pkt.whdr.epoch          = htonl(conn->proto.epoch);
        pkt.whdr.cid            = htonl(conn->proto.cid | channel);
        pkt.whdr.callNumber     = htonl(call_id);
+       pkt.whdr.serial         = htonl(serial);
        pkt.whdr.seq            = 0;
        pkt.whdr.type           = chan->last_type;
        pkt.whdr.flags          = conn->out_clientflag;
@@ -104,31 +159,15 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
                iov[0].iov_len += sizeof(pkt.ack);
                len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
                ioc = 3;
-               break;
-
-       default:
-               return;
-       }
-
-       /* Resync with __rxrpc_disconnect_call() and check that the last call
-        * didn't get advanced whilst we were filling out the packets.
-        */
-       smp_rmb();
-       if (READ_ONCE(chan->last_call) != call_id)
-               return;
-
-       serial = atomic_inc_return(&conn->serial);
-       pkt.whdr.serial = htonl(serial);
 
-       switch (chan->last_type) {
-       case RXRPC_PACKET_TYPE_ABORT:
-               break;
-       case RXRPC_PACKET_TYPE_ACK:
                trace_rxrpc_tx_ack(chan->call_debug_id, serial,
                                   ntohl(pkt.ack.firstPacket),
                                   ntohl(pkt.ack.serial),
                                   pkt.ack.reason, 0);
                break;
+
+       default:
+               return;
        }
 
        ret = kernel_sendmsg(conn->local->socket, &msg, iov, ioc, len);
@@ -146,131 +185,34 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 /*
  * pass a connection-level abort onto all calls on that connection
  */
-static void rxrpc_abort_calls(struct rxrpc_connection *conn,
-                             enum rxrpc_call_completion compl,
-                             rxrpc_serial_t serial)
+static void rxrpc_abort_calls(struct rxrpc_connection *conn)
 {
        struct rxrpc_call *call;
        int i;
 
        _enter("{%d},%x", conn->debug_id, conn->abort_code);
 
-       spin_lock(&conn->bundle->channel_lock);
-
        for (i = 0; i < RXRPC_MAXCALLS; i++) {
-               call = rcu_dereference_protected(
-                       conn->channels[i].call,
-                       lockdep_is_held(&conn->bundle->channel_lock));
-               if (call) {
-                       if (compl == RXRPC_CALL_LOCALLY_ABORTED)
-                               trace_rxrpc_abort(call->debug_id,
-                                                 "CON", call->cid,
-                                                 call->call_id, 0,
+               call = conn->channels[i].call;
+               if (call)
+                       rxrpc_set_call_completion(call,
+                                                 conn->completion,
                                                  conn->abort_code,
                                                  conn->error);
-                       else
-                               trace_rxrpc_rx_abort(call, serial,
-                                                    conn->abort_code);
-                       rxrpc_set_call_completion(call, compl,
-                                                 conn->abort_code,
-                                                 conn->error);
-               }
        }
 
-       spin_unlock(&conn->bundle->channel_lock);
        _leave("");
 }
 
 /*
- * generate a connection-level abort
- */
-static int rxrpc_abort_connection(struct rxrpc_connection *conn,
-                                 int error, u32 abort_code)
-{
-       struct rxrpc_wire_header whdr;
-       struct msghdr msg;
-       struct kvec iov[2];
-       __be32 word;
-       size_t len;
-       u32 serial;
-       int ret;
-
-       _enter("%d,,%u,%u", conn->debug_id, error, abort_code);
-
-       /* generate a connection-level abort */
-       spin_lock(&conn->state_lock);
-       if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
-               spin_unlock(&conn->state_lock);
-               _leave(" = 0 [already dead]");
-               return 0;
-       }
-
-       conn->error = error;
-       conn->abort_code = abort_code;
-       conn->state = RXRPC_CONN_LOCALLY_ABORTED;
-       set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
-       spin_unlock(&conn->state_lock);
-
-       msg.msg_name    = &conn->peer->srx.transport;
-       msg.msg_namelen = conn->peer->srx.transport_len;
-       msg.msg_control = NULL;
-       msg.msg_controllen = 0;
-       msg.msg_flags   = 0;
-
-       whdr.epoch      = htonl(conn->proto.epoch);
-       whdr.cid        = htonl(conn->proto.cid);
-       whdr.callNumber = 0;
-       whdr.seq        = 0;
-       whdr.type       = RXRPC_PACKET_TYPE_ABORT;
-       whdr.flags      = conn->out_clientflag;
-       whdr.userStatus = 0;
-       whdr.securityIndex = conn->security_ix;
-       whdr._rsvd      = 0;
-       whdr.serviceId  = htons(conn->service_id);
-
-       word            = htonl(conn->abort_code);
-
-       iov[0].iov_base = &whdr;
-       iov[0].iov_len  = sizeof(whdr);
-       iov[1].iov_base = &word;
-       iov[1].iov_len  = sizeof(word);
-
-       len = iov[0].iov_len + iov[1].iov_len;
-
-       serial = atomic_inc_return(&conn->serial);
-       rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial);
-       whdr.serial = htonl(serial);
-
-       ret = kernel_sendmsg(conn->local->socket, &msg, iov, 2, len);
-       if (ret < 0) {
-               trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
-                                   rxrpc_tx_point_conn_abort);
-               _debug("sendmsg failed: %d", ret);
-               return -EAGAIN;
-       }
-
-       trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);
-
-       conn->peer->last_tx_at = ktime_get_seconds();
-
-       _leave(" = 0");
-       return 0;
-}
-
-/*
  * mark a call as being on a now-secured channel
  * - must be called with BH's disabled.
  */
 static void rxrpc_call_is_secure(struct rxrpc_call *call)
 {
-       _enter("%p", call);
-       if (call) {
-               write_lock(&call->state_lock);
-               if (call->state == RXRPC_CALL_SERVER_SECURING) {
-                       call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
-                       rxrpc_notify_socket(call);
-               }
-               write_unlock(&call->state_lock);
+       if (call && __rxrpc_call_state(call) == RXRPC_CALL_SERVER_SECURING) {
+               rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
+               rxrpc_notify_socket(call);
        }
 }
 
@@ -278,44 +220,22 @@ static void rxrpc_call_is_secure(struct rxrpc_call *call)
  * connection-level Rx packet processor
  */
 static int rxrpc_process_event(struct rxrpc_connection *conn,
-                              struct sk_buff *skb,
-                              u32 *_abort_code)
+                              struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-       int loop, ret;
+       int ret;
 
-       if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
-               _leave(" = -ECONNABORTED [%u]", conn->state);
+       if (conn->state == RXRPC_CONN_ABORTED)
                return -ECONNABORTED;
-       }
 
        _enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);
 
        switch (sp->hdr.type) {
-       case RXRPC_PACKET_TYPE_DATA:
-       case RXRPC_PACKET_TYPE_ACK:
-               rxrpc_conn_retransmit_call(conn, skb,
-                                          sp->hdr.cid & RXRPC_CHANNELMASK);
-               return 0;
-
-       case RXRPC_PACKET_TYPE_BUSY:
-               /* Just ignore BUSY packets for now. */
-               return 0;
-
-       case RXRPC_PACKET_TYPE_ABORT:
-               conn->error = -ECONNABORTED;
-               conn->abort_code = skb->priority;
-               conn->state = RXRPC_CONN_REMOTELY_ABORTED;
-               set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
-               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
-               return -ECONNABORTED;
-
        case RXRPC_PACKET_TYPE_CHALLENGE:
-               return conn->security->respond_to_challenge(conn, skb,
-                                                           _abort_code);
+               return conn->security->respond_to_challenge(conn, skb);
 
        case RXRPC_PACKET_TYPE_RESPONSE:
-               ret = conn->security->verify_response(conn, skb, _abort_code);
+               ret = conn->security->verify_response(conn, skb);
                if (ret < 0)
                        return ret;
 
@@ -324,27 +244,25 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
                if (ret < 0)
                        return ret;
 
-               spin_lock(&conn->bundle->channel_lock);
                spin_lock(&conn->state_lock);
-
-               if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
+               if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING)
                        conn->state = RXRPC_CONN_SERVICE;
-                       spin_unlock(&conn->state_lock);
-                       for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
-                               rxrpc_call_is_secure(
-                                       rcu_dereference_protected(
-                                               conn->channels[loop].call,
-                                               lockdep_is_held(&conn->bundle->channel_lock)));
-               } else {
-                       spin_unlock(&conn->state_lock);
-               }
+               spin_unlock(&conn->state_lock);
 
-               spin_unlock(&conn->bundle->channel_lock);
+               if (conn->state == RXRPC_CONN_SERVICE) {
+                       /* Offload call state flipping to the I/O thread.  As
+                        * we've already received the packet, put it on the
+                        * front of the queue.
+                        */
+                       skb->mark = RXRPC_SKB_MARK_SERVICE_CONN_SECURED;
+                       rxrpc_get_skb(skb, rxrpc_skb_get_conn_secured);
+                       skb_queue_head(&conn->local->rx_queue, skb);
+                       rxrpc_wake_up_io_thread(conn->local);
+               }
                return 0;
 
        default:
-               trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
-                                     tracepoint_string("bad_conn_pkt"));
+               WARN_ON_ONCE(1);
                return -EPROTO;
        }
 }
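Note the ordering trick in the RESPONSE case above: the packet has already
been pulled off the wire, so it is requeued at the head of the I/O thread's
queue, ahead of anything that arrived after it, before the thread is woken.
A minimal userspace analogue of that hand-off, names illustrative:

    #include <pthread.h>

    struct msg {
            struct msg *next;
            int         mark;       /* cf. RXRPC_SKB_MARK_SERVICE_CONN_SECURED */
    };

    struct io_thread {
            pthread_mutex_t lock;
            pthread_cond_t  wake;
            struct msg     *head, *tail;
    };

    /* Requeue an already-received message ahead of everything newer. */
    static void io_queue_head(struct io_thread *io, struct msg *m)
    {
            pthread_mutex_lock(&io->lock);
            m->next = io->head;
            io->head = m;
            if (!io->tail)                  /* queue was empty */
                    io->tail = m;
            pthread_mutex_unlock(&io->lock);
            pthread_cond_signal(&io->wake); /* cf. rxrpc_wake_up_io_thread() */
    }
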
@@ -354,26 +272,9 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
  */
 static void rxrpc_secure_connection(struct rxrpc_connection *conn)
 {
-       u32 abort_code;
-       int ret;
-
-       _enter("{%d}", conn->debug_id);
-
-       ASSERT(conn->security_ix != 0);
-
-       if (conn->security->issue_challenge(conn) < 0) {
-               abort_code = RX_CALL_DEAD;
-               ret = -ENOMEM;
-               goto abort;
-       }
-
-       _leave("");
-       return;
-
-abort:
-       _debug("abort %d, %d", ret, abort_code);
-       rxrpc_abort_connection(conn, ret, abort_code);
-       _leave(" [aborted]");
+       if (conn->security->issue_challenge(conn) < 0)
+               rxrpc_abort_conn(conn, NULL, RX_CALL_DEAD, -ENOMEM,
+                                rxrpc_abort_nomem);
 }
 
 /*
@@ -395,9 +296,7 @@ again:
                if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
                        continue;
 
-               smp_rmb(); /* vs rxrpc_disconnect_client_call */
-               ack_at = READ_ONCE(chan->final_ack_at);
-
+               ack_at = chan->final_ack_at;
                if (time_before(j, ack_at) && !force) {
                        if (time_before(ack_at, next_j)) {
                                next_j = ack_at;
@@ -424,47 +323,27 @@ again:
 static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
 {
        struct sk_buff *skb;
-       u32 abort_code = RX_PROTOCOL_ERROR;
        int ret;
 
        if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
                rxrpc_secure_connection(conn);
 
-       /* Process delayed ACKs whose time has come. */
-       if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
-               rxrpc_process_delayed_final_acks(conn, false);
-
        /* go through the conn-level event packets, releasing the ref on this
         * connection that each one has when we've finished with it */
        while ((skb = skb_dequeue(&conn->rx_queue))) {
                rxrpc_see_skb(skb, rxrpc_skb_see_conn_work);
-               ret = rxrpc_process_event(conn, skb, &abort_code);
+               ret = rxrpc_process_event(conn, skb);
                switch (ret) {
-               case -EPROTO:
-               case -EKEYEXPIRED:
-               case -EKEYREJECTED:
-                       goto protocol_error;
                case -ENOMEM:
                case -EAGAIN:
-                       goto requeue_and_leave;
-               case -ECONNABORTED:
+                       skb_queue_head(&conn->rx_queue, skb);
+                       rxrpc_queue_conn(conn, rxrpc_conn_queue_retry_work);
+                       break;
                default:
                        rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
                        break;
                }
        }
-
-       return;
-
-requeue_and_leave:
-       skb_queue_head(&conn->rx_queue, skb);
-       return;
-
-protocol_error:
-       if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
-               goto requeue_and_leave;
-       rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
-       return;
 }
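With protocol failures now aborting the connection inside
rxrpc_process_event() itself, the dispatcher above only has to separate
transient failures (-ENOMEM, -EAGAIN), which requeue the packet and
reschedule the work item, from everything else, which consumes the packet.
A sketch of that shape, with a return after the requeue so that a transient
failure is retried from the work queue rather than immediately; names and
helpers are illustrative:

    #include <errno.h>

    struct msg;
    extern struct msg *queue_pop(void);
    extern void queue_push_head(struct msg *m);
    extern void schedule_retry(void);           /* cf. rxrpc_queue_conn() */
    extern void msg_free(struct msg *m);
    extern int  process_one(struct msg *m);

    static void process_queue(void)
    {
            struct msg *m;

            while ((m = queue_pop())) {
                    switch (process_one(m)) {
                    case -ENOMEM:
                    case -EAGAIN:
                            queue_push_head(m); /* keep ordering for the retry */
                            schedule_retry();
                            return;             /* don't spin on a transient failure */
                    default:
                            msg_free(m);        /* success and hard errors both consume it */
                    }
            }
    }
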
 
 void rxrpc_process_connection(struct work_struct *work)
@@ -498,44 +377,59 @@ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
 /*
  * Input a connection-level packet.
  */
-int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
+bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 
-       if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
-               _leave(" = -ECONNABORTED [%u]", conn->state);
-               return -ECONNABORTED;
-       }
-
-       _enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);
-
        switch (sp->hdr.type) {
-       case RXRPC_PACKET_TYPE_DATA:
-       case RXRPC_PACKET_TYPE_ACK:
-               rxrpc_conn_retransmit_call(conn, skb,
-                                          sp->hdr.cid & RXRPC_CHANNELMASK);
-               return 0;
-
        case RXRPC_PACKET_TYPE_BUSY:
                /* Just ignore BUSY packets for now. */
-               return 0;
+               return true;
 
        case RXRPC_PACKET_TYPE_ABORT:
-               conn->error = -ECONNABORTED;
-               conn->abort_code = skb->priority;
-               conn->state = RXRPC_CONN_REMOTELY_ABORTED;
-               set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
-               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
-               return -ECONNABORTED;
+               if (rxrpc_is_conn_aborted(conn))
+                       return true;
+               rxrpc_input_conn_abort(conn, skb);
+               rxrpc_abort_calls(conn);
+               return true;
 
        case RXRPC_PACKET_TYPE_CHALLENGE:
        case RXRPC_PACKET_TYPE_RESPONSE:
+               if (rxrpc_is_conn_aborted(conn)) {
+                       if (conn->completion == RXRPC_CALL_LOCALLY_ABORTED)
+                               rxrpc_send_conn_abort(conn);
+                       return true;
+               }
                rxrpc_post_packet_to_conn(conn, skb);
-               return 0;
+               return true;
 
        default:
-               trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
-                                     tracepoint_string("bad_conn_pkt"));
-               return -EPROTO;
+               WARN_ON_ONCE(1);
+               return true;
        }
 }
+
+/*
+ * Input a connection event.
+ */
+void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb)
+{
+       unsigned int loop;
+
+       if (test_and_clear_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events))
+               rxrpc_abort_calls(conn);
+
+       if (skb) {
+               switch (skb->mark) {
+               case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
+                       if (conn->state != RXRPC_CONN_SERVICE)
+                               break;
+
+                       for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
+                               rxrpc_call_is_secure(conn->channels[loop].call);
+                       break;
+               }
+       }
+
+       /* Process delayed ACKs whose time has come. */
+       if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
+               rxrpc_process_delayed_final_acks(conn, false);
+}
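Note the NULL guard above: the I/O thread also calls
rxrpc_input_conn_event(conn, NULL) from the connection attend queue, so the
skb-driven part must be skipped when there is no packet.  Separately, the
RXRPC_CONN_EV_ABORT_CALLS flag is consumed with test_and_clear_bit(), so a
bit that may be set from several contexts triggers exactly one sweep of the
calls.  The same idiom in portable C11, names illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned long conn_events;

    #define EV_ABORT_CALLS  (1UL << 0)

    static bool test_and_clear_event(unsigned long bit)
    {
            /* Atomically clear the bit and report whether it was set. */
            return atomic_fetch_and(&conn_events, ~bit) & bit;
    }
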
index 3c8f83d..ac85d46 100644
@@ -23,12 +23,30 @@ static void rxrpc_clean_up_connection(struct work_struct *work);
 static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
                                         unsigned long reap_at);
 
+void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
+{
+       struct rxrpc_local *local = conn->local;
+       bool busy;
+
+       if (WARN_ON_ONCE(!local))
+               return;
+
+       spin_lock_bh(&local->lock);
+       busy = !list_empty(&conn->attend_link);
+       if (!busy) {
+               rxrpc_get_connection(conn, why);
+               list_add_tail(&conn->attend_link, &local->conn_attend_q);
+       }
+       spin_unlock_bh(&local->lock);
+       rxrpc_wake_up_io_thread(local);
+}
+
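rxrpc_poke_conn() mirrors the existing call poke: the emptiness of
attend_link doubles as an "already queued" flag, the attend queue holds its
own reference on the connection, and the I/O thread is woken whether or not
the connection was already queued.  In miniature, names illustrative:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct conn {
            struct conn *attend_next;
            bool         queued;    /* cf. !list_empty(&conn->attend_link) */
            int          ref;
    };

    struct io_thread {
            pthread_mutex_t lock;
            pthread_cond_t  wake;
            struct conn    *attend_head, *attend_tail;
    };

    static void poke_conn(struct io_thread *io, struct conn *c)
    {
            pthread_mutex_lock(&io->lock);
            if (!c->queued) {
                    c->queued = true;
                    c->ref++;       /* ref now held by the attend queue */
                    c->attend_next = NULL;
                    if (io->attend_tail)
                            io->attend_tail->attend_next = c;
                    else
                            io->attend_head = c;
                    io->attend_tail = c;
            }
            pthread_mutex_unlock(&io->lock);
            pthread_cond_signal(&io->wake); /* wake even if already queued */
    }
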
 static void rxrpc_connection_timer(struct timer_list *timer)
 {
        struct rxrpc_connection *conn =
                container_of(timer, struct rxrpc_connection, timer);
 
-       rxrpc_queue_conn(conn, rxrpc_conn_queue_timer);
+       rxrpc_poke_conn(conn, rxrpc_conn_get_poke_timer);
 }
 
 /*
@@ -49,6 +67,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
                INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
                INIT_LIST_HEAD(&conn->proc_link);
                INIT_LIST_HEAD(&conn->link);
+               mutex_init(&conn->security_lock);
                skb_queue_head_init(&conn->rx_queue);
                conn->rxnet = rxnet;
                conn->security = &rxrpc_no_security;
@@ -82,10 +101,10 @@ struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *lo
 
        _enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);
 
-       /* Look up client connections by connection ID alone as their IDs are
-        * unique for this machine.
+       /* Look up client connections by connection ID alone as their
+        * IDs are unique for this machine.
         */
-       conn = idr_find(&rxrpc_client_conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
+       conn = idr_find(&local->conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
        if (!conn || refcount_read(&conn->ref) == 0) {
                _debug("no conn");
                goto not_found;
@@ -139,7 +158,7 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
 
        _enter("%d,%x", conn->debug_id, call->cid);
 
-       if (rcu_access_pointer(chan->call) == call) {
+       if (chan->call == call) {
                /* Save the result of the call so that we can repeat it if necessary
                 * through the channel, whilst disposing of the actual call record.
                 */
@@ -159,12 +178,9 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
                        break;
                }
 
-               /* Sync with rxrpc_conn_retransmit(). */
-               smp_wmb();
                chan->last_call = chan->call_id;
                chan->call_id = chan->call_counter;
-
-               rcu_assign_pointer(chan->call, NULL);
+               chan->call = NULL;
        }
 
        _leave("");
@@ -178,6 +194,9 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
 {
        struct rxrpc_connection *conn = call->conn;
 
+       set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
+       rxrpc_see_call(call, rxrpc_call_see_disconnected);
+
        call->peer->cong_ssthresh = call->cong_ssthresh;
 
        if (!hlist_unhashed(&call->error_link)) {
@@ -186,18 +205,17 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
                spin_unlock(&call->peer->lock);
        }
 
-       if (rxrpc_is_client_call(call))
-               return rxrpc_disconnect_client_call(conn->bundle, call);
-
-       spin_lock(&conn->bundle->channel_lock);
-       __rxrpc_disconnect_call(conn, call);
-       spin_unlock(&conn->bundle->channel_lock);
+       if (rxrpc_is_client_call(call)) {
+               rxrpc_disconnect_client_call(call->bundle, call);
+       } else {
+               __rxrpc_disconnect_call(conn, call);
+               conn->idle_timestamp = jiffies;
+               if (atomic_dec_and_test(&conn->active))
+                       rxrpc_set_service_reap_timer(conn->rxnet,
+                                                    jiffies + rxrpc_connection_expiry);
+       }
 
-       set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
-       conn->idle_timestamp = jiffies;
-       if (atomic_dec_and_test(&conn->active))
-               rxrpc_set_service_reap_timer(conn->rxnet,
-                                            jiffies + rxrpc_connection_expiry);
+       rxrpc_put_call(call, rxrpc_call_put_io_thread);
 }
 
 /*
@@ -293,10 +311,10 @@ static void rxrpc_clean_up_connection(struct work_struct *work)
                container_of(work, struct rxrpc_connection, destructor);
        struct rxrpc_net *rxnet = conn->rxnet;
 
-       ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
-              !rcu_access_pointer(conn->channels[1].call) &&
-              !rcu_access_pointer(conn->channels[2].call) &&
-              !rcu_access_pointer(conn->channels[3].call));
+       ASSERT(!conn->channels[0].call &&
+              !conn->channels[1].call &&
+              !conn->channels[2].call &&
+              !conn->channels[3].call);
        ASSERT(list_empty(&conn->cache_link));
 
        del_timer_sync(&conn->timer);
@@ -447,7 +465,6 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
        _enter("");
 
        atomic_dec(&rxnet->nr_conns);
-       rxrpc_destroy_all_client_connections(rxnet);
 
        del_timer_sync(&rxnet->service_conn_reap_timer);
        rxrpc_queue_work(&rxnet->service_conn_reaper);
index 2a55a88..f30323d 100644
@@ -11,7 +11,6 @@
 static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
        .ref            = REFCOUNT_INIT(1),
        .debug_id       = UINT_MAX,
-       .channel_lock   = __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock),
 };
 
 /*
index d0e20e9..367927a 100644
@@ -9,11 +9,10 @@
 
 #include "ar-internal.h"
 
-static void rxrpc_proto_abort(const char *why,
-                             struct rxrpc_call *call, rxrpc_seq_t seq)
+static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq,
+                             enum rxrpc_abort_reason why)
 {
-       if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG))
-               rxrpc_send_abort_packet(call);
+       rxrpc_abort_call(call, seq, RX_PROTOCOL_ERROR, -EBADMSG, why);
 }
 
 /*
@@ -185,7 +184,7 @@ void rxrpc_congestion_degrade(struct rxrpc_call *call)
        if (call->cong_mode != RXRPC_CALL_SLOW_START &&
            call->cong_mode != RXRPC_CALL_CONGEST_AVOIDANCE)
                return;
-       if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
+       if (__rxrpc_call_state(call) == RXRPC_CALL_CLIENT_AWAIT_REPLY)
                return;
 
        rtt = ns_to_ktime(call->peer->srtt_us * (1000 / 8));
@@ -250,47 +249,34 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
  * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
  * or a final ACK packet.
  */
-static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
-                              const char *abort_why)
+static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
+                              enum rxrpc_abort_reason abort_why)
 {
-       unsigned int state;
-
        ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
 
-       write_lock(&call->state_lock);
-
-       state = call->state;
-       switch (state) {
+       switch (__rxrpc_call_state(call)) {
        case RXRPC_CALL_CLIENT_SEND_REQUEST:
        case RXRPC_CALL_CLIENT_AWAIT_REPLY:
-               if (reply_begun)
-                       call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
-               else
-                       call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+               if (reply_begun) {
+                       rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_RECV_REPLY);
+                       trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
+                       break;
+               }
+
+               rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY);
+               trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply);
                break;
 
        case RXRPC_CALL_SERVER_AWAIT_ACK:
-               __rxrpc_call_completed(call);
-               state = call->state;
+               rxrpc_call_completed(call);
+               trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
                break;
 
        default:
-               goto bad_state;
+               kdebug("end_tx %s", rxrpc_call_states[__rxrpc_call_state(call)]);
+               rxrpc_proto_abort(call, call->tx_top, abort_why);
+               break;
        }
-
-       write_unlock(&call->state_lock);
-       if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
-               trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply);
-       else
-               trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
-       _leave(" = ok");
-       return true;
-
-bad_state:
-       write_unlock(&call->state_lock);
-       kdebug("end_tx %s", rxrpc_call_states[call->state]);
-       rxrpc_proto_abort(abort_why, call, call->tx_top);
-       return false;
 }
 
 /*
@@ -305,18 +291,48 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
        if (call->ackr_reason) {
                now = jiffies;
                timo = now + MAX_JIFFY_OFFSET;
-               WRITE_ONCE(call->resend_at, timo);
+
                WRITE_ONCE(call->delay_ack_at, timo);
                trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
        }
 
        if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
                if (!rxrpc_rotate_tx_window(call, top, &summary)) {
-                       rxrpc_proto_abort("TXL", call, top);
+                       rxrpc_proto_abort(call, top, rxrpc_eproto_early_reply);
                        return false;
                }
        }
-       return rxrpc_end_tx_phase(call, true, "ETD");
+
+       rxrpc_end_tx_phase(call, true, rxrpc_eproto_unexpected_reply);
+       return true;
+}
+
+/*
+ * End the packet reception phase.
+ */
+static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
+{
+       rxrpc_seq_t whigh = READ_ONCE(call->rx_highest_seq);
+
+       _enter("%d,%s", call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)]);
+
+       trace_rxrpc_receive(call, rxrpc_receive_end, 0, whigh);
+
+       switch (__rxrpc_call_state(call)) {
+       case RXRPC_CALL_CLIENT_RECV_REPLY:
+               rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack);
+               rxrpc_call_completed(call);
+               break;
+
+       case RXRPC_CALL_SERVER_RECV_REQUEST:
+               rxrpc_set_call_state(call, RXRPC_CALL_SERVER_ACK_REQUEST);
+               call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
+               rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_processing_op);
+               break;
+
+       default:
+               break;
+       }
 }
 
 static void rxrpc_input_update_ack_window(struct rxrpc_call *call,
@@ -337,8 +353,9 @@ static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
 
        __skb_queue_tail(&call->recvmsg_queue, skb);
        rxrpc_input_update_ack_window(call, window, wtop);
-
        trace_rxrpc_receive(call, last ? why + 1 : why, sp->hdr.serial, sp->hdr.seq);
+       if (last)
+               rxrpc_end_rx_phase(call, sp->hdr.serial);
 }
 
 /*
@@ -366,17 +383,14 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
 
        if (last) {
                if (test_and_set_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
-                   seq + 1 != wtop) {
-                       rxrpc_proto_abort("LSN", call, seq);
-                       return;
-               }
+                   seq + 1 != wtop)
+                       return rxrpc_proto_abort(call, seq, rxrpc_eproto_different_last);
        } else {
                if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
                    after_eq(seq, wtop)) {
                        pr_warn("Packet beyond last: c=%x q=%x window=%x-%x wlimit=%x\n",
                                call->debug_id, seq, window, wtop, wlimit);
-                       rxrpc_proto_abort("LSA", call, seq);
-                       return;
+                       return rxrpc_proto_abort(call, seq, rxrpc_eproto_data_after_last);
                }
        }
 
@@ -550,7 +564,6 @@ protocol_error:
 static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-       enum rxrpc_call_state state;
        rxrpc_serial_t serial = sp->hdr.serial;
        rxrpc_seq_t seq0 = sp->hdr.seq;
 
@@ -558,11 +571,20 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
               atomic64_read(&call->ackr_window), call->rx_highest_seq,
               skb->len, seq0);
 
-       state = READ_ONCE(call->state);
-       if (state >= RXRPC_CALL_COMPLETE)
+       if (__rxrpc_call_is_complete(call))
                return;
 
-       if (state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+       switch (__rxrpc_call_state(call)) {
+       case RXRPC_CALL_CLIENT_SEND_REQUEST:
+       case RXRPC_CALL_CLIENT_AWAIT_REPLY:
+               /* Received data implicitly ACKs all of the request
+                * packets we sent when we're acting as a client.
+                */
+               if (!rxrpc_receiving_reply(call))
+                       goto out_notify;
+               break;
+
+       case RXRPC_CALL_SERVER_RECV_REQUEST: {
                unsigned long timo = READ_ONCE(call->next_req_timo);
                unsigned long now, expect_req_by;
 
@@ -573,18 +595,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
                        rxrpc_reduce_call_timer(call, expect_req_by, now,
                                                rxrpc_timer_set_for_idle);
                }
+               break;
        }
 
-       /* Received data implicitly ACKs all of the request packets we sent
-        * when we're acting as a client.
-        */
-       if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
-            state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
-           !rxrpc_receiving_reply(call))
-               goto out_notify;
+       default:
+               break;
+       }
 
        if (!rxrpc_input_split_jumbo(call, skb)) {
-               rxrpc_proto_abort("VLD", call, sp->hdr.seq);
+               rxrpc_proto_abort(call, sp->hdr.seq, rxrpc_badmsg_bad_jumbo);
                goto out_notify;
        }
        skb = NULL;
@@ -765,7 +784,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 
        offset = sizeof(struct rxrpc_wire_header);
        if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0)
-               return rxrpc_proto_abort("XAK", call, 0);
+               return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack);
        offset += sizeof(ack);
 
        ack_serial = sp->hdr.serial;
@@ -845,7 +864,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
        ioffset = offset + nr_acks + 3;
        if (skb->len >= ioffset + sizeof(info) &&
            skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0)
-               return rxrpc_proto_abort("XAI", call, 0);
+               return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_info);
 
        if (nr_acks > 0)
                skb_condense(skb);
@@ -868,10 +887,10 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
                rxrpc_input_ackinfo(call, skb, &info);
 
        if (first_soft_ack == 0)
-               return rxrpc_proto_abort("AK0", call, 0);
+               return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_zero);
 
        /* Ignore ACKs unless we are or have just been transmitting. */
-       switch (READ_ONCE(call->state)) {
+       switch (__rxrpc_call_state(call)) {
        case RXRPC_CALL_CLIENT_SEND_REQUEST:
        case RXRPC_CALL_CLIENT_AWAIT_REPLY:
        case RXRPC_CALL_SERVER_SEND_REPLY:
@@ -883,20 +902,20 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 
        if (before(hard_ack, call->acks_hard_ack) ||
            after(hard_ack, call->tx_top))
-               return rxrpc_proto_abort("AKW", call, 0);
+               return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_outside_window);
        if (nr_acks > call->tx_top - hard_ack)
-               return rxrpc_proto_abort("AKN", call, 0);
+               return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_sack_overflow);
 
        if (after(hard_ack, call->acks_hard_ack)) {
                if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
-                       rxrpc_end_tx_phase(call, false, "ETA");
+                       rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack);
                        return;
                }
        }
 
        if (nr_acks > 0) {
                if (offset > (int)skb->len - nr_acks)
-                       return rxrpc_proto_abort("XSA", call, 0);
+                       return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_short_sack);
                rxrpc_input_soft_acks(call, skb->data + offset, first_soft_ack,
                                      nr_acks, &summary);
        }
@@ -918,7 +937,7 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
        struct rxrpc_ack_summary summary = { 0 };
 
        if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
-               rxrpc_end_tx_phase(call, false, "ETL");
+               rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ackall);
 }
 
 /*
@@ -963,27 +982,23 @@ void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
 
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_DATA:
-               rxrpc_input_data(call, skb);
-               break;
+               return rxrpc_input_data(call, skb);
 
        case RXRPC_PACKET_TYPE_ACK:
-               rxrpc_input_ack(call, skb);
-               break;
+               return rxrpc_input_ack(call, skb);
 
        case RXRPC_PACKET_TYPE_BUSY:
                /* Just ignore BUSY packets from the server; the retry and
                 * lifespan timers will take care of business.  BUSY packets
                 * from the client don't make sense.
                 */
-               break;
+               return;
 
        case RXRPC_PACKET_TYPE_ABORT:
-               rxrpc_input_abort(call, skb);
-               break;
+               return rxrpc_input_abort(call, skb);
 
        case RXRPC_PACKET_TYPE_ACKALL:
-               rxrpc_input_ackall(call, skb);
-               break;
+               return rxrpc_input_ackall(call, skb);
 
        default:
                break;
@@ -998,24 +1013,18 @@ void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
  */
 void rxrpc_implicit_end_call(struct rxrpc_call *call, struct sk_buff *skb)
 {
-       struct rxrpc_connection *conn = call->conn;
-
-       switch (READ_ONCE(call->state)) {
+       switch (__rxrpc_call_state(call)) {
        case RXRPC_CALL_SERVER_AWAIT_ACK:
                rxrpc_call_completed(call);
                fallthrough;
        case RXRPC_CALL_COMPLETE:
                break;
        default:
-               if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN))
-                       rxrpc_send_abort_packet(call);
+               rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ESHUTDOWN,
+                                rxrpc_eproto_improper_term);
                trace_rxrpc_improper_term(call);
                break;
        }
 
        rxrpc_input_call_event(call, skb);
-
-       spin_lock(&conn->bundle->channel_lock);
-       __rxrpc_disconnect_call(conn, call);
-       spin_unlock(&conn->bundle->channel_lock);
 }
index 0eb8471..34353b6 100644
@@ -43,25 +43,17 @@ static void none_free_call_crypto(struct rxrpc_call *call)
 }
 
 static int none_respond_to_challenge(struct rxrpc_connection *conn,
-                                    struct sk_buff *skb,
-                                    u32 *_abort_code)
+                                    struct sk_buff *skb)
 {
-       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-
-       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
-                             tracepoint_string("chall_none"));
-       return -EPROTO;
+       return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
+                               rxrpc_eproto_rxnull_challenge);
 }
 
 static int none_verify_response(struct rxrpc_connection *conn,
-                               struct sk_buff *skb,
-                               u32 *_abort_code)
+                               struct sk_buff *skb)
 {
-       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-
-       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
-                             tracepoint_string("resp_none"));
-       return -EPROTO;
+       return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
+                               rxrpc_eproto_rxnull_response);
 }
 
 static void none_clear(struct rxrpc_connection *conn)
index 1ad067d..9e9dfb2 100644
@@ -67,9 +67,31 @@ void rxrpc_error_report(struct sock *sk)
 }
 
 /*
+ * Directly produce an abort from a packet.
+ */
+bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
+                       s32 abort_code, int err)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+       trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+                         abort_code, err);
+       skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
+       skb->priority = abort_code;
+       return false;
+}
+
+static bool rxrpc_bad_message(struct sk_buff *skb, enum rxrpc_abort_reason why)
+{
+       return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EBADMSG);
+}
+
+#define just_discard true
+
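The input functions now return a bool: true, aliased to just_discard for
legibility, means the packet has been fully dealt with; false means the
caller should reject it using the abort code that rxrpc_direct_abort()
stashed in skb->priority.  The caller side, as in the I/O thread further
down, reduces to:

    if (!rxrpc_input_packet(local, &skb))
            rxrpc_reject_packet(local, skb);
    rxrpc_free_skb(skb, rxrpc_skb_put_input);
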
+/*
  * Process event packets targeted at a local endpoint.
  */
-static void rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb)
+static bool rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        char v;
@@ -81,22 +103,21 @@ static void rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb)
                if (v == 0)
                        rxrpc_send_version_request(local, &sp->hdr, skb);
        }
+
+       return true;
 }
 
 /*
  * Extract the wire header from a packet and translate the byte order.
  */
-static noinline
-int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
+static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp,
+                                struct sk_buff *skb)
 {
        struct rxrpc_wire_header whdr;
 
        /* dig out the RxRPC connection details */
-       if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) {
-               trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
-                                     tracepoint_string("bad_hdr"));
-               return -EBADMSG;
-       }
+       if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
+               return rxrpc_bad_message(skb, rxrpc_badmsg_short_hdr);
 
        memset(sp, 0, sizeof(*sp));
        sp->hdr.epoch           = ntohl(whdr.epoch);
@@ -110,7 +131,7 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
        sp->hdr.securityIndex   = whdr.securityIndex;
        sp->hdr._rsvd           = ntohs(whdr._rsvd);
        sp->hdr.serviceId       = ntohs(whdr.serviceId);
-       return 0;
+       return true;
 }
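Header extraction is a bounds check plus a network-to-host byte-order
conversion of each field.  A self-contained userspace version of the same
move, with the field set trimmed and all names illustrative:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <string.h>

    struct wire_hdr {       /* network byte order, as on the wire */
            uint32_t epoch;
            uint32_t cid;
            uint32_t call_number;
    };

    struct host_hdr {       /* host byte order, for local use */
            uint32_t epoch;
            uint32_t cid;
            uint32_t call_number;
    };

    static int extract_header(struct host_hdr *h, const void *pkt, size_t len)
    {
            struct wire_hdr w;

            if (len < sizeof(w))
                    return -1;      /* short packet: treat as a bad message */
            memcpy(&w, pkt, sizeof(w));
            h->epoch       = ntohl(w.epoch);
            h->cid         = ntohl(w.cid);
            h->call_number = ntohl(w.call_number);
            return 0;
    }
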
 
 /*
@@ -130,28 +151,28 @@ static bool rxrpc_extract_abort(struct sk_buff *skb)
 /*
  * Process packets received on the local endpoint
  */
-static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
+static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
 {
        struct rxrpc_connection *conn;
        struct sockaddr_rxrpc peer_srx;
        struct rxrpc_skb_priv *sp;
        struct rxrpc_peer *peer = NULL;
        struct sk_buff *skb = *_skb;
-       int ret = 0;
+       bool ret = false;
 
        skb_pull(skb, sizeof(struct udphdr));
 
        sp = rxrpc_skb(skb);
 
        /* dig out the RxRPC connection details */
-       if (rxrpc_extract_header(sp, skb) < 0)
-               goto bad_message;
+       if (!rxrpc_extract_header(sp, skb))
+               return just_discard;
 
        if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
                static int lose;
                if ((lose++ & 7) == 7) {
                        trace_rxrpc_rx_lose(sp);
-                       return 0;
+                       return just_discard;
                }
        }
 
@@ -160,28 +181,28 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_VERSION:
                if (rxrpc_to_client(sp))
-                       return 0;
-               rxrpc_input_version(local, skb);
-               return 0;
+                       return just_discard;
+               return rxrpc_input_version(local, skb);
 
        case RXRPC_PACKET_TYPE_BUSY:
                if (rxrpc_to_server(sp))
-                       return 0;
+                       return just_discard;
                fallthrough;
        case RXRPC_PACKET_TYPE_ACK:
        case RXRPC_PACKET_TYPE_ACKALL:
                if (sp->hdr.callNumber == 0)
-                       goto bad_message;
+                       return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
                break;
        case RXRPC_PACKET_TYPE_ABORT:
                if (!rxrpc_extract_abort(skb))
-                       return 0; /* Just discard if malformed */
+                       return just_discard; /* Just discard if malformed */
                break;
 
        case RXRPC_PACKET_TYPE_DATA:
-               if (sp->hdr.callNumber == 0 ||
-                   sp->hdr.seq == 0)
-                       goto bad_message;
+               if (sp->hdr.callNumber == 0)
+                       return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
+               if (sp->hdr.seq == 0)
+                       return rxrpc_bad_message(skb, rxrpc_badmsg_zero_seq);
 
                /* Unshare the packet so that it can be modified for in-place
                 * decryption.
@@ -191,7 +212,7 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
                        if (!skb) {
                                rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare_nomem);
                                *_skb = NULL;
-                               return 0;
+                               return just_discard;
                        }
 
                        if (skb != *_skb) {
@@ -205,28 +226,28 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
 
        case RXRPC_PACKET_TYPE_CHALLENGE:
                if (rxrpc_to_server(sp))
-                       return 0;
+                       return just_discard;
                break;
        case RXRPC_PACKET_TYPE_RESPONSE:
                if (rxrpc_to_client(sp))
-                       return 0;
+                       return just_discard;
                break;
 
                /* Packet types 9-11 should just be ignored. */
        case RXRPC_PACKET_TYPE_PARAMS:
        case RXRPC_PACKET_TYPE_10:
        case RXRPC_PACKET_TYPE_11:
-               return 0;
+               return just_discard;
 
        default:
-               goto bad_message;
+               return rxrpc_bad_message(skb, rxrpc_badmsg_unsupported_packet);
        }
 
        if (sp->hdr.serviceId == 0)
-               goto bad_message;
+               return rxrpc_bad_message(skb, rxrpc_badmsg_zero_service);
 
        if (WARN_ON_ONCE(rxrpc_extract_addr_from_skb(&peer_srx, skb) < 0))
-               return true; /* Unsupported address type - discard. */
+               return just_discard; /* Unsupported address type. */
 
        if (peer_srx.transport.family != local->srx.transport.family &&
            (peer_srx.transport.family == AF_INET &&
@@ -234,7 +255,7 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
                pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
                                    peer_srx.transport.family,
                                    local->srx.transport.family);
-               return true; /* Wrong address type - discard. */
+               return just_discard; /* Wrong address type. */
        }
 
        if (rxrpc_to_client(sp)) {
@@ -242,12 +263,8 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
                conn = rxrpc_find_client_connection_rcu(local, &peer_srx, skb);
                conn = rxrpc_get_connection_maybe(conn, rxrpc_conn_get_call_input);
                rcu_read_unlock();
-               if (!conn) {
-                       trace_rxrpc_abort(0, "NCC", sp->hdr.cid,
-                                         sp->hdr.callNumber, sp->hdr.seq,
-                                         RXKADINCONSISTENCY, EBADMSG);
-                       goto protocol_error;
-               }
+               if (!conn)
+                       return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_conn);
 
                ret = rxrpc_input_packet_on_conn(conn, &peer_srx, skb);
                rxrpc_put_connection(conn, rxrpc_conn_put_call_input);
@@ -280,19 +297,7 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
 
        ret = rxrpc_new_incoming_call(local, peer, NULL, &peer_srx, skb);
        rxrpc_put_peer(peer, rxrpc_peer_put_input);
-       if (ret < 0)
-               goto reject_packet;
-       return 0;
-
-bad_message:
-       trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                         RX_PROTOCOL_ERROR, EBADMSG);
-protocol_error:
-       skb->priority = RX_PROTOCOL_ERROR;
-       skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
-reject_packet:
-       rxrpc_reject_packet(local, skb);
-       return 0;
+       return ret;
 }
 
 /*
@@ -306,21 +311,23 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
        struct rxrpc_channel *chan;
        struct rxrpc_call *call = NULL;
        unsigned int channel;
+       bool ret;
 
        if (sp->hdr.securityIndex != conn->security_ix)
-               goto wrong_security;
+               return rxrpc_direct_abort(skb, rxrpc_eproto_wrong_security,
+                                         RXKADINCONSISTENCY, -EBADMSG);
 
        if (sp->hdr.serviceId != conn->service_id) {
                int old_id;
 
                if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
-                       goto reupgrade;
+                       return rxrpc_protocol_error(skb, rxrpc_eproto_reupgrade);
+
                old_id = cmpxchg(&conn->service_id, conn->orig_service_id,
                                 sp->hdr.serviceId);
-
                if (old_id != conn->orig_service_id &&
                    old_id != sp->hdr.serviceId)
-                       goto reupgrade;
+                       return rxrpc_protocol_error(skb, rxrpc_eproto_bad_upgrade);
        }
 
        if (after(sp->hdr.serial, conn->hi_serial))
@@ -336,19 +343,19 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
 
        /* Ignore really old calls */
        if (sp->hdr.callNumber < chan->last_call)
-               return 0;
+               return just_discard;
 
        if (sp->hdr.callNumber == chan->last_call) {
                if (chan->call ||
                    sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
-                       return 0;
+                       return just_discard;
 
                /* For the previous service call, if completed successfully, we
                 * discard all further packets.
                 */
                if (rxrpc_conn_is_service(conn) &&
                    chan->last_type == RXRPC_PACKET_TYPE_ACK)
-                       return 0;
+                       return just_discard;
 
                /* But otherwise we need to retransmit the final packet from
                 * data cached in the connection record.
@@ -358,19 +365,17 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
                                            sp->hdr.seq,
                                            sp->hdr.serial,
                                            sp->hdr.flags);
-               rxrpc_input_conn_packet(conn, skb);
-               return 0;
+               rxrpc_conn_retransmit_call(conn, skb, channel);
+               return just_discard;
        }
 
-       rcu_read_lock();
-       call = rxrpc_try_get_call(rcu_dereference(chan->call),
-                                 rxrpc_call_get_input);
-       rcu_read_unlock();
+       call = rxrpc_try_get_call(chan->call, rxrpc_call_get_input);
 
        if (sp->hdr.callNumber > chan->call_id) {
                if (rxrpc_to_client(sp)) {
                        rxrpc_put_call(call, rxrpc_call_put_input);
-                       goto reject_packet;
+                       return rxrpc_protocol_error(skb,
+                                                   rxrpc_eproto_unexpected_implicit_end);
                }
 
                if (call) {
@@ -382,38 +387,14 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
 
        if (!call) {
                if (rxrpc_to_client(sp))
-                       goto bad_message;
-               if (rxrpc_new_incoming_call(conn->local, conn->peer, conn,
-                                           peer_srx, skb) == 0)
-                       return 0;
-               goto reject_packet;
+                       return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_call);
+               return rxrpc_new_incoming_call(conn->local, conn->peer, conn,
+                                              peer_srx, skb);
        }
 
-       rxrpc_input_call_event(call, skb);
+       ret = rxrpc_input_call_event(call, skb);
        rxrpc_put_call(call, rxrpc_call_put_input);
-       return 0;
-
-wrong_security:
-       trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                         RXKADINCONSISTENCY, EBADMSG);
-       skb->priority = RXKADINCONSISTENCY;
-       goto post_abort;
-
-reupgrade:
-       trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                         RX_PROTOCOL_ERROR, EBADMSG);
-       goto protocol_error;
-
-bad_message:
-       trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                         RX_PROTOCOL_ERROR, EBADMSG);
-protocol_error:
-       skb->priority = RX_PROTOCOL_ERROR;
-post_abort:
-       skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
-reject_packet:
-       rxrpc_reject_packet(conn->local, skb);
-       return 0;
+       return ret;
 }
 
 /*
@@ -421,6 +402,7 @@ reject_packet:
  */
 int rxrpc_io_thread(void *data)
 {
+       struct rxrpc_connection *conn;
        struct sk_buff_head rx_queue;
        struct rxrpc_local *local = data;
        struct rxrpc_call *call;
@@ -436,6 +418,24 @@ int rxrpc_io_thread(void *data)
        for (;;) {
                rxrpc_inc_stat(local->rxnet, stat_io_loop);
 
+               /* Deal with connections that want immediate attention. */
+               conn = list_first_entry_or_null(&local->conn_attend_q,
+                                               struct rxrpc_connection,
+                                               attend_link);
+               if (conn) {
+                       spin_lock_bh(&local->lock);
+                       list_del_init(&conn->attend_link);
+                       spin_unlock_bh(&local->lock);
+
+                       rxrpc_input_conn_event(conn, NULL);
+                       rxrpc_put_connection(conn, rxrpc_conn_put_poke);
+                       continue;
+               }
+
+               if (test_and_clear_bit(RXRPC_CLIENT_CONN_REAP_TIMER,
+                                      &local->client_conn_flags))
+                       rxrpc_discard_expired_client_conns(local);
+
                /* Deal with calls that want immediate attention. */
                if ((call = list_first_entry_or_null(&local->call_attend_q,
                                                     struct rxrpc_call,
@@ -450,12 +450,17 @@ int rxrpc_io_thread(void *data)
                        continue;
                }
 
+               if (!list_empty(&local->new_client_calls))
+                       rxrpc_connect_client_calls(local);
+
                /* Process received packets and errors. */
                if ((skb = __skb_dequeue(&rx_queue))) {
+                       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
                        switch (skb->mark) {
                        case RXRPC_SKB_MARK_PACKET:
                                skb->priority = 0;
-                               rxrpc_input_packet(local, &skb);
+                               if (!rxrpc_input_packet(local, &skb))
+                                       rxrpc_reject_packet(local, skb);
                                trace_rxrpc_rx_done(skb->mark, skb->priority);
                                rxrpc_free_skb(skb, rxrpc_skb_put_input);
                                break;
@@ -463,6 +468,11 @@ int rxrpc_io_thread(void *data)
                                rxrpc_input_error(local, skb);
                                rxrpc_free_skb(skb, rxrpc_skb_put_error_report);
                                break;
+                       case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
+                               rxrpc_input_conn_event(sp->conn, skb);
+                               rxrpc_put_connection(sp->conn, rxrpc_conn_put_poke);
+                               rxrpc_free_skb(skb, rxrpc_skb_put_conn_secured);
+                               break;
                        default:
                                WARN_ON_ONCE(1);
                                rxrpc_free_skb(skb, rxrpc_skb_put_unknown);
@@ -481,7 +491,11 @@ int rxrpc_io_thread(void *data)
                set_current_state(TASK_INTERRUPTIBLE);
                should_stop = kthread_should_stop();
                if (!skb_queue_empty(&local->rx_queue) ||
-                   !list_empty(&local->call_attend_q)) {
+                   !list_empty(&local->call_attend_q) ||
+                   !list_empty(&local->conn_attend_q) ||
+                   !list_empty(&local->new_client_calls) ||
+                   test_bit(RXRPC_CLIENT_CONN_REAP_TIMER,
+                            &local->client_conn_flags)) {
                        __set_current_state(TASK_RUNNING);
                        continue;
                }
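The sleep at the bottom of the I/O loop commits to schedule() only after
setting TASK_INTERRUPTIBLE and re-checking every work source, including the
new conn_attend_q, new_client_calls and reap-timer flag, so a poke that
lands between the checks and the sleep is never lost.  The
condition-variable equivalent of the same discipline, assuming an io_thread
struct like the sketches above plus a should_stop flag:

    static void io_wait_for_work(struct io_thread *io)
    {
            pthread_mutex_lock(&io->lock);
            while (!io->should_stop && !io->head && !io->attend_head)
                    pthread_cond_wait(&io->wake, &io->lock);
            pthread_mutex_unlock(&io->lock);
    }
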
index 270b63d..b8eaca5 100644
@@ -82,31 +82,59 @@ static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
        }
 }
 
+static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
+{
+       struct rxrpc_local *local =
+               container_of(timer, struct rxrpc_local, client_conn_reap_timer);
+
+       if (!local->kill_all_client_conns &&
+           !test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags))
+               rxrpc_wake_up_io_thread(local);
+}
+
 /*
  * Allocate a new local endpoint.
  */
-static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
+static struct rxrpc_local *rxrpc_alloc_local(struct net *net,
                                             const struct sockaddr_rxrpc *srx)
 {
        struct rxrpc_local *local;
+       u32 tmp;
 
        local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
        if (local) {
                refcount_set(&local->ref, 1);
                atomic_set(&local->active_users, 1);
-               local->rxnet = rxnet;
+               local->net = net;
+               local->rxnet = rxrpc_net(net);
                INIT_HLIST_NODE(&local->link);
                init_rwsem(&local->defrag_sem);
                init_completion(&local->io_thread_ready);
                skb_queue_head_init(&local->rx_queue);
+               INIT_LIST_HEAD(&local->conn_attend_q);
                INIT_LIST_HEAD(&local->call_attend_q);
+
                local->client_bundles = RB_ROOT;
                spin_lock_init(&local->client_bundles_lock);
+               local->kill_all_client_conns = false;
+               INIT_LIST_HEAD(&local->idle_client_conns);
+               timer_setup(&local->client_conn_reap_timer,
+                           rxrpc_client_conn_reap_timeout, 0);
+
                spin_lock_init(&local->lock);
                rwlock_init(&local->services_lock);
                local->debug_id = atomic_inc_return(&rxrpc_debug_id);
                memcpy(&local->srx, srx, sizeof(*srx));
                local->srx.srx_service = 0;
+               idr_init(&local->conn_ids);
+               get_random_bytes(&tmp, sizeof(tmp));
+               tmp &= 0x3fffffff;
+               if (tmp == 0)
+                       tmp = 1;
+               idr_set_cursor(&local->conn_ids, tmp);
+               INIT_LIST_HEAD(&local->new_client_calls);
+               spin_lock_init(&local->client_call_lock);
+
                trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, 1);
        }
 
@@ -248,7 +276,7 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
                goto found;
        }
 
-       local = rxrpc_alloc_local(rxnet, srx);
+       local = rxrpc_alloc_local(net, srx);
        if (!local)
                goto nomem;
 
@@ -407,6 +435,7 @@ void rxrpc_destroy_local(struct rxrpc_local *local)
         * local endpoint.
         */
        rxrpc_purge_queue(&local->rx_queue);
+       rxrpc_purge_client_connections(local);
 }
 
 /*
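
Note the connection-ID seeding added above: the IDR cursor starts at a random non-zero value inside the 30-bit client CID space, so different endpoints hand out different ID sequences. A sketch of the seeding, on the assumption that IDs are subsequently allocated cyclically from the cursor:

    struct idr conn_ids;
    u32 tmp;

    idr_init(&conn_ids);
    get_random_bytes(&tmp, sizeof(tmp));
    tmp &= 0x3fffffff;              /* stay inside the 30-bit ID space */
    if (tmp == 0)
            tmp = 1;                /* avoid the reserved value 0 */
    idr_set_cursor(&conn_ids, tmp);

    /* A later idr_alloc_cyclic(&conn_ids, ptr, 1, 0x40000000, gfp)
     * would then allocate upwards from the random cursor, wrapping
     * within [1, 0x40000000).
     */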
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index 5905530..a0319c0 100644
 
 unsigned int rxrpc_net_id;
 
-static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
-{
-       struct rxrpc_net *rxnet =
-               container_of(timer, struct rxrpc_net, client_conn_reap_timer);
-
-       if (rxnet->live)
-               rxrpc_queue_work(&rxnet->client_conn_reaper);
-}
-
 static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
 {
        struct rxrpc_net *rxnet =
@@ -63,14 +54,6 @@ static __net_init int rxrpc_init_net(struct net *net)
                    rxrpc_service_conn_reap_timeout, 0);
 
        atomic_set(&rxnet->nr_client_conns, 0);
-       rxnet->kill_all_client_conns = false;
-       spin_lock_init(&rxnet->client_conn_cache_lock);
-       mutex_init(&rxnet->client_conn_discard_lock);
-       INIT_LIST_HEAD(&rxnet->idle_client_conns);
-       INIT_WORK(&rxnet->client_conn_reaper,
-                 rxrpc_discard_expired_client_conns);
-       timer_setup(&rxnet->client_conn_reap_timer,
-                   rxrpc_client_conn_reap_timeout, 0);
 
        INIT_HLIST_HEAD(&rxnet->local_endpoints);
        mutex_init(&rxnet->local_mutex);
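
The reaper state deleted from struct rxrpc_net here is the counterpart of the per-endpoint state added in local_object.c above. The timer idiom itself is unchanged: embed the timer in the owning object and recover the owner with container_of(). A minimal sketch with a hypothetical owner type:

    struct my_owner {                       /* hypothetical */
            struct timer_list reap_timer;
    };

    static void my_reap_timeout(struct timer_list *timer)
    {
            struct my_owner *owner =
                    container_of(timer, struct my_owner, reap_timer);

            /* softirq context: just flag the work and wake a thread */
    }

    static void my_owner_init(struct my_owner *owner)
    {
            timer_setup(&owner->reap_timer, my_reap_timeout, 0);
    }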
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 3d8c9f8..a9746be 100644
@@ -261,7 +261,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
                                      rxrpc_tx_point_call_ack);
        rxrpc_tx_backoff(call, ret);
 
-       if (call->state < RXRPC_CALL_COMPLETE) {
+       if (!__rxrpc_call_is_complete(call)) {
                if (ret < 0)
                        rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
                rxrpc_set_keepalive(call);
@@ -545,6 +545,62 @@ send_fragmentable:
 }
 
 /*
+ * Transmit a connection-level abort.
+ */
+void rxrpc_send_conn_abort(struct rxrpc_connection *conn)
+{
+       struct rxrpc_wire_header whdr;
+       struct msghdr msg;
+       struct kvec iov[2];
+       __be32 word;
+       size_t len;
+       u32 serial;
+       int ret;
+
+       msg.msg_name    = &conn->peer->srx.transport;
+       msg.msg_namelen = conn->peer->srx.transport_len;
+       msg.msg_control = NULL;
+       msg.msg_controllen = 0;
+       msg.msg_flags   = 0;
+
+       whdr.epoch      = htonl(conn->proto.epoch);
+       whdr.cid        = htonl(conn->proto.cid);
+       whdr.callNumber = 0;
+       whdr.seq        = 0;
+       whdr.type       = RXRPC_PACKET_TYPE_ABORT;
+       whdr.flags      = conn->out_clientflag;
+       whdr.userStatus = 0;
+       whdr.securityIndex = conn->security_ix;
+       whdr._rsvd      = 0;
+       whdr.serviceId  = htons(conn->service_id);
+
+       word            = htonl(conn->abort_code);
+
+       iov[0].iov_base = &whdr;
+       iov[0].iov_len  = sizeof(whdr);
+       iov[1].iov_base = &word;
+       iov[1].iov_len  = sizeof(word);
+
+       len = iov[0].iov_len + iov[1].iov_len;
+
+       serial = atomic_inc_return(&conn->serial);
+       whdr.serial = htonl(serial);
+
+       iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len);
+       ret = do_udp_sendmsg(conn->local->socket, &msg, len);
+       if (ret < 0) {
+               trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
+                                   rxrpc_tx_point_conn_abort);
+               _debug("sendmsg failed: %d", ret);
+               return;
+       }
+
+       trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);
+
+       conn->peer->last_tx_at = ktime_get_seconds();
+}
+
+/*
  * Reject a packet through the local endpoint.
  */
 void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
@@ -667,7 +723,7 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
 static inline void rxrpc_instant_resend(struct rxrpc_call *call,
                                        struct rxrpc_txbuf *txb)
 {
-       if (call->state < RXRPC_CALL_COMPLETE)
+       if (!__rxrpc_call_is_complete(call))
                kdebug("resend");
 }
 
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 4eecea2..8d7a715 100644
@@ -147,10 +147,10 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
  * assess the MTU size for the network interface through which this peer is
  * reached
  */
-static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx,
+static void rxrpc_assess_MTU_size(struct rxrpc_local *local,
                                  struct rxrpc_peer *peer)
 {
-       struct net *net = sock_net(&rx->sk);
+       struct net *net = local->net;
        struct dst_entry *dst;
        struct rtable *rt;
        struct flowi fl;
@@ -236,11 +236,11 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp,
 /*
  * Initialise peer record.
  */
-static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
+static void rxrpc_init_peer(struct rxrpc_local *local, struct rxrpc_peer *peer,
                            unsigned long hash_key)
 {
        peer->hash_key = hash_key;
-       rxrpc_assess_MTU_size(rx, peer);
+       rxrpc_assess_MTU_size(local, peer);
        peer->mtu = peer->if_mtu;
        peer->rtt_last_req = ktime_get_real();
 
@@ -272,8 +272,7 @@ static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
 /*
  * Set up a new peer.
  */
-static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
-                                           struct rxrpc_local *local,
+static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
                                            struct sockaddr_rxrpc *srx,
                                            unsigned long hash_key,
                                            gfp_t gfp)
@@ -285,7 +284,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
        peer = rxrpc_alloc_peer(local, gfp, rxrpc_peer_new_client);
        if (peer) {
                memcpy(&peer->srx, srx, sizeof(*srx));
-               rxrpc_init_peer(rx, peer, hash_key);
+               rxrpc_init_peer(local, peer, hash_key);
        }
 
        _leave(" = %p", peer);
@@ -304,14 +303,13 @@ static void rxrpc_free_peer(struct rxrpc_peer *peer)
  * since we've already done a search in the list from the non-reentrant context
  * (the data_ready handler) that is the only place we can add new peers.
  */
-void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
-                            struct rxrpc_peer *peer)
+void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
 {
        struct rxrpc_net *rxnet = local->rxnet;
        unsigned long hash_key;
 
        hash_key = rxrpc_peer_hash_key(local, &peer->srx);
-       rxrpc_init_peer(rx, peer, hash_key);
+       rxrpc_init_peer(local, peer, hash_key);
 
        spin_lock(&rxnet->peer_hash_lock);
        hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
@@ -322,8 +320,7 @@ void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
 /*
  * obtain a remote transport endpoint for the specified address
  */
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
-                                    struct rxrpc_local *local,
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
                                     struct sockaddr_rxrpc *srx, gfp_t gfp)
 {
        struct rxrpc_peer *peer, *candidate;
@@ -343,7 +340,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
                /* The peer is not yet present in hash - create a candidate
                 * for a new record and then redo the search.
                 */
-               candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp);
+               candidate = rxrpc_create_peer(local, srx, hash_key, gfp);
                if (!candidate) {
                        _leave(" = NULL [nomem]");
                        return NULL;
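
rxrpc_lookup_peer() keeps the usual optimistic create-then-recheck shape noted in the comment above: search without the lock, allocate a candidate (which may sleep), then search again under the lock and discard the candidate if another thread won the race. A condensed sketch, with hypothetical helpers standing in for the hash-table operations:

    peer = search_rcu(hash_key);                    /* lockless fast path */
    if (!peer) {
            candidate = alloc_candidate(gfp);       /* may sleep */
            if (!candidate)
                    return NULL;

            spin_lock(&rxnet->peer_hash_lock);
            peer = search_locked(hash_key);         /* re-check */
            if (!peer)
                    hash_add_rcu(rxnet->peer_hash,
                                 &candidate->hash_link, hash_key);
            spin_unlock(&rxnet->peer_hash_lock);

            if (peer)
                    free_candidate(candidate);      /* lost the race */
            else
                    peer = candidate;
    }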
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 3a59591..750158a 100644
 
 static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
        [RXRPC_CONN_UNUSED]                     = "Unused  ",
+       [RXRPC_CONN_CLIENT_UNSECURED]           = "ClUnsec ",
        [RXRPC_CONN_CLIENT]                     = "Client  ",
        [RXRPC_CONN_SERVICE_PREALLOC]           = "SvPrealc",
        [RXRPC_CONN_SERVICE_UNSECURED]          = "SvUnsec ",
        [RXRPC_CONN_SERVICE_CHALLENGING]        = "SvChall ",
        [RXRPC_CONN_SERVICE]                    = "SvSecure",
-       [RXRPC_CONN_REMOTELY_ABORTED]           = "RmtAbort",
-       [RXRPC_CONN_LOCALLY_ABORTED]            = "LocAbort",
+       [RXRPC_CONN_ABORTED]                    = "Aborted ",
 };
 
 /*
@@ -51,6 +51,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
        struct rxrpc_local *local;
        struct rxrpc_call *call;
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+       enum rxrpc_call_state state;
        unsigned long timeout = 0;
        rxrpc_seq_t acks_hard_ack;
        char lbuff[50], rbuff[50];
@@ -75,7 +76,8 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
 
        sprintf(rbuff, "%pISpc", &call->dest_srx.transport);
 
-       if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
+       state = rxrpc_call_state(call);
+       if (state != RXRPC_CALL_SERVER_PREALLOC) {
                timeout = READ_ONCE(call->expect_rx_by);
                timeout -= jiffies;
        }
@@ -92,7 +94,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
                   call->call_id,
                   rxrpc_is_service_call(call) ? "Svc" : "Clt",
                   refcount_read(&call->ref),
-                  rxrpc_call_states[call->state],
+                  rxrpc_call_states[state],
                   call->abort_code,
                   call->debug_id,
                   acks_hard_ack, READ_ONCE(call->tx_top) - acks_hard_ack,
@@ -143,6 +145,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
 {
        struct rxrpc_connection *conn;
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+       const char *state;
        char lbuff[50], rbuff[50];
 
        if (v == &rxnet->conn_proc_list) {
@@ -163,9 +166,11 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
        }
 
        sprintf(lbuff, "%pISpc", &conn->local->srx.transport);
-
        sprintf(rbuff, "%pISpc", &conn->peer->srx.transport);
 print:
+       state = rxrpc_is_conn_aborted(conn) ?
+               rxrpc_call_completions[conn->completion] :
+               rxrpc_conn_states[conn->state];
        seq_printf(seq,
                   "UDP   %-47.47s %-47.47s %4x %08x %s %3u %3d"
                   " %s %08x %08x %08x %08x %08x %08x %08x\n",
@@ -176,7 +181,7 @@ print:
                   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
                   refcount_read(&conn->ref),
                   atomic_read(&conn->active),
-                  rxrpc_conn_states[conn->state],
+                  state,
                   key_serial(conn->key),
                   atomic_read(&conn->serial),
                   conn->hi_serial,
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 6ebd644..dd54cee 100644
@@ -59,85 +59,6 @@ void rxrpc_notify_socket(struct rxrpc_call *call)
 }
 
 /*
- * Transition a call to the complete state.
- */
-bool __rxrpc_set_call_completion(struct rxrpc_call *call,
-                                enum rxrpc_call_completion compl,
-                                u32 abort_code,
-                                int error)
-{
-       if (call->state < RXRPC_CALL_COMPLETE) {
-               call->abort_code = abort_code;
-               call->error = error;
-               call->completion = compl;
-               call->state = RXRPC_CALL_COMPLETE;
-               trace_rxrpc_call_complete(call);
-               wake_up(&call->waitq);
-               rxrpc_notify_socket(call);
-               return true;
-       }
-       return false;
-}
-
-bool rxrpc_set_call_completion(struct rxrpc_call *call,
-                              enum rxrpc_call_completion compl,
-                              u32 abort_code,
-                              int error)
-{
-       bool ret = false;
-
-       if (call->state < RXRPC_CALL_COMPLETE) {
-               write_lock(&call->state_lock);
-               ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
-               write_unlock(&call->state_lock);
-       }
-       return ret;
-}
-
-/*
- * Record that a call successfully completed.
- */
-bool __rxrpc_call_completed(struct rxrpc_call *call)
-{
-       return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
-}
-
-bool rxrpc_call_completed(struct rxrpc_call *call)
-{
-       bool ret = false;
-
-       if (call->state < RXRPC_CALL_COMPLETE) {
-               write_lock(&call->state_lock);
-               ret = __rxrpc_call_completed(call);
-               write_unlock(&call->state_lock);
-       }
-       return ret;
-}
-
-/*
- * Record that a call is locally aborted.
- */
-bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
-                       rxrpc_seq_t seq, u32 abort_code, int error)
-{
-       trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
-                         abort_code, error);
-       return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
-                                          abort_code, error);
-}
-
-bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
-                     rxrpc_seq_t seq, u32 abort_code, int error)
-{
-       bool ret;
-
-       write_lock(&call->state_lock);
-       ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
-       write_unlock(&call->state_lock);
-       return ret;
-}
-
-/*
  * Pass a call terminating message to userspace.
  */
 static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
@@ -168,7 +89,7 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
                break;
        default:
-               pr_err("Invalid terminal call state %u\n", call->state);
+               pr_err("Invalid terminal call state %u\n", call->completion);
                BUG();
                break;
        }
@@ -180,41 +101,6 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
 }
 
 /*
- * End the packet reception phase.
- */
-static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
-{
-       rxrpc_seq_t whigh = READ_ONCE(call->rx_highest_seq);
-
-       _enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);
-
-       trace_rxrpc_receive(call, rxrpc_receive_end, 0, whigh);
-
-       if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY)
-               rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack);
-
-       write_lock(&call->state_lock);
-
-       switch (call->state) {
-       case RXRPC_CALL_CLIENT_RECV_REPLY:
-               __rxrpc_call_completed(call);
-               write_unlock(&call->state_lock);
-               break;
-
-       case RXRPC_CALL_SERVER_RECV_REQUEST:
-               call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
-               call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
-               write_unlock(&call->state_lock);
-               rxrpc_propose_delay_ACK(call, serial,
-                                       rxrpc_propose_ack_processing_op);
-               break;
-       default:
-               write_unlock(&call->state_lock);
-               break;
-       }
-}
-
-/*
  * Discard a packet we've used up and advance the Rx window by one.
  */
 static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
@@ -244,10 +130,9 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
 
        trace_rxrpc_receive(call, last ? rxrpc_receive_rotate_last : rxrpc_receive_rotate,
                            serial, call->rx_consumed);
-       if (last) {
-               rxrpc_end_rx_phase(call, serial);
-               return;
-       }
+
+       if (last)
+               set_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags);
 
        /* Check to see if there's an ACK that needs sending. */
        acked = atomic_add_return(call->rx_consumed - old_consumed,
@@ -272,7 +157,8 @@ static int rxrpc_verify_data(struct rxrpc_call *call, struct sk_buff *skb)
 /*
  * Deliver messages to a call.  This keeps processing packets until the buffer
  * is filled and we find either more DATA (returns 0) or the end of the DATA
- * (returns 1).  If more packets are required, it returns -EAGAIN.
+ * (returns 1).  If more packets are required, it returns -EAGAIN and if the
+ * call has failed it returns -EIO.
  */
 static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
                              struct msghdr *msg, struct iov_iter *iter,
@@ -288,7 +174,13 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
        rx_pkt_offset = call->rx_pkt_offset;
        rx_pkt_len = call->rx_pkt_len;
 
-       if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
+       if (rxrpc_call_has_failed(call)) {
+               seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1;
+               ret = -EIO;
+               goto done;
+       }
+
+       if (test_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags)) {
                seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1;
                ret = 1;
                goto done;
@@ -312,14 +204,15 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 
                if (rx_pkt_offset == 0) {
                        ret2 = rxrpc_verify_data(call, skb);
-                       rx_pkt_offset = sp->offset;
-                       rx_pkt_len = sp->len;
                        trace_rxrpc_recvdata(call, rxrpc_recvmsg_next, seq,
-                                            rx_pkt_offset, rx_pkt_len, ret2);
+                                            sp->offset, sp->len, ret2);
                        if (ret2 < 0) {
+                               kdebug("verify = %d", ret2);
                                ret = ret2;
                                goto out;
                        }
+                       rx_pkt_offset = sp->offset;
+                       rx_pkt_len = sp->len;
                } else {
                        trace_rxrpc_recvdata(call, rxrpc_recvmsg_cont, seq,
                                             rx_pkt_offset, rx_pkt_len, 0);
@@ -494,36 +387,36 @@ try_again:
                msg->msg_namelen = len;
        }
 
-       switch (READ_ONCE(call->state)) {
-       case RXRPC_CALL_CLIENT_RECV_REPLY:
-       case RXRPC_CALL_SERVER_RECV_REQUEST:
-       case RXRPC_CALL_SERVER_ACK_REQUEST:
-               ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
-                                        flags, &copied);
-               if (ret == -EAGAIN)
-                       ret = 0;
-
-               if (!skb_queue_empty(&call->recvmsg_queue))
-                       rxrpc_notify_socket(call);
-               break;
-       default:
+       ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
+                                flags, &copied);
+       if (ret == -EAGAIN)
                ret = 0;
-               break;
-       }
-
+       if (ret == -EIO)
+               goto call_failed;
        if (ret < 0)
                goto error_unlock_call;
 
-       if (call->state == RXRPC_CALL_COMPLETE) {
-               ret = rxrpc_recvmsg_term(call, msg);
-               if (ret < 0)
-                       goto error_unlock_call;
-               if (!(flags & MSG_PEEK))
-                       rxrpc_release_call(rx, call);
-               msg->msg_flags |= MSG_EOR;
-               ret = 1;
-       }
+       if (rxrpc_call_is_complete(call) &&
+           skb_queue_empty(&call->recvmsg_queue))
+               goto call_complete;
+       if (rxrpc_call_has_failed(call))
+               goto call_failed;
 
+       rxrpc_notify_socket(call);
+       goto not_yet_complete;
+
+call_failed:
+       rxrpc_purge_queue(&call->recvmsg_queue);
+call_complete:
+       ret = rxrpc_recvmsg_term(call, msg);
+       if (ret < 0)
+               goto error_unlock_call;
+       if (!(flags & MSG_PEEK))
+               rxrpc_release_call(rx, call);
+       msg->msg_flags |= MSG_EOR;
+       ret = 1;
+
+not_yet_complete:
        if (ret == 0)
                msg->msg_flags |= MSG_MORE;
        else
@@ -586,49 +479,34 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
        size_t offset = 0;
        int ret;
 
-       _enter("{%d,%s},%zu,%d",
-              call->debug_id, rxrpc_call_states[call->state],
-              *_len, want_more);
-
-       ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_SECURING);
+       _enter("{%d},%zu,%d", call->debug_id, *_len, want_more);
 
        mutex_lock(&call->user_mutex);
 
-       switch (READ_ONCE(call->state)) {
-       case RXRPC_CALL_CLIENT_RECV_REPLY:
-       case RXRPC_CALL_SERVER_RECV_REQUEST:
-       case RXRPC_CALL_SERVER_ACK_REQUEST:
-               ret = rxrpc_recvmsg_data(sock, call, NULL, iter,
-                                        *_len, 0, &offset);
-               *_len -= offset;
-               if (ret < 0)
-                       goto out;
-
-               /* We can only reach here with a partially full buffer if we
-                * have reached the end of the data.  We must otherwise have a
-                * full buffer or have been given -EAGAIN.
-                */
-               if (ret == 1) {
-                       if (iov_iter_count(iter) > 0)
-                               goto short_data;
-                       if (!want_more)
-                               goto read_phase_complete;
-                       ret = 0;
-                       goto out;
-               }
-
-               if (!want_more)
-                       goto excess_data;
+       ret = rxrpc_recvmsg_data(sock, call, NULL, iter, *_len, 0, &offset);
+       *_len -= offset;
+       if (ret == -EIO)
+               goto call_failed;
+       if (ret < 0)
                goto out;
 
-       case RXRPC_CALL_COMPLETE:
-               goto call_complete;
-
-       default:
-               ret = -EINPROGRESS;
+       /* We can only reach here with a partially full buffer if we have
+        * reached the end of the data.  We must otherwise have a full buffer
+        * or have been given -EAGAIN.
+        */
+       if (ret == 1) {
+               if (iov_iter_count(iter) > 0)
+                       goto short_data;
+               if (!want_more)
+                       goto read_phase_complete;
+               ret = 0;
                goto out;
        }
 
+       if (!want_more)
+               goto excess_data;
+       goto out;
+
 read_phase_complete:
        ret = 1;
 out:
@@ -639,14 +517,18 @@ out:
        return ret;
 
 short_data:
-       trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data"));
+       trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_short_data,
+                         call->cid, call->call_id, call->rx_consumed,
+                         0, -EBADMSG);
        ret = -EBADMSG;
        goto out;
 excess_data:
-       trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data"));
+       trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_excess_data,
+                         call->cid, call->call_id, call->rx_consumed,
+                         0, -EMSGSIZE);
        ret = -EMSGSIZE;
        goto out;
-call_complete:
+call_failed:
        *_abort = call->abort_code;
        ret = call->error;
        if (call->completion == RXRPC_CALL_SUCCEEDED) {
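
The rewritten rxrpc_recvmsg_data() has a four-way return contract, per the updated comment in the recvmsg.c hunk above: 0 means the buffer filled with more DATA to come, 1 means end of the DATA, -EAGAIN means more packets are needed, and -EIO means the call failed. A hypothetical caller dispatching on it:

    ret = rxrpc_recvmsg_data(sock, call, msg, iter, len, flags, &copied);
    switch (ret) {
    case 1:             /* end of the call's DATA; phase complete */
            break;
    case 0:             /* buffer filled; more DATA expected */
            break;
    case -EAGAIN:       /* need more packets; wait and retry */
            break;
    case -EIO:          /* call failed; surface abort code/error */
            break;
    default:            /* other error, e.g. -EBADMSG from verify */
            break;
    }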
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index d123372..1bf571a 100644
@@ -411,18 +411,15 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_crypt iv;
        struct scatterlist sg[16];
-       bool aborted;
        u32 data_size, buf;
        u16 check;
        int ret;
 
        _enter("");
 
-       if (sp->len < 8) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_hdr", "V1H",
-                                            RXKADSEALEDINCON);
-               goto protocol_error;
-       }
+       if (sp->len < 8)
+               return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+                                         rxkad_abort_1_short_header);
 
        /* Decrypt the skbuff in-place.  TODO: We really want to decrypt
         * directly into the target buffer.
@@ -442,11 +439,9 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
        skcipher_request_zero(req);
 
        /* Extract the decrypted packet length */
-       if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_len", "XV1",
-                                            RXKADDATALEN);
-               goto protocol_error;
-       }
+       if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0)
+               return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
+                                         rxkad_abort_1_short_encdata);
        sp->offset += sizeof(sechdr);
        sp->len    -= sizeof(sechdr);
 
@@ -456,26 +451,16 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
        check = buf >> 16;
        check ^= seq ^ call->call_id;
        check &= 0xffff;
-       if (check != 0) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_check", "V1C",
-                                            RXKADSEALEDINCON);
-               goto protocol_error;
-       }
-
-       if (data_size > sp->len) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_datalen", "V1L",
-                                            RXKADDATALEN);
-               goto protocol_error;
-       }
+       if (check != 0)
+               return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+                                         rxkad_abort_1_short_check);
+       if (data_size > sp->len)
+               return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
+                                         rxkad_abort_1_short_data);
        sp->len = data_size;
 
        _leave(" = 0 [dlen=%x]", data_size);
        return 0;
-
-protocol_error:
-       if (aborted)
-               rxrpc_send_abort_packet(call);
-       return -EPROTO;
 }
 
 /*
@@ -490,18 +475,15 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_crypt iv;
        struct scatterlist _sg[4], *sg;
-       bool aborted;
        u32 data_size, buf;
        u16 check;
        int nsg, ret;
 
        _enter(",{%d}", sp->len);
 
-       if (sp->len < 8) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_hdr", "V2H",
-                                            RXKADSEALEDINCON);
-               goto protocol_error;
-       }
+       if (sp->len < 8)
+               return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+                                         rxkad_abort_2_short_header);
 
        /* Decrypt the skbuff in-place.  TODO: We really want to decrypt
         * directly into the target buffer.
@@ -513,7 +495,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
        } else {
                sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);
                if (!sg)
-                       goto nomem;
+                       return -ENOMEM;
        }
 
        sg_init_table(sg, nsg);
@@ -537,11 +519,9 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
                kfree(sg);
 
        /* Extract the decrypted packet length */
-       if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_len", "XV2",
-                                            RXKADDATALEN);
-               goto protocol_error;
-       }
+       if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0)
+               return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
+                                         rxkad_abort_2_short_len);
        sp->offset += sizeof(sechdr);
        sp->len    -= sizeof(sechdr);
 
@@ -551,30 +531,17 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
        check = buf >> 16;
        check ^= seq ^ call->call_id;
        check &= 0xffff;
-       if (check != 0) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_check", "V2C",
-                                            RXKADSEALEDINCON);
-               goto protocol_error;
-       }
+       if (check != 0)
+               return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+                                         rxkad_abort_2_short_check);
 
-       if (data_size > sp->len) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_datalen", "V2L",
-                                            RXKADDATALEN);
-               goto protocol_error;
-       }
+       if (data_size > sp->len)
+               return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
+                                         rxkad_abort_2_short_data);
 
        sp->len = data_size;
        _leave(" = 0 [dlen=%x]", data_size);
        return 0;
-
-protocol_error:
-       if (aborted)
-               rxrpc_send_abort_packet(call);
-       return -EPROTO;
-
-nomem:
-       _leave(" = -ENOMEM");
-       return -ENOMEM;
 }
 
 /*
@@ -590,7 +557,6 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
                __be32 buf[2];
        } crypto __aligned(8);
        rxrpc_seq_t seq = sp->hdr.seq;
-       bool aborted;
        int ret;
        u16 cksum;
        u32 x, y;
@@ -627,9 +593,9 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
                cksum = 1; /* zero checksums are not permitted */
 
        if (cksum != sp->hdr.cksum) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_csum", "VCK",
-                                            RXKADSEALEDINCON);
-               goto protocol_error;
+               ret = rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+                                        rxkad_abort_bad_checksum);
+               goto out;
        }
 
        switch (call->conn->security_level) {
@@ -647,13 +613,9 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
                break;
        }
 
+out:
        skcipher_request_free(req);
        return ret;
-
-protocol_error:
-       if (aborted)
-               rxrpc_send_abort_packet(call);
-       return -EPROTO;
 }
 
 /*
@@ -821,34 +783,30 @@ static int rxkad_encrypt_response(struct rxrpc_connection *conn,
  * respond to a challenge packet
  */
 static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
-                                     struct sk_buff *skb,
-                                     u32 *_abort_code)
+                                     struct sk_buff *skb)
 {
        const struct rxrpc_key_token *token;
        struct rxkad_challenge challenge;
        struct rxkad_response *resp;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-       const char *eproto;
-       u32 version, nonce, min_level, abort_code;
-       int ret;
+       u32 version, nonce, min_level;
+       int ret = -EPROTO;
 
        _enter("{%d,%x}", conn->debug_id, key_serial(conn->key));
 
-       eproto = tracepoint_string("chall_no_key");
-       abort_code = RX_PROTOCOL_ERROR;
        if (!conn->key)
-               goto protocol_error;
+               return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
+                                       rxkad_abort_chall_no_key);
 
-       abort_code = RXKADEXPIRED;
        ret = key_validate(conn->key);
        if (ret < 0)
-               goto other_error;
+               return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, ret,
+                                       rxkad_abort_chall_key_expired);
 
-       eproto = tracepoint_string("chall_short");
-       abort_code = RXKADPACKETSHORT;
        if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
                          &challenge, sizeof(challenge)) < 0)
-               goto protocol_error;
+               return rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
+                                       rxkad_abort_chall_short);
 
        version = ntohl(challenge.version);
        nonce = ntohl(challenge.nonce);
@@ -856,15 +814,13 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
 
        trace_rxrpc_rx_challenge(conn, sp->hdr.serial, version, nonce, min_level);
 
-       eproto = tracepoint_string("chall_ver");
-       abort_code = RXKADINCONSISTENCY;
        if (version != RXKAD_VERSION)
-               goto protocol_error;
+               return rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO,
+                                       rxkad_abort_chall_version);
 
-       abort_code = RXKADLEVELFAIL;
-       ret = -EACCES;
        if (conn->security_level < min_level)
-               goto other_error;
+               return rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EACCES,
+                                       rxkad_abort_chall_level);
 
        token = conn->key->payload.data[0];
 
@@ -893,13 +849,6 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
                ret = rxkad_send_response(conn, &sp->hdr, resp, token->kad);
        kfree(resp);
        return ret;
-
-protocol_error:
-       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
-       ret = -EPROTO;
-other_error:
-       *_abort_code = abort_code;
-       return ret;
 }
 
 /*
@@ -910,20 +859,15 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
                                struct sk_buff *skb,
                                void *ticket, size_t ticket_len,
                                struct rxrpc_crypt *_session_key,
-                               time64_t *_expiry,
-                               u32 *_abort_code)
+                               time64_t *_expiry)
 {
        struct skcipher_request *req;
-       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_crypt iv, key;
        struct scatterlist sg[1];
        struct in_addr addr;
        unsigned int life;
-       const char *eproto;
        time64_t issue, now;
        bool little_endian;
-       int ret;
-       u32 abort_code;
        u8 *p, *q, *name, *end;
 
        _enter("{%d},{%x}", conn->debug_id, key_serial(server_key));
@@ -935,10 +879,9 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
 
        memcpy(&iv, &server_key->payload.data[2], sizeof(iv));
 
-       ret = -ENOMEM;
        req = skcipher_request_alloc(server_key->payload.data[0], GFP_NOFS);
        if (!req)
-               goto temporary_error;
+               return -ENOMEM;
 
        sg_init_one(&sg[0], ticket, ticket_len);
        skcipher_request_set_callback(req, 0, NULL, NULL);
@@ -949,18 +892,21 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
        p = ticket;
        end = p + ticket_len;
 
-#define Z(field)                                       \
-       ({                                              \
-               u8 *__str = p;                          \
-               eproto = tracepoint_string("rxkad_bad_"#field); \
-               q = memchr(p, 0, end - p);              \
-               if (!q || q - p > (field##_SZ))         \
-                       goto bad_ticket;                \
-               for (; p < q; p++)                      \
-                       if (!isprint(*p))               \
-                               goto bad_ticket;        \
-               p++;                                    \
-               __str;                                  \
+#define Z(field, fieldl)                                               \
+       ({                                                              \
+               u8 *__str = p;                                          \
+               q = memchr(p, 0, end - p);                              \
+               if (!q || q - p > field##_SZ)                           \
+                       return rxrpc_abort_conn(                        \
+                               conn, skb, RXKADBADTICKET, -EPROTO,     \
+                               rxkad_abort_resp_tkt_##fieldl);         \
+               for (; p < q; p++)                                      \
+                       if (!isprint(*p))                               \
+                               return rxrpc_abort_conn(                \
+                                       conn, skb, RXKADBADTICKET, -EPROTO, \
+                                       rxkad_abort_resp_tkt_##fieldl); \
+               p++;                                                    \
+               __str;                                                  \
        })
 
        /* extract the ticket flags */
@@ -969,20 +915,20 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
        p++;
 
        /* extract the authentication name */
-       name = Z(ANAME);
+       name = Z(ANAME, aname);
        _debug("KIV ANAME: %s", name);
 
        /* extract the principal's instance */
-       name = Z(INST);
+       name = Z(INST, inst);
        _debug("KIV INST : %s", name);
 
        /* extract the principal's authentication domain */
-       name = Z(REALM);
+       name = Z(REALM, realm);
        _debug("KIV REALM: %s", name);
 
-       eproto = tracepoint_string("rxkad_bad_len");
        if (end - p < 4 + 8 + 4 + 2)
-               goto bad_ticket;
+               return rxrpc_abort_conn(conn, skb, RXKADBADTICKET, -EPROTO,
+                                       rxkad_abort_resp_tkt_short);
 
        /* get the IPv4 address of the entity that requested the ticket */
        memcpy(&addr, p, sizeof(addr));
@@ -1014,38 +960,23 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
        _debug("KIV ISSUE: %llx [%llx]", issue, now);
 
        /* check the ticket is in date */
-       if (issue > now) {
-               abort_code = RXKADNOAUTH;
-               ret = -EKEYREJECTED;
-               goto other_error;
-       }
-
-       if (issue < now - life) {
-               abort_code = RXKADEXPIRED;
-               ret = -EKEYEXPIRED;
-               goto other_error;
-       }
+       if (issue > now)
+               return rxrpc_abort_conn(conn, skb, RXKADNOAUTH, -EKEYREJECTED,
+                                       rxkad_abort_resp_tkt_future);
+       if (issue < now - life)
+               return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, -EKEYEXPIRED,
+                                       rxkad_abort_resp_tkt_expired);
 
        *_expiry = issue + life;
 
        /* get the service name */
-       name = Z(SNAME);
+       name = Z(SNAME, sname);
        _debug("KIV SNAME: %s", name);
 
        /* get the service instance name */
-       name = Z(INST);
+       name = Z(INST, sinst);
        _debug("KIV SINST: %s", name);
        return 0;
-
-bad_ticket:
-       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
-       abort_code = RXKADBADTICKET;
-       ret = -EPROTO;
-other_error:
-       *_abort_code = abort_code;
-       return ret;
-temporary_error:
-       return ret;
 }
 
 /*
@@ -1086,17 +1017,15 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
  * verify a response
  */
 static int rxkad_verify_response(struct rxrpc_connection *conn,
-                                struct sk_buff *skb,
-                                u32 *_abort_code)
+                                struct sk_buff *skb)
 {
        struct rxkad_response *response;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_crypt session_key;
        struct key *server_key;
-       const char *eproto;
        time64_t expiry;
        void *ticket;
-       u32 abort_code, version, kvno, ticket_len, level;
+       u32 version, kvno, ticket_len, level;
        __be32 csum;
        int ret, i;
 
@@ -1104,22 +1033,18 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
 
        server_key = rxrpc_look_up_server_security(conn, skb, 0, 0);
        if (IS_ERR(server_key)) {
-               switch (PTR_ERR(server_key)) {
+               ret = PTR_ERR(server_key);
+               switch (ret) {
                case -ENOKEY:
-                       abort_code = RXKADUNKNOWNKEY;
-                       break;
+                       return rxrpc_abort_conn(conn, skb, RXKADUNKNOWNKEY, ret,
+                                               rxkad_abort_resp_nokey);
                case -EKEYEXPIRED:
-                       abort_code = RXKADEXPIRED;
-                       break;
+                       return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, ret,
+                                               rxkad_abort_resp_key_expired);
                default:
-                       abort_code = RXKADNOAUTH;
-                       break;
+                       return rxrpc_abort_conn(conn, skb, RXKADNOAUTH, ret,
+                                               rxkad_abort_resp_key_rejected);
                }
-               trace_rxrpc_abort(0, "SVK",
-                                 sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                                 abort_code, PTR_ERR(server_key));
-               *_abort_code = abort_code;
-               return -EPROTO;
        }
 
        ret = -ENOMEM;
@@ -1127,11 +1052,12 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
        if (!response)
                goto temporary_error;
 
-       eproto = tracepoint_string("rxkad_rsp_short");
-       abort_code = RXKADPACKETSHORT;
        if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
-                         response, sizeof(*response)) < 0)
+                         response, sizeof(*response)) < 0) {
+               rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
+                                rxkad_abort_resp_short);
                goto protocol_error;
+       }
 
        version = ntohl(response->version);
        ticket_len = ntohl(response->ticket_len);
@@ -1139,20 +1065,23 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
 
        trace_rxrpc_rx_response(conn, sp->hdr.serial, version, kvno, ticket_len);
 
-       eproto = tracepoint_string("rxkad_rsp_ver");
-       abort_code = RXKADINCONSISTENCY;
-       if (version != RXKAD_VERSION)
+       if (version != RXKAD_VERSION) {
+               rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO,
+                                rxkad_abort_resp_version);
                goto protocol_error;
+       }
 
-       eproto = tracepoint_string("rxkad_rsp_tktlen");
-       abort_code = RXKADTICKETLEN;
-       if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN)
+       if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN) {
+               rxrpc_abort_conn(conn, skb, RXKADTICKETLEN, -EPROTO,
+                                rxkad_abort_resp_tkt_len);
                goto protocol_error;
+       }
 
-       eproto = tracepoint_string("rxkad_rsp_unkkey");
-       abort_code = RXKADUNKNOWNKEY;
-       if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5)
+       if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5) {
+               rxrpc_abort_conn(conn, skb, RXKADUNKNOWNKEY, -EPROTO,
+                                rxkad_abort_resp_unknown_tkt);
                goto protocol_error;
+       }
 
        /* extract the kerberos ticket and decrypt and decode it */
        ret = -ENOMEM;
@@ -1160,15 +1089,15 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
        if (!ticket)
                goto temporary_error_free_resp;
 
-       eproto = tracepoint_string("rxkad_tkt_short");
-       abort_code = RXKADPACKETSHORT;
-       ret = skb_copy_bits(skb, sizeof(struct rxrpc_wire_header) + sizeof(*response),
-                           ticket, ticket_len);
-       if (ret < 0)
-               goto temporary_error_free_ticket;
+       if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header) + sizeof(*response),
+                         ticket, ticket_len) < 0) {
+               rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
+                                rxkad_abort_resp_short_tkt);
+               goto protocol_error;
+       }
 
        ret = rxkad_decrypt_ticket(conn, server_key, skb, ticket, ticket_len,
-                                  &session_key, &expiry, _abort_code);
+                                  &session_key, &expiry);
        if (ret < 0)
                goto temporary_error_free_ticket;
 
@@ -1176,56 +1105,61 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
         * response */
        rxkad_decrypt_response(conn, response, &session_key);
 
-       eproto = tracepoint_string("rxkad_rsp_param");
-       abort_code = RXKADSEALEDINCON;
-       if (ntohl(response->encrypted.epoch) != conn->proto.epoch)
-               goto protocol_error_free;
-       if (ntohl(response->encrypted.cid) != conn->proto.cid)
-               goto protocol_error_free;
-       if (ntohl(response->encrypted.securityIndex) != conn->security_ix)
+       if (ntohl(response->encrypted.epoch) != conn->proto.epoch ||
+           ntohl(response->encrypted.cid) != conn->proto.cid ||
+           ntohl(response->encrypted.securityIndex) != conn->security_ix) {
+               rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+                                rxkad_abort_resp_bad_param);
                goto protocol_error_free;
+       }
+
        csum = response->encrypted.checksum;
        response->encrypted.checksum = 0;
        rxkad_calc_response_checksum(response);
-       eproto = tracepoint_string("rxkad_rsp_csum");
-       if (response->encrypted.checksum != csum)
+       if (response->encrypted.checksum != csum) {
+               rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+                                rxkad_abort_resp_bad_checksum);
                goto protocol_error_free;
+       }
 
-       spin_lock(&conn->bundle->channel_lock);
        for (i = 0; i < RXRPC_MAXCALLS; i++) {
-               struct rxrpc_call *call;
                u32 call_id = ntohl(response->encrypted.call_id[i]);
+               u32 counter = READ_ONCE(conn->channels[i].call_counter);
+
+               if (call_id > INT_MAX) {
+                       rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+                                        rxkad_abort_resp_bad_callid);
+                       goto protocol_error_free;
+               }
 
-               eproto = tracepoint_string("rxkad_rsp_callid");
-               if (call_id > INT_MAX)
-                       goto protocol_error_unlock;
-
-               eproto = tracepoint_string("rxkad_rsp_callctr");
-               if (call_id < conn->channels[i].call_counter)
-                       goto protocol_error_unlock;
-
-               eproto = tracepoint_string("rxkad_rsp_callst");
-               if (call_id > conn->channels[i].call_counter) {
-                       call = rcu_dereference_protected(
-                               conn->channels[i].call,
-                               lockdep_is_held(&conn->bundle->channel_lock));
-                       if (call && call->state < RXRPC_CALL_COMPLETE)
-                               goto protocol_error_unlock;
+               if (call_id < counter) {
+                       rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+                                        rxkad_abort_resp_call_ctr);
+                       goto protocol_error_free;
+               }
+
+               if (call_id > counter) {
+                       if (conn->channels[i].call) {
+                               rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+                                                rxkad_abort_resp_call_state);
+                               goto protocol_error_free;
+                       }
                        conn->channels[i].call_counter = call_id;
                }
        }
-       spin_unlock(&conn->bundle->channel_lock);
 
-       eproto = tracepoint_string("rxkad_rsp_seq");
-       abort_code = RXKADOUTOFSEQUENCE;
-       if (ntohl(response->encrypted.inc_nonce) != conn->rxkad.nonce + 1)
+       if (ntohl(response->encrypted.inc_nonce) != conn->rxkad.nonce + 1) {
+               rxrpc_abort_conn(conn, skb, RXKADOUTOFSEQUENCE, -EPROTO,
+                                rxkad_abort_resp_ooseq);
                goto protocol_error_free;
+       }
 
-       eproto = tracepoint_string("rxkad_rsp_level");
-       abort_code = RXKADLEVELFAIL;
        level = ntohl(response->encrypted.level);
-       if (level > RXRPC_SECURITY_ENCRYPT)
+       if (level > RXRPC_SECURITY_ENCRYPT) {
+               rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EPROTO,
+                                rxkad_abort_resp_level);
                goto protocol_error_free;
+       }
        conn->security_level = level;
 
        /* create a key to hold the security data and expiration time - after
@@ -1240,15 +1174,11 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
        _leave(" = 0");
        return 0;
 
-protocol_error_unlock:
-       spin_unlock(&conn->bundle->channel_lock);
 protocol_error_free:
        kfree(ticket);
 protocol_error:
        kfree(response);
-       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
        key_put(server_key);
-       *_abort_code = abort_code;
        return -EPROTO;
 
 temporary_error_free_ticket:
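
Throughout the rxkad hunks, the staged abort_code/eproto locals and goto tails are replaced by direct returns through rxrpc_abort_conn(), which records the abort code, errno and tracepoint reason in one call. Its signature, as inferred from the call sites above (the real declaration lives in rxrpc's internal headers):

    int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
                         s32 abort_code, int err,
                         enum rxrpc_abort_reason why);

so a protocol check collapses from a goto chain to a single statement, e.g.:

    if (sp->len < 8)
            return rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
                                    rxkad_abort_1_short_header);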
diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c
index d33a109..16dcabb 100644
@@ -10,6 +10,8 @@
 #include <linux/slab.h>
 #include <net/sock.h>
 #include <net/af_rxrpc.h>
+#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
+#include <trace/events/rxrpc.h>
 
 MODULE_DESCRIPTION("rxperf test server (afs)");
 MODULE_AUTHOR("Red Hat, Inc.");
@@ -307,12 +309,14 @@ static void rxperf_deliver_to_call(struct work_struct *work)
                case -EOPNOTSUPP:
                        abort_code = RXGEN_OPCODE;
                        rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
-                                               abort_code, ret, "GOP");
+                                               abort_code, ret,
+                                               rxperf_abort_op_not_supported);
                        goto call_complete;
                case -ENOTSUPP:
                        abort_code = RX_USER_ABORT;
                        rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
-                                               abort_code, ret, "GUA");
+                                               abort_code, ret,
+                                               rxperf_abort_op_not_supported);
                        goto call_complete;
                case -EIO:
                        pr_err("Call %u in bad state %u\n",
@@ -324,11 +328,13 @@ static void rxperf_deliver_to_call(struct work_struct *work)
                case -ENOMEM:
                case -EFAULT:
                        rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
-                                               RXGEN_SS_UNMARSHAL, ret, "GUM");
+                                               RXGEN_SS_UNMARSHAL, ret,
+                                               rxperf_abort_unmarshal_error);
                        goto call_complete;
                default:
                        rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
-                                               RX_CALL_DEAD, ret, "GER");
+                                               RX_CALL_DEAD, ret,
+                                               rxperf_abort_general_error);
                        goto call_complete;
                }
        }
@@ -523,7 +529,8 @@ static int rxperf_process_call(struct rxperf_call *call)
 
        if (n == -ENOMEM)
                rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
-                                       RXGEN_SS_MARSHAL, -ENOMEM, "GOM");
+                                       RXGEN_SS_MARSHAL, -ENOMEM,
+                                       rxperf_abort_oom);
        return n;
 }
 
diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
index ab968f6..cb8dd1d 100644
@@ -97,38 +97,31 @@ found:
  */
 int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
 {
-       const struct rxrpc_security *sec;
        struct rxrpc_key_token *token;
        struct key *key = conn->key;
-       int ret;
+       int ret = 0;
 
        _enter("{%d},{%x}", conn->debug_id, key_serial(key));
 
-       if (!key)
-               return 0;
-
-       ret = key_validate(key);
-       if (ret < 0)
-               return ret;
-
        for (token = key->payload.data[0]; token; token = token->next) {
-               sec = rxrpc_security_lookup(token->security_index);
-               if (sec)
+               if (token->security_index == conn->security->security_index)
                        goto found;
        }
        return -EKEYREJECTED;
 
 found:
-       conn->security = sec;
-
-       ret = conn->security->init_connection_security(conn, token);
-       if (ret < 0) {
-               conn->security = &rxrpc_no_security;
-               return ret;
+       mutex_lock(&conn->security_lock);
+       if (conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
+               ret = conn->security->init_connection_security(conn, token);
+               if (ret == 0) {
+                       spin_lock(&conn->state_lock);
+                       if (conn->state == RXRPC_CONN_CLIENT_UNSECURED)
+                               conn->state = RXRPC_CONN_CLIENT;
+                       spin_unlock(&conn->state_lock);
+               }
        }
-
-       _leave(" = 0");
-       return 0;
+       mutex_unlock(&conn->security_lock);
+       return ret;
 }
 
 /*
@@ -144,21 +137,15 @@ const struct rxrpc_security *rxrpc_get_incoming_security(struct rxrpc_sock *rx,
 
        sec = rxrpc_security_lookup(sp->hdr.securityIndex);
        if (!sec) {
-               trace_rxrpc_abort(0, "SVS",
-                                 sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                                 RX_INVALID_OPERATION, EKEYREJECTED);
-               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
-               skb->priority = RX_INVALID_OPERATION;
+               rxrpc_direct_abort(skb, rxrpc_abort_unsupported_security,
+                                  RX_INVALID_OPERATION, -EKEYREJECTED);
                return NULL;
        }
 
        if (sp->hdr.securityIndex != RXRPC_SECURITY_NONE &&
            !rx->securities) {
-               trace_rxrpc_abort(0, "SVR",
-                                 sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                                 RX_INVALID_OPERATION, EKEYREJECTED);
-               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
-               skb->priority = sec->no_key_abort;
+               rxrpc_direct_abort(skb, rxrpc_abort_no_service_key,
+                                  sec->no_key_abort, -EKEYREJECTED);
                return NULL;
        }
 
@@ -191,9 +178,9 @@ struct key *rxrpc_look_up_server_security(struct rxrpc_connection *conn,
                sprintf(kdesc, "%u:%u",
                        sp->hdr.serviceId, sp->hdr.securityIndex);
 
-       rcu_read_lock();
+       read_lock(&conn->local->services_lock);
 
-       rx = rcu_dereference(conn->local->service);
+       rx = conn->local->service;
        if (!rx)
                goto out;
 
@@ -215,6 +202,6 @@ struct key *rxrpc_look_up_server_security(struct rxrpc_connection *conn,
        }
 
 out:
-       rcu_read_unlock();
+       read_unlock(&conn->local->services_lock);
        return key;
 }
index cde1e65..da49fcf 100644 (file)
 #include "ar-internal.h"
 
 /*
+ * Propose an abort to be made in the I/O thread.
+ */
+bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
+                        enum rxrpc_abort_reason why)
+{
+       _enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);
+
+       if (!call->send_abort && !rxrpc_call_is_complete(call)) {
+               call->send_abort_why = why;
+               call->send_abort_err = error;
+               call->send_abort_seq = 0;
+               /* Request abort locklessly vs rxrpc_input_call_event(). */
+               smp_store_release(&call->send_abort, abort_code);
+               rxrpc_poke_call(call, rxrpc_call_poke_abort);
+               return true;
+       }
+
+       return false;
+}
+
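
rxrpc_propose_abort() publishes the abort code with a release store only after filling in the why/err/seq fields, so a reader that loads send_abort with acquire semantics (the comment points at rxrpc_input_call_event()) is guaranteed to see those fields. A runnable userspace analogue of the pairing, hypothetical names, C11 atomics:

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct call {
            int send_abort_why;
            int send_abort_err;
            _Atomic int send_abort;         /* 0 = no abort requested */
    };

    /* Writer: fill in the reason fields, then publish with release. */
    static void propose_abort(struct call *call, int code, int err, int why)
    {
            call->send_abort_why = why;
            call->send_abort_err = err;
            atomic_store_explicit(&call->send_abort, code,
                                  memory_order_release);
    }

    /* Reader: an acquire load of send_abort guarantees that the fields
     * written before the release store are visible afterwards. */
    static void consume_abort(struct call *call)
    {
            int code = atomic_load_explicit(&call->send_abort,
                                            memory_order_acquire);

            if (code)
                    printf("abort %d: why=%d err=%d\n", code,
                           call->send_abort_why, call->send_abort_err);
    }

    int main(void)
    {
            struct call c = { 0 };

            propose_abort(&c, 6, -ECONNABORTED, 1);
            consume_abort(&c);
            return 0;
    }
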
+/*
+ * Wait for a call to become connected.  Interruption here doesn't cause the
+ * call to be aborted.
+ */
+static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
+{
+       DECLARE_WAITQUEUE(myself, current);
+       int ret = 0;
+
+       _enter("%d", call->debug_id);
+
+       if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
+               return call->error;
+
+       add_wait_queue_exclusive(&call->waitq, &myself);
+
+       for (;;) {
+               ret = call->error;
+               if (ret < 0)
+                       break;
+
+               switch (call->interruptibility) {
+               case RXRPC_INTERRUPTIBLE:
+               case RXRPC_PREINTERRUPTIBLE:
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       break;
+               case RXRPC_UNINTERRUPTIBLE:
+               default:
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       break;
+               }
+               if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN) {
+                       ret = call->error;
+                       break;
+               }
+               if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
+                    call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
+                   signal_pending(current)) {
+                       ret = sock_intr_errno(*timeo);
+                       break;
+               }
+               *timeo = schedule_timeout(*timeo);
+       }
+
+       remove_wait_queue(&call->waitq, &myself);
+       __set_current_state(TASK_RUNNING);
+
+       if (ret == 0 && rxrpc_call_is_complete(call))
+               ret = call->error;
+
+       _leave(" = %d", ret);
+       return ret;
+}
+
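
rxrpc_wait_to_be_connected() is the classic open-coded kernel wait loop: queue on the waitqueue, pick a task state from the call's interruptibility, re-check the condition before every sleep, and translate a pending signal into an error without aborting the call. A rough userspace analogue of the check-then-sleep shape, hypothetical names, using a condition variable:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    struct waiter {
            pthread_mutex_t lock;
            pthread_cond_t cond;
            bool connected;
            int error;      /* negative errno posted by another thread */
    };

    /* Sleep until connected, an error is posted, or the timeout expires;
     * as in the kernel loop, the condition is re-tested before every sleep. */
    static int wait_to_be_connected(struct waiter *w, int timeout_sec)
    {
            struct timespec deadline;
            int ret = 0;

            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += timeout_sec;

            pthread_mutex_lock(&w->lock);
            while (!w->connected && !w->error && ret == 0) {
                    if (pthread_cond_timedwait(&w->cond, &w->lock,
                                               &deadline) == ETIMEDOUT)
                            ret = -ETIMEDOUT;
            }
            if (ret == 0)
                    ret = w->error;
            pthread_mutex_unlock(&w->lock);
            return ret;
    }

    int main(void)
    {
            struct waiter w = {
                    .lock      = PTHREAD_MUTEX_INITIALIZER,
                    .cond      = PTHREAD_COND_INITIALIZER,
                    .connected = true,      /* already connected: returns 0 */
            };

            return wait_to_be_connected(&w, 1);
    }
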
+/*
  * Return true if there's sufficient Tx queue space.
  */
 static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
@@ -39,7 +114,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
                if (rxrpc_check_tx_space(call, NULL))
                        return 0;
 
-               if (call->state >= RXRPC_CALL_COMPLETE)
+               if (rxrpc_call_is_complete(call))
                        return call->error;
 
                if (signal_pending(current))
@@ -74,7 +149,7 @@ static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
                if (rxrpc_check_tx_space(call, &tx_win))
                        return 0;
 
-               if (call->state >= RXRPC_CALL_COMPLETE)
+               if (rxrpc_call_is_complete(call))
                        return call->error;
 
                if (timeout == 0 &&
@@ -103,7 +178,7 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
                if (rxrpc_check_tx_space(call, NULL))
                        return 0;
 
-               if (call->state >= RXRPC_CALL_COMPLETE)
+               if (rxrpc_call_is_complete(call))
                        return call->error;
 
                trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
@@ -168,7 +243,6 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
                               struct rxrpc_txbuf *txb,
                               rxrpc_notify_end_tx_t notify_end_tx)
 {
-       unsigned long now;
        rxrpc_seq_t seq = txb->seq;
        bool last = test_bit(RXRPC_TXBUF_LAST, &txb->flags), poke;
 
@@ -191,36 +265,10 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
        poke = list_empty(&call->tx_sendmsg);
        list_add_tail(&txb->call_link, &call->tx_sendmsg);
        call->tx_prepared = seq;
+       if (last)
+               rxrpc_notify_end_tx(rx, call, notify_end_tx);
        spin_unlock(&call->tx_lock);
 
-       if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
-               _debug("________awaiting reply/ACK__________");
-               write_lock(&call->state_lock);
-               switch (call->state) {
-               case RXRPC_CALL_CLIENT_SEND_REQUEST:
-                       call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
-                       rxrpc_notify_end_tx(rx, call, notify_end_tx);
-                       break;
-               case RXRPC_CALL_SERVER_ACK_REQUEST:
-                       call->state = RXRPC_CALL_SERVER_SEND_REPLY;
-                       now = jiffies;
-                       WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET);
-                       if (call->ackr_reason == RXRPC_ACK_DELAY)
-                               call->ackr_reason = 0;
-                       trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
-                       if (!last)
-                               break;
-                       fallthrough;
-               case RXRPC_CALL_SERVER_SEND_REPLY:
-                       call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
-                       rxrpc_notify_end_tx(rx, call, notify_end_tx);
-                       break;
-               default:
-                       break;
-               }
-               write_unlock(&call->state_lock);
-       }
-
        if (poke)
                rxrpc_poke_call(call, rxrpc_call_poke_start);
 }
@@ -245,6 +293,16 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
 
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
+       ret = rxrpc_wait_to_be_connected(call, &timeo);
+       if (ret < 0)
+               return ret;
+
+       if (call->conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
+               ret = rxrpc_init_client_conn_security(call->conn);
+               if (ret < 0)
+                       return ret;
+       }
+
        /* this should be in poll */
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
@@ -252,15 +310,20 @@ reload:
        ret = -EPIPE;
        if (sk->sk_shutdown & SEND_SHUTDOWN)
                goto maybe_error;
-       state = READ_ONCE(call->state);
+       state = rxrpc_call_state(call);
        ret = -ESHUTDOWN;
        if (state >= RXRPC_CALL_COMPLETE)
                goto maybe_error;
        ret = -EPROTO;
        if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
            state != RXRPC_CALL_SERVER_ACK_REQUEST &&
-           state != RXRPC_CALL_SERVER_SEND_REPLY)
+           state != RXRPC_CALL_SERVER_SEND_REPLY) {
+               /* Request phase complete for this client call */
+               trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send,
+                                 call->cid, call->call_id, call->rx_consumed,
+                                 0, -EPROTO);
                goto maybe_error;
+       }
 
        ret = -EMSGSIZE;
        if (call->tx_total_len != -1) {
@@ -329,7 +392,7 @@ reload:
 
                /* check for the far side aborting the call or a network error
                 * occurring */
-               if (call->state == RXRPC_CALL_COMPLETE)
+               if (rxrpc_call_is_complete(call))
                        goto call_terminated;
 
                /* add the packet to the send queue if it's now full */
@@ -354,12 +417,9 @@ reload:
 
 success:
        ret = copied;
-       if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) {
-               read_lock(&call->state_lock);
-               if (call->error < 0)
-                       ret = call->error;
-               read_unlock(&call->state_lock);
-       }
+       if (rxrpc_call_is_complete(call) &&
+           call->error < 0)
+               ret = call->error;
 out:
        call->tx_pending = txb;
        _leave(" = %d", ret);
@@ -543,7 +603,6 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
                                     atomic_inc_return(&rxrpc_debug_id));
        /* The socket is now unlocked */
 
-       rxrpc_put_peer(cp.peer, rxrpc_peer_put_discard_tmp);
        _leave(" = %p\n", call);
        return call;
 }
@@ -556,7 +615,6 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
 int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
        __releases(&rx->sk.sk_lock.slock)
 {
-       enum rxrpc_call_state state;
        struct rxrpc_call *call;
        unsigned long now, j;
        bool dropped_lock = false;
@@ -598,10 +656,10 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                        return PTR_ERR(call);
                /* ... and we have the call lock. */
                ret = 0;
-               if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE)
+               if (rxrpc_call_is_complete(call))
                        goto out_put_unlock;
        } else {
-               switch (READ_ONCE(call->state)) {
+               switch (rxrpc_call_state(call)) {
                case RXRPC_CALL_UNINITIALISED:
                case RXRPC_CALL_CLIENT_AWAIT_CONN:
                case RXRPC_CALL_SERVER_PREALLOC:
@@ -655,17 +713,13 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                break;
        }
 
-       state = READ_ONCE(call->state);
-       _debug("CALL %d USR %lx ST %d on CONN %p",
-              call->debug_id, call->user_call_ID, state, call->conn);
-
-       if (state >= RXRPC_CALL_COMPLETE) {
+       if (rxrpc_call_is_complete(call)) {
                /* it's too late for this call */
                ret = -ESHUTDOWN;
        } else if (p.command == RXRPC_CMD_SEND_ABORT) {
+               rxrpc_propose_abort(call, p.abort_code, -ECONNABORTED,
+                                   rxrpc_abort_call_sendmsg);
                ret = 0;
-               if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
-                       ret = rxrpc_send_abort_packet(call);
        } else if (p.command != RXRPC_CMD_SEND_DATA) {
                ret = -EINVAL;
        } else {
@@ -705,34 +759,17 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
        bool dropped_lock = false;
        int ret;
 
-       _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);
+       _enter("{%d},", call->debug_id);
 
        ASSERTCMP(msg->msg_name, ==, NULL);
        ASSERTCMP(msg->msg_control, ==, NULL);
 
        mutex_lock(&call->user_mutex);
 
-       _debug("CALL %d USR %lx ST %d on CONN %p",
-              call->debug_id, call->user_call_ID, call->state, call->conn);
-
-       switch (READ_ONCE(call->state)) {
-       case RXRPC_CALL_CLIENT_SEND_REQUEST:
-       case RXRPC_CALL_SERVER_ACK_REQUEST:
-       case RXRPC_CALL_SERVER_SEND_REPLY:
-               ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
-                                     notify_end_tx, &dropped_lock);
-               break;
-       case RXRPC_CALL_COMPLETE:
-               read_lock(&call->state_lock);
+       ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
+                             notify_end_tx, &dropped_lock);
+       if (ret == -ESHUTDOWN)
                ret = call->error;
-               read_unlock(&call->state_lock);
-               break;
-       default:
-               /* Request phase complete for this client call */
-               trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send"));
-               ret = -EPROTO;
-               break;
-       }
 
        if (!dropped_lock)
                mutex_unlock(&call->user_mutex);
@@ -747,24 +784,20 @@ EXPORT_SYMBOL(rxrpc_kernel_send_data);
  * @call: The call to be aborted
  * @abort_code: The abort code to stick into the ABORT packet
  * @error: Local error value
- * @why: 3-char string indicating why.
+ * @why: Indication as to why.
  *
  * Allow a kernel service to abort a call, if it's still in an abortable state
  * and return true if the call was aborted, false if it was already complete.
  */
 bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
-                            u32 abort_code, int error, const char *why)
+                            u32 abort_code, int error, enum rxrpc_abort_reason why)
 {
        bool aborted;
 
-       _enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);
+       _enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);
 
        mutex_lock(&call->user_mutex);
-
-       aborted = rxrpc_abort_call(why, call, 0, abort_code, error);
-       if (aborted)
-               rxrpc_send_abort_packet(call);
-
+       aborted = rxrpc_propose_abort(call, abort_code, error, why);
        mutex_unlock(&call->user_mutex);
        return aborted;
 }
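
Throughout this series the free-form three-character "why" strings are replaced by a typed enum rxrpc_abort_reason, letting tracepoints decode the reason from one central table instead of scattering string literals across call sites. A toy illustration of that pattern (hypothetical names; the kernel's actual enum and trace plumbing differ):

    #include <stdio.h>

    enum abort_reason {
            abort_oom,
            abort_unmarshal_error,
            abort_general_error,
    };

    static const char *const abort_reason_names[] = {
            [abort_oom]             = "oom",
            [abort_unmarshal_error] = "unmarshal-error",
            [abort_general_error]   = "general-error",
    };

    /* One central decode table instead of ad-hoc "GUM"/"GER" string
     * literals at every call site. */
    static void trace_abort(enum abort_reason why, int err)
    {
            printf("abort: %s (err=%d)\n", abort_reason_names[why], err);
    }

    int main(void)
    {
            trace_abort(abort_unmarshal_error, -14);
            return 0;
    }
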
index ff47ce4..6b26bdb 100644 (file)
@@ -134,6 +134,11 @@ static int valid_label(const struct nlattr *attr,
 {
        const u32 *label = nla_data(attr);
 
+       if (nla_len(attr) != sizeof(*label)) {
+               NL_SET_ERR_MSG_MOD(extack, "Invalid MPLS label length");
+               return -EINVAL;
+       }
+
        if (*label & ~MPLS_LABEL_MASK || *label == MPLS_LABEL_IMPLNULL) {
                NL_SET_ERR_MSG_MOD(extack, "MPLS label out of range");
                return -EINVAL;
@@ -145,7 +150,8 @@ static int valid_label(const struct nlattr *attr,
 static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = {
        [TCA_MPLS_PARMS]        = NLA_POLICY_EXACT_LEN(sizeof(struct tc_mpls)),
        [TCA_MPLS_PROTO]        = { .type = NLA_U16 },
-       [TCA_MPLS_LABEL]        = NLA_POLICY_VALIDATE_FN(NLA_U32, valid_label),
+       [TCA_MPLS_LABEL]        = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
+                                                        valid_label),
        [TCA_MPLS_TC]           = NLA_POLICY_RANGE(NLA_U8, 0, 7),
        [TCA_MPLS_TTL]          = NLA_POLICY_MIN(NLA_U8, 1),
        [TCA_MPLS_BOS]          = NLA_POLICY_RANGE(NLA_U8, 0, 1),
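
The policy switch from NLA_U32 to NLA_BINARY matters because NLA_POLICY_VALIDATE_FN is meant for variable-length attribute types, and NLA_BINARY performs no implicit size check, so valid_label() must now reject payloads that are not exactly four bytes before dereferencing them. A standalone sketch of the validate-length-then-range ordering (the 20-bit label mask and the implicit-null value 3 are assumptions here, not the kernel's definitions):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define LABEL_MASK      0xfffffu        /* assumed 20-bit MPLS label space */
    #define LABEL_IMPLNULL  3               /* implicit-null label value */

    /* Standalone analogue of valid_label(): check the payload length
     * first, and only then dereference and range-check the label. */
    static int check_label(const void *payload, size_t len)
    {
            uint32_t label;

            if (len != sizeof(label))
                    return -EINVAL;         /* malformed attribute */

            memcpy(&label, payload, sizeof(label));
            if ((label & ~LABEL_MASK) || label == LABEL_IMPLNULL)
                    return -EINVAL;         /* out of range or reserved */

            return 0;
    }

    int main(void)
    {
            uint32_t good = 100, bad = 1u << 24;
            uint8_t short_attr[2] = { 0 };

            printf("%d %d %d\n",
                   check_label(&good, sizeof(good)),
                   check_label(&bad, sizeof(bad)),
                   check_label(short_attr, sizeof(short_attr)));
            return 0;
    }
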
index 2317db0..72d2c20 100644 (file)
@@ -1133,6 +1133,11 @@ skip:
                        return -ENOENT;
                }
 
+               if (new && new->ops == &noqueue_qdisc_ops) {
+                       NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
+                       return -EINVAL;
+               }
+
                err = cops->graft(parent, cl, new, &old, extack);
                if (err)
                        return err;
index 148bb0a..acb822b 100644 (file)
@@ -923,7 +923,7 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
         * rejecting the server-computed MIC in this somewhat rare case,
         * do not use splice with the GSS integrity service.
         */
-       __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+       clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
 
        /* Did we already verify the signature on the original pass through? */
        if (rqstp->rq_deferred)
@@ -990,7 +990,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
        int pad, remaining_len, offset;
        u32 rseqno;
 
-       __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+       clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
 
        priv_len = svc_getnl(&buf->head[0]);
        if (rqstp->rq_deferred) {
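
These sunrpc hunks swap the double-underscore bit helpers for their atomic counterparts: __set_bit()/__clear_bit() are plain read-modify-write operations, so two contexts flipping different bits of the same rq_flags word can lose an update, while set_bit()/clear_bit() perform an atomic RMW. A userspace demonstration of the two flavours, using a GCC builtin for the atomic one:

    #include <stdio.h>

    static unsigned long flags;

    /* Non-atomic flavour (__set_bit): a plain read-modify-write; two
     * contexts doing this concurrently to different bits of the same
     * word can lose one of the updates. */
    static void nonatomic_set_bit(int nr, unsigned long *addr)
    {
            *addr |= 1UL << nr;
    }

    /* Atomic flavour (set_bit): one atomic RMW operation, safe against
     * concurrent bit operations on the same word. */
    static void atomic_set_bit(int nr, unsigned long *addr)
    {
            __atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
    }

    int main(void)
    {
            nonatomic_set_bit(0, &flags);
            atomic_set_bit(1, &flags);
            printf("flags = %#lx\n", flags);        /* 0x3 */
            return 0;
    }
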
index 85f0c3c..f066228 100644 (file)
@@ -1243,10 +1243,10 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
                goto err_short_len;
 
        /* Will be turned off by GSS integrity and privacy services */
-       __set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+       set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
        /* Will be turned off only when NFSv4 Sessions are used */
-       __set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
-       __clear_bit(RQ_DROPME, &rqstp->rq_flags);
+       set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+       clear_bit(RQ_DROPME, &rqstp->rq_flags);
 
        svc_putu32(resv, rqstp->rq_xid);
 
index 2106003..c2ce125 100644 (file)
@@ -1238,7 +1238,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
        trace_svc_defer(rqstp);
        svc_xprt_get(rqstp->rq_xprt);
        dr->xprt = rqstp->rq_xprt;
-       __set_bit(RQ_DROPME, &rqstp->rq_flags);
+       set_bit(RQ_DROPME, &rqstp->rq_flags);
 
        dr->handle.revisit = svc_revisit;
        return &dr->handle;
index 0157143..815baf3 100644 (file)
@@ -298,9 +298,9 @@ static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs)
 static void svc_sock_secure_port(struct svc_rqst *rqstp)
 {
        if (svc_port_is_privileged(svc_addr(rqstp)))
-               __set_bit(RQ_SECURE, &rqstp->rq_flags);
+               set_bit(RQ_SECURE, &rqstp->rq_flags);
        else
-               __clear_bit(RQ_SECURE, &rqstp->rq_flags);
+               clear_bit(RQ_SECURE, &rqstp->rq_flags);
 }
 
 /*
@@ -1008,9 +1008,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
        rqstp->rq_xprt_ctxt   = NULL;
        rqstp->rq_prot        = IPPROTO_TCP;
        if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags))
-               __set_bit(RQ_LOCAL, &rqstp->rq_flags);
+               set_bit(RQ_LOCAL, &rqstp->rq_flags);
        else
-               __clear_bit(RQ_LOCAL, &rqstp->rq_flags);
+               clear_bit(RQ_LOCAL, &rqstp->rq_flags);
 
        p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
        calldir = p[1];
index 199fa01..94b20fb 100644 (file)
@@ -602,7 +602,7 @@ static int svc_rdma_has_wspace(struct svc_xprt *xprt)
 
 static void svc_rdma_secure_port(struct svc_rqst *rqstp)
 {
-       __set_bit(RQ_SECURE, &rqstp->rq_flags);
+       set_bit(RQ_SECURE, &rqstp->rq_flags);
 }
 
 static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
index 49ddc48..5e000fd 100644 (file)
@@ -1179,8 +1179,9 @@ void tipc_node_check_dest(struct net *net, u32 addr,
        bool addr_match = false;
        bool sign_match = false;
        bool link_up = false;
+       bool link_is_reset = false;
        bool accept_addr = false;
-       bool reset = true;
+       bool reset = false;
        char *if_name;
        unsigned long intv;
        u16 session;
@@ -1200,14 +1201,14 @@ void tipc_node_check_dest(struct net *net, u32 addr,
        /* Prepare to validate requesting node's signature and media address */
        l = le->link;
        link_up = l && tipc_link_is_up(l);
+       link_is_reset = l && tipc_link_is_reset(l);
        addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
        sign_match = (signature == n->signature);
 
        /* These three flags give us eight permutations: */
 
        if (sign_match && addr_match && link_up) {
-               /* All is fine. Do nothing. */
-               reset = false;
+               /* All is fine. Ignore requests. */
                /* Peer node is not a container/local namespace */
                if (!n->peer_hash_mix)
                        n->peer_hash_mix = hash_mixes;
@@ -1232,6 +1233,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
                 */
                accept_addr = true;
                *respond = true;
+               reset = true;
        } else if (!sign_match && addr_match && link_up) {
                /* Peer node rebooted. Two possibilities:
                 *  - Delayed re-discovery; this link endpoint has already
@@ -1263,6 +1265,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
                n->signature = signature;
                accept_addr = true;
                *respond = true;
+               reset = true;
        }
 
        if (!accept_addr)
@@ -1291,6 +1294,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
                tipc_link_fsm_evt(l, LINK_RESET_EVT);
                if (n->state == NODE_FAILINGOVER)
                        tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+               link_is_reset = tipc_link_is_reset(l);
                le->link = l;
                n->link_cnt++;
                tipc_node_calculate_timer(n, l);
@@ -1303,7 +1307,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
        memcpy(&le->maddr, maddr, sizeof(*maddr));
 exit:
        tipc_node_write_unlock(n);
-       if (reset && l && !tipc_link_is_reset(l))
+       if (reset && !link_is_reset)
                tipc_node_link_down(n, b->identity, false);
        tipc_node_put(n);
 }
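
Two things change in tipc_node_check_dest(): reset now defaults to false and is set only on the paths that need it, and tipc_link_is_reset() is sampled into link_is_reset while the node write lock is still held, because re-reading the link state after tipc_node_write_unlock() would race with concurrent link changes. A minimal sketch of the snapshot-under-lock idiom, hypothetical names:

    #include <pthread.h>
    #include <stdbool.h>

    struct node {
            pthread_mutex_t lock;
            bool link_is_reset;
    };

    /* Capture the shared state while the lock is held; acting on a
     * fresh read taken after unlock would race with writers. */
    static bool snapshot_link_state(struct node *n)
    {
            bool is_reset;

            pthread_mutex_lock(&n->lock);
            is_reset = n->link_is_reset;
            pthread_mutex_unlock(&n->lock);

            return is_reset;        /* decide on the snapshot */
    }

    int main(void)
    {
            struct node n = {
                    .lock          = PTHREAD_MUTEX_INITIALIZER,
                    .link_is_reset = true,
            };

            return snapshot_link_state(&n) ? 0 : 1;
    }
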
index 49946cb..10176de 100644 (file)
@@ -18,6 +18,7 @@ quiet_cmd_cc_o_c = CC      $@
        $(call if_changed_dep,cc_o_c)
 
 ifdef CONFIG_MODULES
+KASAN_SANITIZE_.vmlinux.export.o := n
 targets += .vmlinux.export.o
 vmlinux: .vmlinux.export.o
 endif
index 50e7ba6..82aa1af 100644 (file)
@@ -1203,14 +1203,19 @@ static int snd_ctl_elem_read(struct snd_card *card,
        const u32 pattern = 0xdeadbeef;
        int ret;
 
+       down_read(&card->controls_rwsem);
        kctl = snd_ctl_find_id(card, &control->id);
-       if (kctl == NULL)
-               return -ENOENT;
+       if (kctl == NULL) {
+               ret = -ENOENT;
+               goto unlock;
+       }
 
        index_offset = snd_ctl_get_ioff(kctl, &control->id);
        vd = &kctl->vd[index_offset];
-       if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL)
-               return -EPERM;
+       if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL) {
+               ret = -EPERM;
+               goto unlock;
+       }
 
        snd_ctl_build_ioff(&control->id, kctl, index_offset);
 
@@ -1220,7 +1225,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
        info.id = control->id;
        ret = __snd_ctl_elem_info(card, kctl, &info, NULL);
        if (ret < 0)
-               return ret;
+               goto unlock;
 #endif
 
        if (!snd_ctl_skip_validation(&info))
@@ -1230,7 +1235,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
                ret = kctl->get(kctl, control);
        snd_power_unref(card);
        if (ret < 0)
-               return ret;
+               goto unlock;
        if (!snd_ctl_skip_validation(&info) &&
            sanity_check_elem_value(card, control, &info, pattern) < 0) {
                dev_err(card->dev,
@@ -1238,8 +1243,11 @@ static int snd_ctl_elem_read(struct snd_card *card,
                        control->id.iface, control->id.device,
                        control->id.subdevice, control->id.name,
                        control->id.index);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto unlock;
        }
+unlock:
+       up_read(&card->controls_rwsem);
        return ret;
 }
 
@@ -1253,9 +1261,7 @@ static int snd_ctl_elem_read_user(struct snd_card *card,
        if (IS_ERR(control))
                return PTR_ERR(control);
 
-       down_read(&card->controls_rwsem);
        result = snd_ctl_elem_read(card, control);
-       up_read(&card->controls_rwsem);
        if (result < 0)
                goto error;
 
index f975cc8..3cadd40 100644 (file)
@@ -530,12 +530,11 @@ static ssize_t set_led_id(struct snd_ctl_led_card *led_card, const char *buf, si
                          bool attach)
 {
        char buf2[256], *s, *os;
-       size_t len = max(sizeof(s) - 1, count);
        struct snd_ctl_elem_id id;
        int err;
 
-       strncpy(buf2, buf, len);
-       buf2[len] = '\0';
+       if (strscpy(buf2, buf, sizeof(buf2)) < 0)
+               return -E2BIG;
        memset(&id, 0, sizeof(id));
        id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
        s = buf2;
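
The removed code computed the copy bound from sizeof(s), the size of a char pointer, and took the larger of that and count, allowing writes past the 256-byte buffer for long inputs; strncpy() would not have guaranteed NUL-termination anyway. strscpy() copies at most size - 1 bytes, always terminates, and returns -E2BIG on truncation. A freestanding analogue of those semantics (a sketch, not the kernel's implementation):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Minimal strscpy-like helper: always NUL-terminates and reports
     * truncation, unlike strncpy. */
    static long my_strscpy(char *dst, const char *src, size_t size)
    {
            size_t len = strnlen(src, size);

            if (len == size) {              /* would not fit with the NUL */
                    if (size) {
                            memcpy(dst, src, size - 1);
                            dst[size - 1] = '\0';
                    }
                    return -E2BIG;
            }
            memcpy(dst, src, len + 1);
            return (long)len;
    }

    int main(void)
    {
            char buf[8];

            printf("%ld '%s'\n", my_strscpy(buf, "short", sizeof(buf)), buf);
            printf("%ld '%s'\n",
                   my_strscpy(buf, "definitely too long", sizeof(buf)), buf);
            return 0;
    }
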
index 91842c0..f7815ee 100644 (file)
@@ -598,8 +598,8 @@ static int cs35l41_system_suspend(struct device *dev)
        dev_dbg(cs35l41->dev, "System Suspend\n");
 
        if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) {
-               dev_err(cs35l41->dev, "System Suspend not supported\n");
-               return -EINVAL;
+               dev_err_once(cs35l41->dev, "System Suspend not supported\n");
+               return 0; /* don't block the whole system suspend */
        }
 
        ret = pm_runtime_force_suspend(dev);
@@ -624,8 +624,8 @@ static int cs35l41_system_resume(struct device *dev)
        dev_dbg(cs35l41->dev, "System Resume\n");
 
        if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) {
-               dev_err(cs35l41->dev, "System Resume not supported\n");
-               return -EINVAL;
+               dev_err_once(cs35l41->dev, "System Resume not supported\n");
+               return 0; /* don't block the whole system resume */
        }
 
        if (cs35l41->reset_gpio) {
@@ -647,6 +647,15 @@ static int cs35l41_system_resume(struct device *dev)
        return ret;
 }
 
+static int cs35l41_runtime_idle(struct device *dev)
+{
+       struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+
+       if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH)
+               return -EBUSY; /* suspend not supported yet on this model */
+       return 0;
+}
+
 static int cs35l41_runtime_suspend(struct device *dev)
 {
        struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
@@ -1536,7 +1545,8 @@ void cs35l41_hda_remove(struct device *dev)
 EXPORT_SYMBOL_NS_GPL(cs35l41_hda_remove, SND_HDA_SCODEC_CS35L41);
 
 const struct dev_pm_ops cs35l41_hda_pm_ops = {
-       RUNTIME_PM_OPS(cs35l41_runtime_suspend, cs35l41_runtime_resume, NULL)
+       RUNTIME_PM_OPS(cs35l41_runtime_suspend, cs35l41_runtime_resume,
+                      cs35l41_runtime_idle)
        SYSTEM_SLEEP_PM_OPS(cs35l41_system_suspend, cs35l41_system_resume)
 };
 EXPORT_SYMBOL_NS_GPL(cs35l41_hda_pm_ops, SND_HDA_SCODEC_CS35L41);
index 386dd9d..9ea633f 100644 (file)
@@ -1981,6 +1981,7 @@ static const struct snd_pci_quirk force_connect_list[] = {
        SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
        SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
        SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
+       SND_PCI_QUIRK(0x103c, 0x8715, "HP", 1),
        SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
        SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
        {}
index 3794b52..6fab7c8 100644 (file)
@@ -3564,6 +3564,15 @@ static void alc256_init(struct hda_codec *codec)
        hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp_pin_sense;
 
+       if (spec->ultra_low_power) {
+               alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1);
+               alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2);
+               alc_update_coef_idx(codec, 0x08, 7<<4, 0);
+               alc_update_coef_idx(codec, 0x3b, 1<<15, 0);
+               alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
+               msleep(30);
+       }
+
        if (!hp_pin)
                hp_pin = 0x21;
 
@@ -3575,14 +3584,6 @@ static void alc256_init(struct hda_codec *codec)
                msleep(2);
 
        alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
-       if (spec->ultra_low_power) {
-               alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1);
-               alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2);
-               alc_update_coef_idx(codec, 0x08, 7<<4, 0);
-               alc_update_coef_idx(codec, 0x3b, 1<<15, 0);
-               alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
-               msleep(30);
-       }
 
        snd_hda_codec_write(codec, hp_pin, 0,
                            AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
@@ -3713,6 +3714,13 @@ static void alc225_init(struct hda_codec *codec)
        hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp1_pin_sense, hp2_pin_sense;
 
+       if (spec->ultra_low_power) {
+               alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2);
+               alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
+               alc_update_coef_idx(codec, 0x33, 1<<11, 0);
+               msleep(30);
+       }
+
        if (spec->codec_variant != ALC269_TYPE_ALC287 &&
                spec->codec_variant != ALC269_TYPE_ALC245)
                /* required only at boot or S3 and S4 resume time */
@@ -3734,12 +3742,6 @@ static void alc225_init(struct hda_codec *codec)
                msleep(2);
 
        alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
-       if (spec->ultra_low_power) {
-               alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2);
-               alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
-               alc_update_coef_idx(codec, 0x33, 1<<11, 0);
-               msleep(30);
-       }
 
        if (hp1_pin_sense || spec->ultra_low_power)
                snd_hda_codec_write(codec, hp_pin, 0,
@@ -4644,6 +4646,16 @@ static void alc285_fixup_hp_coef_micmute_led(struct hda_codec *codec,
        }
 }
 
+static void alc285_fixup_hp_gpio_micmute_led(struct hda_codec *codec,
+                               const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE)
+               spec->micmute_led_polarity = 1;
+       alc_fixup_hp_gpio_led(codec, action, 0, 0x04);
+}
+
 static void alc236_fixup_hp_coef_micmute_led(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action)
 {
@@ -4665,6 +4677,13 @@ static void alc285_fixup_hp_mute_led(struct hda_codec *codec,
        alc285_fixup_hp_coef_micmute_led(codec, fix, action);
 }
 
+static void alc285_fixup_hp_spectre_x360_mute_led(struct hda_codec *codec,
+                               const struct hda_fixup *fix, int action)
+{
+       alc285_fixup_hp_mute_led_coefbit(codec, fix, action);
+       alc285_fixup_hp_gpio_micmute_led(codec, fix, action);
+}
+
 static void alc236_fixup_hp_mute_led(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action)
 {
@@ -7106,6 +7125,7 @@ enum {
        ALC285_FIXUP_ASUS_G533Z_PINS,
        ALC285_FIXUP_HP_GPIO_LED,
        ALC285_FIXUP_HP_MUTE_LED,
+       ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED,
        ALC236_FIXUP_HP_GPIO_LED,
        ALC236_FIXUP_HP_MUTE_LED,
        ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
@@ -8486,6 +8506,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc285_fixup_hp_mute_led,
        },
+       [ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_hp_spectre_x360_mute_led,
+       },
        [ALC236_FIXUP_HP_GPIO_LED] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc236_fixup_hp_gpio_led,
@@ -9239,6 +9263,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
        SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
+       SND_PCI_QUIRK(0x1028, 0x0c03, "Dell Precision 5340", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
        SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
        SND_PCI_QUIRK(0x1028, 0x0c1b, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
@@ -9327,6 +9352,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
        SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
        SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+       SND_PCI_QUIRK(0x103c, 0x86f9, "HP Spectre x360 13-aw0xxx", ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
@@ -9406,6 +9432,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
index 1f0b552..0d283e4 100644 (file)
@@ -209,6 +209,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
        {
                .driver_data = &acp6x_card,
                .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "M5402RA"),
+               }
+       },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Alienware"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
                }
@@ -220,6 +227,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 14 2022"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Razer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Blade 14 (2022) - RZ09-0427"),
+               }
+       },
        {}
 };
 
index 644300e..fcf4fba 100644 (file)
@@ -177,8 +177,20 @@ static int rt9120_codec_probe(struct snd_soc_component *comp)
        return 0;
 }
 
+static int rt9120_codec_suspend(struct snd_soc_component *comp)
+{
+       return pm_runtime_force_suspend(comp->dev);
+}
+
+static int rt9120_codec_resume(struct snd_soc_component *comp)
+{
+       return pm_runtime_force_resume(comp->dev);
+}
+
 static const struct snd_soc_component_driver rt9120_component_driver = {
        .probe = rt9120_codec_probe,
+       .suspend = rt9120_codec_suspend,
+       .resume = rt9120_codec_resume,
        .controls = rt9120_snd_controls,
        .num_controls = ARRAY_SIZE(rt9120_snd_controls),
        .dapm_widgets = rt9120_dapm_widgets,
index ca6a01a..791d873 100644 (file)
@@ -697,6 +697,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
        int dcs_mask;
        int dcs_l, dcs_r;
        int dcs_l_reg, dcs_r_reg;
+       int an_out_reg;
        int timeout;
        int pwr_reg;
 
@@ -712,6 +713,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
                dcs_mask = WM8904_DCS_ENA_CHAN_0 | WM8904_DCS_ENA_CHAN_1;
                dcs_r_reg = WM8904_DC_SERVO_8;
                dcs_l_reg = WM8904_DC_SERVO_9;
+               an_out_reg = WM8904_ANALOGUE_OUT1_LEFT;
                dcs_l = 0;
                dcs_r = 1;
                break;
@@ -720,6 +722,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
                dcs_mask = WM8904_DCS_ENA_CHAN_2 | WM8904_DCS_ENA_CHAN_3;
                dcs_r_reg = WM8904_DC_SERVO_6;
                dcs_l_reg = WM8904_DC_SERVO_7;
+               an_out_reg = WM8904_ANALOGUE_OUT2_LEFT;
                dcs_l = 2;
                dcs_r = 3;
                break;
@@ -792,6 +795,10 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
                snd_soc_component_update_bits(component, reg,
                                    WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP,
                                    WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP);
+
+               /* Update volume, requires PGA to be powered */
+               val = snd_soc_component_read(component, an_out_reg);
+               snd_soc_component_write(component, an_out_reg, val);
                break;
 
        case SND_SOC_DAPM_POST_PMU:
index c836848..8d14b55 100644 (file)
@@ -121,11 +121,11 @@ static const struct snd_soc_dapm_route audio_map[] = {
 
 static const struct snd_soc_dapm_route audio_map_ac97[] = {
        /* 1st half -- Normal DAPM routes */
-       {"Playback",  NULL, "AC97 Playback"},
-       {"AC97 Capture",  NULL, "Capture"},
+       {"AC97 Playback",  NULL, "CPU AC97 Playback"},
+       {"CPU AC97 Capture",  NULL, "AC97 Capture"},
        /* 2nd half -- ASRC DAPM routes */
-       {"AC97 Playback",  NULL, "ASRC-Playback"},
-       {"ASRC-Capture",  NULL, "AC97 Capture"},
+       {"CPU AC97 Playback",  NULL, "ASRC-Playback"},
+       {"ASRC-Capture",  NULL, "CPU AC97 Capture"},
 };
 
 static const struct snd_soc_dapm_route audio_map_tx[] = {
index 7b17f15..94341e4 100644 (file)
@@ -315,21 +315,21 @@ static int hwvad_detected(struct snd_kcontrol *kcontrol,
 
 static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = {
        SOC_SINGLE_SX_TLV("CH0 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(0), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(0), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH1 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(1), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(1), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH2 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(2), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(2), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH3 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(3), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(3), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH4 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(4), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(4), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH5 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(5), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(5), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH6 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(6), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(6), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH7 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(7), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(7), 0x8, 0xF, gain_tlv),
        SOC_ENUM_EXT("MICFIL Quality Select",
                     fsl_micfil_quality_enum,
                     micfil_quality_get, micfil_quality_set),
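
The SX control arguments change from (0xF, 0x7) to (0x8, 0xF): for SOC_SINGLE_SX_TLV the third value is the signed minimum of the register field and the fourth the number of steps, so a 4-bit gain field spans 0x8 (-8) through 0x7 (+7) (this reading of the macro's min/steps semantics is an assumption). Sign-extending such a field in C looks like this:

    #include <stdio.h>

    /* Sign-extend a 4-bit register field: 0x8..0xF map to -8..-1,
     * 0x0..0x7 stay non-negative. Relies on arithmetic right shift
     * of negative values, as implemented by GCC and Clang. */
    static int sext4(unsigned int field)
    {
            return (int)(field << 28) >> 28;
    }

    int main(void)
    {
            printf("0x8 -> %d, 0xF -> %d, 0x7 -> %d\n",
                   sext4(0x8), sext4(0xF), sext4(0x7));  /* -8, -1, 7 */
            return 0;
    }
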
index c9e0e31..46a5355 100644 (file)
@@ -1189,14 +1189,14 @@ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
        .symmetric_channels = 1,
        .probe = fsl_ssi_dai_probe,
        .playback = {
-               .stream_name = "AC97 Playback",
+               .stream_name = "CPU AC97 Playback",
                .channels_min = 2,
                .channels_max = 2,
                .rates = SNDRV_PCM_RATE_8000_48000,
                .formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20,
        },
        .capture = {
-               .stream_name = "AC97 Capture",
+               .stream_name = "CPU AC97 Capture",
                .channels_min = 2,
                .channels_max = 2,
                .rates = SNDRV_PCM_RATE_48000,
index a472de1..99308ed 100644 (file)
@@ -554,10 +554,12 @@ config SND_SOC_INTEL_SOF_NAU8825_MACH
        select SND_SOC_RT1015P
        select SND_SOC_MAX98373_I2C
        select SND_SOC_MAX98357A
+       select SND_SOC_NAU8315
        select SND_SOC_DMIC
        select SND_SOC_HDAC_HDMI
        select SND_SOC_INTEL_HDA_DSP_COMMON
        select SND_SOC_INTEL_SOF_MAXIM_COMMON
+       select SND_SOC_INTEL_SOF_REALTEK_COMMON
        help
           This adds support for ASoC machine driver for SOF platforms
           with nau8825 codec.
index 2788022..a800854 100644 (file)
@@ -48,6 +48,7 @@
 #define SOF_MAX98373_SPEAKER_AMP_PRESENT       BIT(15)
 #define SOF_MAX98360A_SPEAKER_AMP_PRESENT      BIT(16)
 #define SOF_RT1015P_SPEAKER_AMP_PRESENT        BIT(17)
+#define SOF_NAU8318_SPEAKER_AMP_PRESENT        BIT(18)
 
 static unsigned long sof_nau8825_quirk = SOF_NAU8825_SSP_CODEC(0);
 
@@ -338,6 +339,13 @@ static struct snd_soc_dai_link_component rt1019p_component[] = {
        }
 };
 
+static struct snd_soc_dai_link_component nau8318_components[] = {
+       {
+               .name = "NVTN2012:00",
+               .dai_name = "nau8315-hifi",
+       }
+};
+
 static struct snd_soc_dai_link_component dummy_component[] = {
        {
                .name = "snd-soc-dummy",
@@ -486,6 +494,11 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
                        max_98360a_dai_link(&links[id]);
                } else if (sof_nau8825_quirk & SOF_RT1015P_SPEAKER_AMP_PRESENT) {
                        sof_rt1015p_dai_link(&links[id]);
+               } else if (sof_nau8825_quirk &
+                               SOF_NAU8318_SPEAKER_AMP_PRESENT) {
+                       links[id].codecs = nau8318_components;
+                       links[id].num_codecs = ARRAY_SIZE(nau8318_components);
+                       links[id].init = speaker_codec_init;
                } else {
                        goto devm_err;
                }
@@ -618,7 +631,7 @@ static const struct platform_device_id board_ids[] = {
 
        },
        {
-               .name = "adl_rt1019p_nau8825",
+               .name = "adl_rt1019p_8825",
                .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
                                        SOF_SPEAKER_AMP_PRESENT |
                                        SOF_RT1019P_SPEAKER_AMP_PRESENT |
@@ -626,7 +639,7 @@ static const struct platform_device_id board_ids[] = {
                                        SOF_NAU8825_NUM_HDMIDEV(4)),
        },
        {
-               .name = "adl_max98373_nau8825",
+               .name = "adl_max98373_8825",
                .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
                                        SOF_SPEAKER_AMP_PRESENT |
                                        SOF_MAX98373_SPEAKER_AMP_PRESENT |
@@ -637,7 +650,7 @@ static const struct platform_device_id board_ids[] = {
        },
        {
                /* Shorten the name to fit the char-array length limit */
-               .name = "adl_mx98360a_nau8825",
+               .name = "adl_mx98360a_8825",
                .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
                                        SOF_SPEAKER_AMP_PRESENT |
                                        SOF_MAX98360A_SPEAKER_AMP_PRESENT |
@@ -648,7 +661,7 @@ static const struct platform_device_id board_ids[] = {
 
        },
        {
-               .name = "adl_rt1015p_nau8825",
+               .name = "adl_rt1015p_8825",
                .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
                                        SOF_SPEAKER_AMP_PRESENT |
                                        SOF_RT1015P_SPEAKER_AMP_PRESENT |
@@ -657,6 +670,16 @@ static const struct platform_device_id board_ids[] = {
                                        SOF_BT_OFFLOAD_SSP(2) |
                                        SOF_SSP_BT_OFFLOAD_PRESENT),
        },
+       {
+               .name = "adl_nau8318_8825",
+               .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
+                                       SOF_SPEAKER_AMP_PRESENT |
+                                       SOF_NAU8318_SPEAKER_AMP_PRESENT |
+                                       SOF_NAU8825_SSP_AMP(1) |
+                                       SOF_NAU8825_NUM_HDMIDEV(4) |
+                                       SOF_BT_OFFLOAD_SSP(2) |
+                                       SOF_SSP_BT_OFFLOAD_PRESENT),
+       },
        { }
 };
 MODULE_DEVICE_TABLE(platform, board_ids);
index 60aee56..56ee5fe 100644 (file)
@@ -450,6 +450,11 @@ static const struct snd_soc_acpi_codecs adl_lt6911_hdmi = {
        .codecs = {"INTC10B0"}
 };
 
+static const struct snd_soc_acpi_codecs adl_nau8318_amp = {
+       .num_codecs = 1,
+       .codecs = {"NVTN2012"}
+};
+
 struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
        {
                .comp_ids = &adl_rt5682_rt5682s_hp,
@@ -474,21 +479,21 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
        },
        {
                .id = "10508825",
-               .drv_name = "adl_rt1019p_nau8825",
+               .drv_name = "adl_rt1019p_8825",
                .machine_quirk = snd_soc_acpi_codec_list,
                .quirk_data = &adl_rt1019p_amp,
                .sof_tplg_filename = "sof-adl-rt1019-nau8825.tplg",
        },
        {
                .id = "10508825",
-               .drv_name = "adl_max98373_nau8825",
+               .drv_name = "adl_max98373_8825",
                .machine_quirk = snd_soc_acpi_codec_list,
                .quirk_data = &adl_max98373_amp,
                .sof_tplg_filename = "sof-adl-max98373-nau8825.tplg",
        },
        {
                .id = "10508825",
-               .drv_name = "adl_mx98360a_nau8825",
+               .drv_name = "adl_mx98360a_8825",
                .machine_quirk = snd_soc_acpi_codec_list,
                .quirk_data = &adl_max98360a_amp,
                .sof_tplg_filename = "sof-adl-max98360a-nau8825.tplg",
@@ -502,13 +507,20 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
        },
        {
                .id = "10508825",
-               .drv_name = "adl_rt1015p_nau8825",
+               .drv_name = "adl_rt1015p_8825",
                .machine_quirk = snd_soc_acpi_codec_list,
                .quirk_data = &adl_rt1015p_amp,
                .sof_tplg_filename = "sof-adl-rt1015-nau8825.tplg",
        },
        {
                .id = "10508825",
+               .drv_name = "adl_nau8318_8825",
+               .machine_quirk = snd_soc_acpi_codec_list,
+               .quirk_data = &adl_nau8318_amp,
+               .sof_tplg_filename = "sof-adl-nau8318-nau8825.tplg",
+       },
+       {
+               .id = "10508825",
                .drv_name = "sof_nau8825",
                .sof_tplg_filename = "sof-adl-nau8825.tplg",
        },
index 31b4311..07f96a1 100644 (file)
@@ -203,6 +203,25 @@ static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link2_rt1316_link01_rt71
        {}
 };
 
+static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link2_rt1316_link01[] = {
+       {
+               .mask = BIT(2),
+               .num_adr = ARRAY_SIZE(rt711_sdca_2_adr),
+               .adr_d = rt711_sdca_2_adr,
+       },
+       {
+               .mask = BIT(0),
+               .num_adr = ARRAY_SIZE(rt1316_0_group2_adr),
+               .adr_d = rt1316_0_group2_adr,
+       },
+       {
+               .mask = BIT(1),
+               .num_adr = ARRAY_SIZE(rt1316_1_group2_adr),
+               .adr_d = rt1316_1_group2_adr,
+       },
+       {}
+};
+
 static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link0_rt1318_link12_rt714_link3[] = {
        {
                .mask = BIT(0),
@@ -227,6 +246,25 @@ static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link0_rt1318_link12_rt71
        {}
 };
 
+static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link0_rt1318_link12[] = {
+       {
+               .mask = BIT(0),
+               .num_adr = ARRAY_SIZE(rt711_sdca_0_adr),
+               .adr_d = rt711_sdca_0_adr,
+       },
+       {
+               .mask = BIT(1),
+               .num_adr = ARRAY_SIZE(rt1318_1_group1_adr),
+               .adr_d = rt1318_1_group1_adr,
+       },
+       {
+               .mask = BIT(2),
+               .num_adr = ARRAY_SIZE(rt1318_2_group1_adr),
+               .adr_d = rt1318_2_group1_adr,
+       },
+       {}
+};
+
 static const struct snd_soc_acpi_link_adr rpl_sdw_rt1316_link12_rt714_link0[] = {
        {
                .mask = BIT(1),
@@ -272,12 +310,24 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_sdw_machines[] = {
                .sof_tplg_filename = "sof-rpl-rt711-l0-rt1318-l12-rt714-l3.tplg",
        },
        {
+               .link_mask = 0x7, /* rt711 on link0 & two rt1318s on link1 and link2 */
+               .links = rpl_sdw_rt711_link0_rt1318_link12,
+               .drv_name = "sof_sdw",
+               .sof_tplg_filename = "sof-rpl-rt711-l0-rt1318-l12.tplg",
+       },
+       {
                .link_mask = 0x7, /* rt714 on link0 & two rt1316s on link1 and link2 */
                .links = rpl_sdw_rt1316_link12_rt714_link0,
                .drv_name = "sof_sdw",
                .sof_tplg_filename = "sof-rpl-rt1316-l12-rt714-l0.tplg",
        },
        {
+               .link_mask = 0x7, /* rt711 on link2 & two rt1316s on link0 and link1 */
+               .links = rpl_sdw_rt711_link2_rt1316_link01,
+               .drv_name = "sof_sdw",
+               .sof_tplg_filename = "sof-rpl-rt711-l2-rt1316-l01.tplg",
+       },
+       {
                .link_mask = 0x1, /* link0 required */
                .links = rpl_rvp,
                .drv_name = "sof_sdw",
index 363fa4d..b027fba 100644 (file)
@@ -182,10 +182,12 @@ config SND_SOC_MT8186_MT6366_DA7219_MAX98357
          If unsure select "N".
 
 config SND_SOC_MT8186_MT6366_RT1019_RT5682S
-       tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S codec"
+       tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S MAX98357A/MAX98360 codec"
        depends on I2C && GPIOLIB
        depends on SND_SOC_MT8186 && MTK_PMIC_WRAP
+       select SND_SOC_MAX98357A
        select SND_SOC_MT6358
        select SND_SOC_RT1015P
        select SND_SOC_RT5682S
        select SND_SOC_BT_SCO
index 8f77a0b..af44e33 100644 (file)
@@ -1083,6 +1083,21 @@ static struct snd_soc_card mt8186_mt6366_rt1019_rt5682s_soc_card = {
        .num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf),
 };
 
+static struct snd_soc_card mt8186_mt6366_rt5682s_max98360_soc_card = {
+       .name = "mt8186_rt5682s_max98360",
+       .owner = THIS_MODULE,
+       .dai_link = mt8186_mt6366_rt1019_rt5682s_dai_links,
+       .num_links = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_dai_links),
+       .controls = mt8186_mt6366_rt1019_rt5682s_controls,
+       .num_controls = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_controls),
+       .dapm_widgets = mt8186_mt6366_rt1019_rt5682s_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_widgets),
+       .dapm_routes = mt8186_mt6366_rt1019_rt5682s_routes,
+       .num_dapm_routes = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_routes),
+       .codec_conf = mt8186_mt6366_rt1019_rt5682s_codec_conf,
+       .num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf),
+};
+
 static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
 {
        struct snd_soc_card *card;
@@ -1232,9 +1247,14 @@ err_adsp_node:
 
 #if IS_ENABLED(CONFIG_OF)
 static const struct of_device_id mt8186_mt6366_rt1019_rt5682s_dt_match[] = {
-       {       .compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound",
+       {
+               .compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound",
                .data = &mt8186_mt6366_rt1019_rt5682s_soc_card,
        },
+       {
+               .compatible = "mediatek,mt8186-mt6366-rt5682s-max98360-sound",
+               .data = &mt8186_mt6366_rt5682s_max98360_soc_card,
+       },
        {}
 };
 MODULE_DEVICE_TABLE(of, mt8186_mt6366_rt1019_rt5682s_dt_match);
index 96a6d47..e7b00d1 100644 (file)
@@ -2,7 +2,6 @@
 menuconfig SND_SOC_QCOM
        tristate "ASoC support for QCOM platforms"
        depends on ARCH_QCOM || COMPILE_TEST
-       imply SND_SOC_QCOM_COMMON
        help
          Say Y or M if you want to add support to use audio devices
          in Qualcomm Technologies SOC-based platforms.
@@ -60,14 +59,16 @@ config SND_SOC_STORM
 config SND_SOC_APQ8016_SBC
        tristate "SoC Audio support for APQ8016 SBC platforms"
        select SND_SOC_LPASS_APQ8016
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        help
          Support for Qualcomm Technologies LPASS audio block in
          APQ8016 SOC-based systems.
          Say Y if you want to use audio devices on MI2S.
 
 config SND_SOC_QCOM_COMMON
-       depends on SOUNDWIRE
+       tristate
+
+config SND_SOC_QCOM_SDW
        tristate
 
 config SND_SOC_QDSP6_COMMON
@@ -144,7 +145,7 @@ config SND_SOC_MSM8996
        depends on QCOM_APR
        depends on COMMON_CLK
        select SND_SOC_QDSP6
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        help
          Support for Qualcomm Technologies LPASS audio block in
          APQ8096 SoC-based systems.
@@ -155,7 +156,7 @@ config SND_SOC_SDM845
        depends on QCOM_APR && I2C && SOUNDWIRE
        depends on COMMON_CLK
        select SND_SOC_QDSP6
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        select SND_SOC_RT5663
        select SND_SOC_MAX98927
        imply SND_SOC_CROS_EC_CODEC
@@ -169,7 +170,8 @@ config SND_SOC_SM8250
        depends on QCOM_APR && SOUNDWIRE
        depends on COMMON_CLK
        select SND_SOC_QDSP6
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_SDW
        help
          To add support for audio on Qualcomm Technologies Inc.
          SM8250 SoC-based systems.
@@ -180,7 +182,8 @@ config SND_SOC_SC8280XP
        depends on QCOM_APR && SOUNDWIRE
        depends on COMMON_CLK
        select SND_SOC_QDSP6
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_SDW
        help
          To add support for audio on Qualcomm Technologies Inc.
          SC8280XP SoC-based systems.
@@ -190,7 +193,7 @@ config SND_SOC_SC7180
        tristate "SoC Machine driver for SC7180 boards"
        depends on I2C && GPIOLIB
        depends on SOUNDWIRE || SOUNDWIRE=n
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        select SND_SOC_LPASS_SC7180
        select SND_SOC_MAX98357A
        select SND_SOC_RT5682_I2C
@@ -204,7 +207,7 @@ config SND_SOC_SC7180
 config SND_SOC_SC7280
        tristate "SoC Machine driver for SC7280 boards"
        depends on I2C && SOUNDWIRE
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        select SND_SOC_LPASS_SC7280
        select SND_SOC_MAX98357A
        select SND_SOC_WCD938X_SDW
index 8b97172..254350d 100644 (file)
@@ -28,6 +28,7 @@ snd-soc-sdm845-objs := sdm845.o
 snd-soc-sm8250-objs := sm8250.o
 snd-soc-sc8280xp-objs := sc8280xp.o
 snd-soc-qcom-common-objs := common.o
+snd-soc-qcom-sdw-objs := sdw.o
 
 obj-$(CONFIG_SND_SOC_STORM) += snd-soc-storm.o
 obj-$(CONFIG_SND_SOC_APQ8016_SBC) += snd-soc-apq8016-sbc.o
@@ -38,6 +39,7 @@ obj-$(CONFIG_SND_SOC_SC8280XP) += snd-soc-sc8280xp.o
 obj-$(CONFIG_SND_SOC_SDM845) += snd-soc-sdm845.o
 obj-$(CONFIG_SND_SOC_SM8250) += snd-soc-sm8250.o
 obj-$(CONFIG_SND_SOC_QCOM_COMMON) += snd-soc-qcom-common.o
+obj-$(CONFIG_SND_SOC_QCOM_SDW) += snd-soc-qcom-sdw.o
 
 #DSP lib
 obj-$(CONFIG_SND_SOC_QDSP6) += qdsp6/
index 49c74c1..96fe802 100644 (file)
@@ -180,120 +180,6 @@ err_put_np:
 }
 EXPORT_SYMBOL_GPL(qcom_snd_parse_of);
 
-int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
-                        struct sdw_stream_runtime *sruntime,
-                        bool *stream_prepared)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
-       int ret;
-
-       if (!sruntime)
-               return 0;
-
-       switch (cpu_dai->id) {
-       case WSA_CODEC_DMA_RX_0:
-       case WSA_CODEC_DMA_RX_1:
-       case RX_CODEC_DMA_RX_0:
-       case RX_CODEC_DMA_RX_1:
-       case TX_CODEC_DMA_TX_0:
-       case TX_CODEC_DMA_TX_1:
-       case TX_CODEC_DMA_TX_2:
-       case TX_CODEC_DMA_TX_3:
-               break;
-       default:
-               return 0;
-       }
-
-       if (*stream_prepared) {
-               sdw_disable_stream(sruntime);
-               sdw_deprepare_stream(sruntime);
-               *stream_prepared = false;
-       }
-
-       ret = sdw_prepare_stream(sruntime);
-       if (ret)
-               return ret;
-
-       /**
-        * NOTE: there is a strict hw requirement about the ordering of port
-        * enables and actual WSA881x PA enable. PA enable should only happen
-        * after soundwire ports are enabled if not DC on the line is
-        * accumulated resulting in Click/Pop Noise
-        * PA enable/mute are handled as part of codec DAPM and digital mute.
-        */
-
-       ret = sdw_enable_stream(sruntime);
-       if (ret) {
-               sdw_deprepare_stream(sruntime);
-               return ret;
-       }
-       *stream_prepared  = true;
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(qcom_snd_sdw_prepare);
-
-int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
-                          struct snd_pcm_hw_params *params,
-                          struct sdw_stream_runtime **psruntime)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *codec_dai;
-       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
-       struct sdw_stream_runtime *sruntime;
-       int i;
-
-       switch (cpu_dai->id) {
-       case WSA_CODEC_DMA_RX_0:
-       case RX_CODEC_DMA_RX_0:
-       case RX_CODEC_DMA_RX_1:
-       case TX_CODEC_DMA_TX_0:
-       case TX_CODEC_DMA_TX_1:
-       case TX_CODEC_DMA_TX_2:
-       case TX_CODEC_DMA_TX_3:
-               for_each_rtd_codec_dais(rtd, i, codec_dai) {
-                       sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream);
-                       if (sruntime != ERR_PTR(-ENOTSUPP))
-                               *psruntime = sruntime;
-               }
-               break;
-       }
-
-       return 0;
-
-}
-EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_params);
-
-int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
-                        struct sdw_stream_runtime *sruntime, bool *stream_prepared)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
-
-       switch (cpu_dai->id) {
-       case WSA_CODEC_DMA_RX_0:
-       case WSA_CODEC_DMA_RX_1:
-       case RX_CODEC_DMA_RX_0:
-       case RX_CODEC_DMA_RX_1:
-       case TX_CODEC_DMA_TX_0:
-       case TX_CODEC_DMA_TX_1:
-       case TX_CODEC_DMA_TX_2:
-       case TX_CODEC_DMA_TX_3:
-               if (sruntime && *stream_prepared) {
-                       sdw_disable_stream(sruntime);
-                       sdw_deprepare_stream(sruntime);
-                       *stream_prepared = false;
-               }
-               break;
-       default:
-               break;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free);
-
 int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd,
                            struct snd_soc_jack *jack, bool *jack_setup)
 {
index 3ef5bb6..d7f80ee 100644 (file)
@@ -5,19 +5,9 @@
 #define __QCOM_SND_COMMON_H__
 
 #include <sound/soc.h>
-#include <linux/soundwire/sdw.h>
 
 int qcom_snd_parse_of(struct snd_soc_card *card);
 int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd,
                            struct snd_soc_jack *jack, bool *jack_setup);
 
-int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
-                        struct sdw_stream_runtime *runtime,
-                        bool *stream_prepared);
-int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
-                          struct snd_pcm_hw_params *params,
-                          struct sdw_stream_runtime **psruntime);
-int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
-                        struct sdw_stream_runtime *sruntime,
-                        bool *stream_prepared);
 #endif
index 5435384..dbdaaa8 100644 (file)
@@ -1037,10 +1037,11 @@ static void of_lpass_cpu_parse_dai_data(struct device *dev,
                                        struct lpass_data *data)
 {
        struct device_node *node;
-       int ret, id;
+       int ret, i, id;
 
        /* Allow all channels by default for backwards compatibility */
-       for (id = 0; id < data->variant->num_dai; id++) {
+       for (i = 0; i < data->variant->num_dai; i++) {
+               id = data->variant->dai_driver[i].id;
                data->mi2s_playback_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
                data->mi2s_capture_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
        }
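
The fix above matters because LPASS DAI ids need not be dense: indexing the per-id mode tables by the loop counter writes the wrong slots whenever the ids are sparse. A standalone toy model (hypothetical values, not kernel code) of the corrected indexing:

#include <stdio.h>

struct dai_driver { int id; };

int main(void)
{
	struct dai_driver dai_driver[] = { { .id = 0 }, { .id = 4 } }; /* sparse ids */
	int mode[8] = { 0 };
	int i;

	/* fixed pattern: index by the driver-provided id, not by i */
	for (i = 0; i < 2; i++)
		mode[dai_driver[i].id] = 8;

	printf("mode[1]=%d mode[4]=%d\n", mode[1], mode[4]); /* 0 and 8 */
	return 0;
}
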
index ade44ad..14d9fea 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/input-event-codes.h>
 #include "qdsp6/q6afe.h"
 #include "common.h"
+#include "sdw.h"
 
 #define DRIVER_NAME            "sc8280xp"
 
diff --git a/sound/soc/qcom/sdw.c b/sound/soc/qcom/sdw.c
new file mode 100644 (file)
index 0000000..1024951
--- /dev/null
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, Linaro Limited.
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#include <linux/module.h>
+#include <sound/soc.h>
+#include "qdsp6/q6afe.h"
+#include "sdw.h"
+
+int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+                        struct sdw_stream_runtime *sruntime,
+                        bool *stream_prepared)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+       int ret;
+
+       if (!sruntime)
+               return 0;
+
+       switch (cpu_dai->id) {
+       case WSA_CODEC_DMA_RX_0:
+       case WSA_CODEC_DMA_RX_1:
+       case RX_CODEC_DMA_RX_0:
+       case RX_CODEC_DMA_RX_1:
+       case TX_CODEC_DMA_TX_0:
+       case TX_CODEC_DMA_TX_1:
+       case TX_CODEC_DMA_TX_2:
+       case TX_CODEC_DMA_TX_3:
+               break;
+       default:
+               return 0;
+       }
+
+       if (*stream_prepared) {
+               sdw_disable_stream(sruntime);
+               sdw_deprepare_stream(sruntime);
+               *stream_prepared = false;
+       }
+
+       ret = sdw_prepare_stream(sruntime);
+       if (ret)
+               return ret;
+
+       /*
+        * NOTE: there is a strict hw requirement on the ordering of port
+        * enables and the actual WSA881x PA enable. The PA should only be
+        * enabled after the soundwire ports are enabled; otherwise DC
+        * accumulates on the line, resulting in click/pop noise.
+        * PA enable/mute are handled as part of codec DAPM and digital mute.
+        */
+
+       ret = sdw_enable_stream(sruntime);
+       if (ret) {
+               sdw_deprepare_stream(sruntime);
+               return ret;
+       }
+       *stream_prepared = true;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_snd_sdw_prepare);
+
+int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+                          struct snd_pcm_hw_params *params,
+                          struct sdw_stream_runtime **psruntime)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_dai *codec_dai;
+       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+       struct sdw_stream_runtime *sruntime;
+       int i;
+
+       switch (cpu_dai->id) {
+       case WSA_CODEC_DMA_RX_0:
+       case RX_CODEC_DMA_RX_0:
+       case RX_CODEC_DMA_RX_1:
+       case TX_CODEC_DMA_TX_0:
+       case TX_CODEC_DMA_TX_1:
+       case TX_CODEC_DMA_TX_2:
+       case TX_CODEC_DMA_TX_3:
+               for_each_rtd_codec_dais(rtd, i, codec_dai) {
+                       sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream);
+                       if (sruntime != ERR_PTR(-ENOTSUPP))
+                               *psruntime = sruntime;
+               }
+               break;
+       }
+
+       return 0;
+
+}
+EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_params);
+
+int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+                        struct sdw_stream_runtime *sruntime, bool *stream_prepared)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+
+       switch (cpu_dai->id) {
+       case WSA_CODEC_DMA_RX_0:
+       case WSA_CODEC_DMA_RX_1:
+       case RX_CODEC_DMA_RX_0:
+       case RX_CODEC_DMA_RX_1:
+       case TX_CODEC_DMA_TX_0:
+       case TX_CODEC_DMA_TX_1:
+       case TX_CODEC_DMA_TX_2:
+       case TX_CODEC_DMA_TX_3:
+               if (sruntime && *stream_prepared) {
+                       sdw_disable_stream(sruntime);
+                       sdw_deprepare_stream(sruntime);
+                       *stream_prepared = false;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free);
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/qcom/sdw.h b/sound/soc/qcom/sdw.h
new file mode 100644 (file)
index 0000000..d74cbb8
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#ifndef __QCOM_SND_SDW_H__
+#define __QCOM_SND_SDW_H__
+
+#include <linux/soundwire/sdw.h>
+
+int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+                        struct sdw_stream_runtime *runtime,
+                        bool *stream_prepared);
+int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+                          struct snd_pcm_hw_params *params,
+                          struct sdw_stream_runtime **psruntime);
+int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+                        struct sdw_stream_runtime *sruntime,
+                        bool *stream_prepared);
+#endif
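
For orientation, here is a minimal sketch of how a machine driver that selects SND_SOC_QCOM_SDW is expected to wire these helpers into its snd_soc_ops. All names prefixed my_ are hypothetical, and real drivers keep one sdw_stream_runtime and prepared flag per DAI; this compressed version only illustrates the call pattern:

#include <sound/soc.h>
#include "sdw.h"

struct my_link_data {			/* hypothetical per-card state */
	struct sdw_stream_runtime *sruntime;
	bool stream_prepared;
};

static int my_hw_params(struct snd_pcm_substream *substream,
			struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct my_link_data *data = snd_soc_card_get_drvdata(rtd->card);

	/* looks up the sdw stream runtime from the codec DAIs */
	return qcom_snd_sdw_hw_params(substream, params, &data->sruntime);
}

static int my_prepare(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct my_link_data *data = snd_soc_card_get_drvdata(rtd->card);

	/* prepares and enables the stream, honouring the PA ordering rule */
	return qcom_snd_sdw_prepare(substream, data->sruntime,
				    &data->stream_prepared);
}

static int my_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct my_link_data *data = snd_soc_card_get_drvdata(rtd->card);

	return qcom_snd_sdw_hw_free(substream, data->sruntime,
				    &data->stream_prepared);
}

static const struct snd_soc_ops my_sdw_ops = {
	.hw_params = my_hw_params,
	.prepare   = my_prepare,
	.hw_free   = my_hw_free,
};
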
index 8dbe9ef..9626a9e 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/input-event-codes.h>
 #include "qdsp6/q6afe.h"
 #include "common.h"
+#include "sdw.h"
 
 #define DRIVER_NAME            "sm8250"
 #define MI2S_BCLK_RATE         1536000
index d9a3ce7..ade0507 100644 (file)
@@ -353,7 +353,9 @@ int snd_sof_dbg_init(struct snd_sof_dev *sdev)
                        return err;
        }
 
-       return 0;
+       return snd_sof_debugfs_buf_item(sdev, &sdev->fw_state,
+                                       sizeof(sdev->fw_state),
+                                       "fw_state", 0444);
 }
 EXPORT_SYMBOL_GPL(snd_sof_dbg_init);
 
index df740be..8722bbd 100644 (file)
@@ -182,7 +182,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
        const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
        const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg;
        pm_message_t pm_state;
-       u32 target_state = 0;
+       u32 target_state = snd_sof_dsp_power_target(sdev);
        int ret;
 
        /* do nothing if dsp suspend callback is not set */
@@ -192,6 +192,9 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
        if (runtime_suspend && !sof_ops(sdev)->runtime_suspend)
                return 0;
 
+       if (tplg_ops && tplg_ops->tear_down_all_pipelines)
+               tplg_ops->tear_down_all_pipelines(sdev, false);
+
        if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
                goto suspend;
 
@@ -206,7 +209,6 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
                }
        }
 
-       target_state = snd_sof_dsp_power_target(sdev);
        pm_state.event = target_state;
 
        /* Skip to platform-specific suspend if DSP is entering D0 */
@@ -217,9 +219,6 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
                goto suspend;
        }
 
-       if (tplg_ops->tear_down_all_pipelines)
-               tplg_ops->tear_down_all_pipelines(sdev, false);
-
        /* suspend DMA trace */
        sof_fw_trace_suspend(sdev, pm_state);
 
index 41ac718..4727043 100644 (file)
@@ -471,7 +471,7 @@ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
        subs = find_matching_substream(chip, stream, target->sync_ep,
                                       target->fmt_type);
        if (!subs)
-               return sync_fmt;
+               goto end;
 
        high_score = 0;
        list_for_each_entry(fp, &subs->fmt_list, list) {
@@ -485,6 +485,7 @@ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
                }
        }
 
+ end:
        if (fixed_rate)
                *fixed_rate = snd_usb_pcm_has_fixed_rate(subs);
        return sync_fmt;
index 99a66d0..d959da7 100644 (file)
@@ -160,9 +160,12 @@ find_substream_format(struct snd_usb_substream *subs,
 bool snd_usb_pcm_has_fixed_rate(struct snd_usb_substream *subs)
 {
        const struct audioformat *fp;
-       struct snd_usb_audio *chip = subs->stream->chip;
+       struct snd_usb_audio *chip;
        int rate = -1;
 
+       if (!subs)
+               return false;
+       chip = subs->stream->chip;
        if (!(chip->quirk_flags & QUIRK_FLAG_FIXED_RATE))
                return false;
        list_for_each_entry(fp, &subs->fmt_list, list) {
@@ -525,6 +528,8 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
                if (snd_usb_endpoint_compatible(chip, subs->data_endpoint,
                                                fmt, hw_params))
                        goto unlock;
+               if (stop_endpoints(subs, false))
+                       sync_pending_stops(subs);
                close_endpoints(chip, subs);
        }
 
@@ -787,11 +792,27 @@ static int apply_hw_params_minmax(struct snd_interval *it, unsigned int rmin,
        return changed;
 }
 
+/* get the specified endpoint object that is being used by other streams
+ * (i.e. the parameter is locked)
+ */
+static const struct snd_usb_endpoint *
+get_endpoint_in_use(struct snd_usb_audio *chip, int endpoint,
+                   const struct snd_usb_endpoint *ref_ep)
+{
+       const struct snd_usb_endpoint *ep;
+
+       ep = snd_usb_get_endpoint(chip, endpoint);
+       if (ep && ep->cur_audiofmt && (ep != ref_ep || ep->opened > 1))
+               return ep;
+       return NULL;
+}
+
 static int hw_rule_rate(struct snd_pcm_hw_params *params,
                        struct snd_pcm_hw_rule *rule)
 {
        struct snd_usb_substream *subs = rule->private;
        struct snd_usb_audio *chip = subs->stream->chip;
+       const struct snd_usb_endpoint *ep;
        const struct audioformat *fp;
        struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
        unsigned int rmin, rmax, r;
@@ -803,6 +824,29 @@ static int hw_rule_rate(struct snd_pcm_hw_params *params,
        list_for_each_entry(fp, &subs->fmt_list, list) {
                if (!hw_check_valid_format(subs, params, fp))
                        continue;
+
+               ep = get_endpoint_in_use(chip, fp->endpoint,
+                                        subs->data_endpoint);
+               if (ep) {
+                       hwc_debug("rate limit %d for ep#%x\n",
+                                 ep->cur_rate, fp->endpoint);
+                       rmin = min(rmin, ep->cur_rate);
+                       rmax = max(rmax, ep->cur_rate);
+                       continue;
+               }
+
+               if (fp->implicit_fb) {
+                       ep = get_endpoint_in_use(chip, fp->sync_ep,
+                                                subs->sync_endpoint);
+                       if (ep) {
+                               hwc_debug("rate limit %d for sync_ep#%x\n",
+                                         ep->cur_rate, fp->sync_ep);
+                               rmin = min(rmin, ep->cur_rate);
+                               rmax = max(rmax, ep->cur_rate);
+                               continue;
+                       }
+               }
+
                r = snd_usb_endpoint_get_clock_rate(chip, fp->clock);
                if (r > 0) {
                        if (!snd_interval_test(it, r))
@@ -872,6 +916,8 @@ static int hw_rule_format(struct snd_pcm_hw_params *params,
                          struct snd_pcm_hw_rule *rule)
 {
        struct snd_usb_substream *subs = rule->private;
+       struct snd_usb_audio *chip = subs->stream->chip;
+       const struct snd_usb_endpoint *ep;
        const struct audioformat *fp;
        struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
        u64 fbits;
@@ -881,6 +927,27 @@ static int hw_rule_format(struct snd_pcm_hw_params *params,
        list_for_each_entry(fp, &subs->fmt_list, list) {
                if (!hw_check_valid_format(subs, params, fp))
                        continue;
+
+               ep = get_endpoint_in_use(chip, fp->endpoint,
+                                        subs->data_endpoint);
+               if (ep) {
+                       hwc_debug("format limit %d for ep#%x\n",
+                                 ep->cur_format, fp->endpoint);
+                       fbits |= pcm_format_to_bits(ep->cur_format);
+                       continue;
+               }
+
+               if (fp->implicit_fb) {
+                       ep = get_endpoint_in_use(chip, fp->sync_ep,
+                                                subs->sync_endpoint);
+                       if (ep) {
+                               hwc_debug("format limit %d for sync_ep#%x\n",
+                                         ep->cur_format, fp->sync_ep);
+                               fbits |= pcm_format_to_bits(ep->cur_format);
+                               continue;
+                       }
+               }
+
                fbits |= fp->formats;
        }
        return apply_hw_params_format_bits(fmt, fbits);
@@ -913,98 +980,95 @@ static int hw_rule_period_time(struct snd_pcm_hw_params *params,
        return apply_hw_params_minmax(it, pmin, UINT_MAX);
 }
 
-/* get the EP or the sync EP for implicit fb when it's already set up */
-static const struct snd_usb_endpoint *
-get_sync_ep_from_substream(struct snd_usb_substream *subs)
-{
-       struct snd_usb_audio *chip = subs->stream->chip;
-       const struct audioformat *fp;
-       const struct snd_usb_endpoint *ep;
-
-       list_for_each_entry(fp, &subs->fmt_list, list) {
-               ep = snd_usb_get_endpoint(chip, fp->endpoint);
-               if (ep && ep->cur_audiofmt) {
-                       /* if EP is already opened solely for this substream,
-                        * we still allow us to change the parameter; otherwise
-                        * this substream has to follow the existing parameter
-                        */
-                       if (ep->cur_audiofmt != subs->cur_audiofmt || ep->opened > 1)
-                               return ep;
-               }
-               if (!fp->implicit_fb)
-                       continue;
-               /* for the implicit fb, check the sync ep as well */
-               ep = snd_usb_get_endpoint(chip, fp->sync_ep);
-               if (ep && ep->cur_audiofmt)
-                       return ep;
-       }
-       return NULL;
-}
-
 /* additional hw constraints for implicit feedback mode */
-static int hw_rule_format_implicit_fb(struct snd_pcm_hw_params *params,
-                                     struct snd_pcm_hw_rule *rule)
-{
-       struct snd_usb_substream *subs = rule->private;
-       const struct snd_usb_endpoint *ep;
-       struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
-
-       ep = get_sync_ep_from_substream(subs);
-       if (!ep)
-               return 0;
-
-       hwc_debug("applying %s\n", __func__);
-       return apply_hw_params_format_bits(fmt, pcm_format_to_bits(ep->cur_format));
-}
-
-static int hw_rule_rate_implicit_fb(struct snd_pcm_hw_params *params,
-                                   struct snd_pcm_hw_rule *rule)
-{
-       struct snd_usb_substream *subs = rule->private;
-       const struct snd_usb_endpoint *ep;
-       struct snd_interval *it;
-
-       ep = get_sync_ep_from_substream(subs);
-       if (!ep)
-               return 0;
-
-       hwc_debug("applying %s\n", __func__);
-       it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
-       return apply_hw_params_minmax(it, ep->cur_rate, ep->cur_rate);
-}
-
 static int hw_rule_period_size_implicit_fb(struct snd_pcm_hw_params *params,
                                           struct snd_pcm_hw_rule *rule)
 {
        struct snd_usb_substream *subs = rule->private;
+       struct snd_usb_audio *chip = subs->stream->chip;
+       const struct audioformat *fp;
        const struct snd_usb_endpoint *ep;
        struct snd_interval *it;
+       unsigned int rmin, rmax;
 
-       ep = get_sync_ep_from_substream(subs);
-       if (!ep)
-               return 0;
-
-       hwc_debug("applying %s\n", __func__);
        it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
-       return apply_hw_params_minmax(it, ep->cur_period_frames,
-                                     ep->cur_period_frames);
+       hwc_debug("hw_rule_period_size: (%u,%u)\n", it->min, it->max);
+       rmin = UINT_MAX;
+       rmax = 0;
+       list_for_each_entry(fp, &subs->fmt_list, list) {
+               if (!hw_check_valid_format(subs, params, fp))
+                       continue;
+               ep = get_endpoint_in_use(chip, fp->endpoint,
+                                        subs->data_endpoint);
+               if (ep) {
+                       hwc_debug("period size limit %d for ep#%x\n",
+                                 ep->cur_period_frames, fp->endpoint);
+                       rmin = min(rmin, ep->cur_period_frames);
+                       rmax = max(rmax, ep->cur_period_frames);
+                       continue;
+               }
+
+               if (fp->implicit_fb) {
+                       ep = get_endpoint_in_use(chip, fp->sync_ep,
+                                                subs->sync_endpoint);
+                       if (ep) {
+                               hwc_debug("period size limit %d for sync_ep#%x\n",
+                                         ep->cur_period_frames, fp->sync_ep);
+                               rmin = min(rmin, ep->cur_period_frames);
+                               rmax = max(rmax, ep->cur_period_frames);
+                               continue;
+                       }
+               }
+       }
+
+       if (!rmax)
+               return 0; /* no limit by implicit fb */
+       return apply_hw_params_minmax(it, rmin, rmax);
 }
 
 static int hw_rule_periods_implicit_fb(struct snd_pcm_hw_params *params,
                                       struct snd_pcm_hw_rule *rule)
 {
        struct snd_usb_substream *subs = rule->private;
+       struct snd_usb_audio *chip = subs->stream->chip;
+       const struct audioformat *fp;
        const struct snd_usb_endpoint *ep;
        struct snd_interval *it;
+       unsigned int rmin, rmax;
 
-       ep = get_sync_ep_from_substream(subs);
-       if (!ep)
-               return 0;
-
-       hwc_debug("applying %s\n", __func__);
        it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIODS);
-       return apply_hw_params_minmax(it, ep->cur_buffer_periods,
-                                     ep->cur_buffer_periods);
+       hwc_debug("hw_rule_periods: (%u,%u)\n", it->min, it->max);
+       rmin = UINT_MAX;
+       rmax = 0;
+       list_for_each_entry(fp, &subs->fmt_list, list) {
+               if (!hw_check_valid_format(subs, params, fp))
+                       continue;
+               ep = get_endpoint_in_use(chip, fp->endpoint,
+                                        subs->data_endpoint);
+               if (ep) {
+                       hwc_debug("periods limit %d for ep#%x\n",
+                                 ep->cur_buffer_periods, fp->endpoint);
+                       rmin = min(rmin, ep->cur_buffer_periods);
+                       rmax = max(rmax, ep->cur_buffer_periods);
+                       continue;
+               }
+
+               if (fp->implicit_fb) {
+                       ep = get_endpoint_in_use(chip, fp->sync_ep,
+                                                subs->sync_endpoint);
+                       if (ep) {
+                               hwc_debug("periods limit %d for sync_ep#%x\n",
+                                         ep->cur_buffer_periods, fp->sync_ep);
+                               rmin = min(rmin, ep->cur_buffer_periods);
+                               rmax = max(rmax, ep->cur_buffer_periods);
+                               continue;
+                       }
+               }
+       }
+
+       if (!rmax)
+               return 0; /* no limit by implicit fb */
+       return apply_hw_params_minmax(it, rmin, rmax);
 }
 
 /*
@@ -1113,16 +1177,6 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
                return err;
 
        /* additional hw constraints for implicit fb */
-       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
-                                 hw_rule_format_implicit_fb, subs,
-                                 SNDRV_PCM_HW_PARAM_FORMAT, -1);
-       if (err < 0)
-               return err;
-       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
-                                 hw_rule_rate_implicit_fb, subs,
-                                 SNDRV_PCM_HW_PARAM_RATE, -1);
-       if (err < 0)
-               return err;
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                  hw_rule_period_size_implicit_fb, subs,
                                  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
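
All three rewritten rules above share one accumulation pattern: every candidate format either contributes the locked value of an endpoint already in use or imposes no limit at all, and the interval is narrowed only when at least one endpoint contributed. A standalone toy model (hypothetical rates, not kernel code) of that pattern:

#include <stdio.h>

int main(void)
{
	/* per-candidate locked rates; 0 models "endpoint not in use" */
	unsigned int locked[] = { 48000, 0, 44100 };
	unsigned int rmin = ~0u, rmax = 0;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		if (!locked[i])
			continue;	/* this candidate imposes no limit */
		rmin = locked[i] < rmin ? locked[i] : rmin;
		rmax = locked[i] > rmax ? locked[i] : rmax;
	}

	if (!rmax)
		printf("no limit from in-use endpoints\n");
	else
		printf("narrow interval to [%u, %u]\n", rmin, rmax); /* [44100, 48000] */
	return 0;
}
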
index f75601c..f10f4e6 100644 (file)
@@ -1222,6 +1222,12 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
                        if (err < 0)
                                return err;
                }
+
+               /* try to set the interface... */
+               usb_set_interface(chip->dev, iface_no, 0);
+               snd_usb_init_pitch(chip, fp);
+               snd_usb_init_sample_rate(chip, fp, fp->rate_max);
+               usb_set_interface(chip->dev, iface_no, altno);
        }
        return 0;
 }
index 4041748..b66e037 100644 (file)
@@ -311,7 +311,7 @@ static int xen_drv_probe(struct xenbus_device *xb_dev,
        return xenbus_switch_state(xb_dev, XenbusStateInitialising);
 }
 
-static int xen_drv_remove(struct xenbus_device *dev)
+static void xen_drv_remove(struct xenbus_device *dev)
 {
        struct xen_snd_front_info *front_info = dev_get_drvdata(&dev->dev);
        int to = 100;
@@ -345,7 +345,6 @@ static int xen_drv_remove(struct xenbus_device *dev)
 
        xen_snd_drv_fini(front_info);
        xenbus_frontend_closed(dev);
-       return 0;
 }
 
 static const struct xenbus_device_id xen_drv_ids[] = {
index 5fc5b80..7380093 100644 (file)
@@ -192,6 +192,7 @@ struct sys_stat_struct {
 __asm__ (".section .text\n"
     ".weak __start\n"
     ".set nomips16\n"
+    ".set push\n"
     ".set    noreorder\n"
     ".option pic0\n"
     ".ent __start\n"
@@ -210,6 +211,7 @@ __asm__ (".section .text\n"
     "li $v0, 4001\n"              // NR_exit == 4001
     "syscall\n"
     ".end __start\n"
+    ".set pop\n"
     "");
 
 #endif // _NOLIBC_ARCH_MIPS_H
index ba04771..a3bdd98 100644 (file)
 #define O_RDONLY            0
 #define O_WRONLY            1
 #define O_RDWR              2
-#define O_CREAT         0x100
-#define O_EXCL          0x200
-#define O_NOCTTY        0x400
-#define O_TRUNC        0x1000
-#define O_APPEND       0x2000
-#define O_NONBLOCK     0x4000
-#define O_DIRECTORY  0x200000
+#define O_CREAT          0x40
+#define O_EXCL           0x80
+#define O_NOCTTY        0x100
+#define O_TRUNC         0x200
+#define O_APPEND        0x400
+#define O_NONBLOCK      0x800
+#define O_DIRECTORY   0x10000
 
 struct sys_stat_struct {
        unsigned long   st_dev;         /* Device.  */
index e3000b2..6f90706 100644 (file)
@@ -96,4 +96,7 @@ int ispunct(int c)
        return isgraph(c) && !isalnum(c);
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_CTYPE_H */
index 06893d6..9dc4919 100644 (file)
@@ -24,4 +24,7 @@ static int errno;
  */
 #define MAX_ERRNO 4095
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_ERRNO_H */
index ef47e71..1375522 100644 (file)
@@ -19,4 +19,7 @@ int raise(int signal)
        return sys_kill(sys_getpid(), signal);
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_SIGNAL_H */
index a3cebc4..96ac8af 100644 (file)
@@ -303,4 +303,7 @@ void perror(const char *msg)
        fprintf(stderr, "%s%serrno=%d\n", (msg && *msg) ? msg : "", (msg && *msg) ? ": " : "", errno);
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_STDIO_H */
index 92378c4..a24000d 100644 (file)
@@ -419,4 +419,7 @@ char *u64toa(uint64_t in)
        return itoa_buffer;
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_STDLIB_H */
index ad97c0d..fffdaf6 100644 (file)
@@ -88,8 +88,11 @@ void *memset(void *dst, int b, size_t len)
 {
        char *p = dst;
 
-       while (len--)
+       while (len--) {
+               /* prevent gcc from recognizing memset() here */
+               asm volatile("");
                *(p++) = b;
+       }
        return dst;
 }
 
@@ -285,4 +288,7 @@ char *strrchr(const char *s, int c)
        return (char *)ret;
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_STRING_H */
index ce3ee03..78473d3 100644 (file)
@@ -1243,5 +1243,7 @@ ssize_t write(int fd, const void *buf, size_t count)
        return ret;
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
 
 #endif /* _NOLIBC_SYS_H */
index d18b766..8465536 100644 (file)
@@ -25,4 +25,7 @@ time_t time(time_t *tptr)
        return tv.tv_sec;
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_TIME_H */
index 9599970..fbbc0e6 100644 (file)
 #define S_IFSOCK       0140000
 #define S_IFMT         0170000
 
-#define S_ISDIR(mode)  (((mode) & S_IFDIR)  == S_IFDIR)
-#define S_ISCHR(mode)  (((mode) & S_IFCHR)  == S_IFCHR)
-#define S_ISBLK(mode)  (((mode) & S_IFBLK)  == S_IFBLK)
-#define S_ISREG(mode)  (((mode) & S_IFREG)  == S_IFREG)
-#define S_ISFIFO(mode) (((mode) & S_IFIFO)  == S_IFIFO)
-#define S_ISLNK(mode)  (((mode) & S_IFLNK)  == S_IFLNK)
-#define S_ISSOCK(mode) (((mode) & S_IFSOCK) == S_IFSOCK)
+#define S_ISDIR(mode)  (((mode) & S_IFMT) == S_IFDIR)
+#define S_ISCHR(mode)  (((mode) & S_IFMT) == S_IFCHR)
+#define S_ISBLK(mode)  (((mode) & S_IFMT) == S_IFBLK)
+#define S_ISREG(mode)  (((mode) & S_IFMT) == S_IFREG)
+#define S_ISFIFO(mode) (((mode) & S_IFMT) == S_IFIFO)
+#define S_ISLNK(mode)  (((mode) & S_IFMT) == S_IFLNK)
+#define S_ISSOCK(mode) (((mode) & S_IFMT) == S_IFSOCK)
 
 /* dirent types */
 #define DT_UNKNOWN     0x0
 #define EXIT_SUCCESS 0
 #define EXIT_FAILURE 1
 
+#define FD_SETIDXMASK (8 * sizeof(unsigned long))
+#define FD_SETBITMASK (8 * sizeof(unsigned long)-1)
+
 /* for select() */
 typedef struct {
-       uint32_t fd32[(FD_SETSIZE + 31) / 32];
+       unsigned long fds[(FD_SETSIZE + FD_SETBITMASK) / FD_SETIDXMASK];
 } fd_set;
 
-#define FD_CLR(fd, set) do {                                            \
-               fd_set *__set = (set);                                  \
-               int __fd = (fd);                                        \
-               if (__fd >= 0)                                          \
-                       __set->fd32[__fd / 32] &= ~(1U << (__fd & 31)); \
+#define FD_CLR(fd, set) do {                                           \
+               fd_set *__set = (set);                                  \
+               int __fd = (fd);                                        \
+               if (__fd >= 0)                                          \
+                       __set->fds[__fd / FD_SETIDXMASK] &=             \
+                               ~(1UL << (__fd & FD_SETBITMASK));       \
        } while (0)
 
-#define FD_SET(fd, set) do {                                            \
-               fd_set *__set = (set);                                  \
-               int __fd = (fd);                                        \
-               if (__fd >= 0)                                          \
-                       __set->fd32[__fd / 32] |= 1U << (__fd & 31);    \
+#define FD_SET(fd, set) do {                                           \
+               fd_set *__set = (set);                                  \
+               int __fd = (fd);                                        \
+               if (__fd >= 0)                                          \
+                       __set->fds[__fd / FD_SETIDXMASK] |=             \
+                               1UL << (__fd & FD_SETBITMASK);          \
        } while (0)
 
-#define FD_ISSET(fd, set) ({                                                  \
-               fd_set *__set = (set);                                        \
-               int __fd = (fd);                                              \
-               int __r = 0;                                                  \
-               if (__fd >= 0)                                                \
-                       __r = !!(__set->fd32[__fd / 32] & 1U << (__fd & 31)); \
-               __r;                                                          \
+#define FD_ISSET(fd, set) ({                                           \
+               fd_set *__set = (set);                                  \
+               int __fd = (fd);                                        \
+               int __r = 0;                                            \
+               if (__fd >= 0)                                          \
+                       __r = !!(__set->fds[__fd / FD_SETIDXMASK] &     \
+                                1UL << (__fd & FD_SETBITMASK));        \
+               __r;                                                    \
        })
 
-#define FD_ZERO(set) do {                                               \
-               fd_set *__set = (set);                                  \
-               int __idx;                                              \
-               for (__idx = 0; __idx < (FD_SETSIZE+31) / 32; __idx ++) \
-                       __set->fd32[__idx] = 0;                         \
+#define FD_ZERO(set) do {                                              \
+               fd_set *__set = (set);                                  \
+               int __idx;                                              \
+               int __size = (FD_SETSIZE+FD_SETBITMASK) / FD_SETIDXMASK;\
+               for (__idx = 0; __idx < __size; __idx++)                \
+                       __set->fds[__idx] = 0;                          \
        } while (0)
 
 /* for poll() */
@@ -202,4 +209,7 @@ struct stat {
 })
 #endif
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_TYPES_H */
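
Two of the nolibc fixes above are easy to sanity-check in isolation. A standalone userspace sketch (mode-bit values copied from the header; the fd arithmetic assumes a 64-bit unsigned long):

#include <assert.h>
#include <stdio.h>

#define S_IFDIR  0040000
#define S_IFSOCK 0140000
#define S_IFMT   0170000

#define S_ISDIR_OLD(mode) (((mode) & S_IFDIR) == S_IFDIR)	/* buggy */
#define S_ISDIR_NEW(mode) (((mode) & S_IFMT)  == S_IFDIR)	/* fixed */

int main(void)
{
	/* S_IFSOCK (0140000) contains the S_IFDIR bit, so the old macro
	 * misclassified sockets as directories; comparing the whole
	 * S_IFMT field fixes that. */
	assert(S_ISDIR_OLD(S_IFSOCK) && !S_ISDIR_NEW(S_IFSOCK));
	assert(S_ISDIR_NEW(S_IFDIR));

	/* The reworked fd_set packs bits into unsigned longs: fd 80 lands
	 * in word 80 / 64 = 1, bit 80 & 63 = 16 on a 64-bit platform. */
	printf("fd 80 -> word %zu, bit %zu\n",
	       80 / (8 * sizeof(unsigned long)),
	       80 % (8 * sizeof(unsigned long)));
	return 0;
}
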
index 1c25e20..1cfcd52 100644 (file)
@@ -51,4 +51,7 @@ int tcsetpgrp(int fd, pid_t pid)
        return ioctl(fd, TIOCSPGRP, &pid);
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_UNISTD_H */
index 4350be7..4b7c8b3 100644 (file)
@@ -427,6 +427,15 @@ static int decode_instructions(struct objtool_file *file)
                        if (func->type != STT_NOTYPE && func->type != STT_FUNC)
                                continue;
 
+                       if (func->offset == sec->sh.sh_size) {
+                               /* Heuristic: likely an "end" symbol */
+                               if (func->type == STT_NOTYPE)
+                                       continue;
+                               WARN("%s(): STT_FUNC at end of section",
+                                    func->name);
+                               return -1;
+                       }
+
                        if (func->return_thunk || func->alias != func)
                                continue;
 
index c2504c3..5b87846 100644 (file)
@@ -589,6 +589,8 @@ ifndef NO_LIBELF
           $(call feature_check,libbpf-bpf_program__set_insns)
           ifeq ($(feature-libbpf-bpf_program__set_insns), 1)
             CFLAGS += -DHAVE_LIBBPF_BPF_PROGRAM__SET_INSNS
+          else
+            dummy := $(error Error: libbpf devel library needs to be >= 0.8.0 to build with LIBBPF_DYNAMIC, update or build statically with the version that comes with the kernel sources);
           endif
           $(call feature_check,libbpf-btf__raw_data)
           ifeq ($(feature-libbpf-btf__raw_data), 1)
@@ -602,6 +604,8 @@ ifndef NO_LIBELF
           dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
         endif
       else
+        # Libbpf will be built as a static library from tools/lib/bpf.
+       LIBBPF_STATIC := 1
        CFLAGS += -DHAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID
         CFLAGS += -DHAVE_LIBBPF_BPF_PROG_LOAD
         CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_PROGRAM
@@ -1314,14 +1318,6 @@ tip_instdir_SQ = $(subst ','\'',$(tip_instdir))
 
 export perfexec_instdir_SQ
 
-# If we install to $(HOME) we keep the traceevent default:
-# $(HOME)/.traceevent/plugins
-# Otherwise we install plugins into the global $(libdir).
-ifdef DESTDIR
-plugindir=$(libdir)/traceevent/plugins
-plugindir_SQ= $(subst ','\'',$(plugindir))
-endif
-
 print_var = $(eval $(print_var_code)) $(info $(MSG))
 define print_var_code
     MSG = $(shell printf '...%40s: %s' $(1) $($(1)))
index 1e32c93..b7d9c42 100644 (file)
@@ -303,10 +303,12 @@ ifneq ($(OUTPUT),)
 else
   LIBBPF_OUTPUT = $(CURDIR)/libbpf
 endif
-LIBBPF_DESTDIR = $(LIBBPF_OUTPUT)
-LIBBPF_INCLUDE = $(LIBBPF_DESTDIR)/include
-LIBBPF = $(LIBBPF_OUTPUT)/libbpf.a
-CFLAGS += -I$(LIBBPF_OUTPUT)/include
+ifdef LIBBPF_STATIC
+  LIBBPF_DESTDIR = $(LIBBPF_OUTPUT)
+  LIBBPF_INCLUDE = $(LIBBPF_DESTDIR)/include
+  LIBBPF = $(LIBBPF_OUTPUT)/libbpf.a
+  CFLAGS += -I$(LIBBPF_OUTPUT)/include
+endif
 
 ifneq ($(OUTPUT),)
   LIBSUBCMD_OUTPUT = $(abspath $(OUTPUT))/libsubcmd
@@ -393,10 +395,8 @@ endif
 export PERL_PATH
 
 PERFLIBS = $(LIBAPI) $(LIBPERF) $(LIBSUBCMD) $(LIBSYMBOL)
-ifndef NO_LIBBPF
-  ifndef LIBBPF_DYNAMIC
-    PERFLIBS += $(LIBBPF)
-  endif
+ifdef LIBBPF_STATIC
+  PERFLIBS += $(LIBBPF)
 endif
 
 # We choose to avoid "if .. else if .. else .. endif endif"
@@ -756,12 +756,15 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
        $(arch_errno_name_array) \
        $(sync_file_range_arrays) \
        $(LIBAPI) \
-       $(LIBBPF) \
        $(LIBPERF) \
        $(LIBSUBCMD) \
        $(LIBSYMBOL) \
        bpf-skel
 
+ifdef LIBBPF_STATIC
+prepare: $(LIBBPF)
+endif
+
 $(OUTPUT)%.o: %.c prepare FORCE
        $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
 
index e20656c..8ae0a15 100644 (file)
@@ -26,6 +26,7 @@
 #include "util/string2.h"
 
 #include <linux/kernel.h>
+#include <linux/numa.h>
 #include <linux/rbtree.h>
 #include <linux/string.h>
 #include <linux/zalloc.h>
@@ -185,22 +186,33 @@ static int evsel__process_alloc_event(struct evsel *evsel, struct perf_sample *s
        total_allocated += bytes_alloc;
 
        nr_allocs++;
-       return 0;
-}
 
-static int evsel__process_alloc_node_event(struct evsel *evsel, struct perf_sample *sample)
-{
-       int ret = evsel__process_alloc_event(evsel, sample);
+       /*
+        * Commit 11e9734bcb6a ("mm/slab_common: unify NUMA and UMA
+        * version of tracepoints") adds the field "node" into the
+        * tracepoints 'kmalloc' and 'kmem_cache_alloc'.
+        *
+        * The legacy tracepoints 'kmalloc_node' and 'kmem_cache_alloc_node'
+        * also contain the field "node".
+        *
+        * If the tracepoint contains the field "node", the tool counts
+        * cross-node allocations.
+        */
+       if (evsel__field(evsel, "node")) {
+               int node1, node2;
 
-       if (!ret) {
-               int node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu}),
-                   node2 = evsel__intval(evsel, sample, "node");
+               node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu});
+               node2 = evsel__intval(evsel, sample, "node");
 
-               if (node1 != node2)
+               /*
+                * If the field "node" is NUMA_NO_NODE (-1), we don't count
+                * it as a cross-node allocation.
+                */
+               if ((node2 != NUMA_NO_NODE) && (node1 != node2))
                        nr_cross_allocs++;
        }
 
-       return ret;
+       return 0;
 }
 
 static int ptr_cmp(void *, void *);
@@ -1369,8 +1381,8 @@ static int __cmd_kmem(struct perf_session *session)
                /* slab allocator */
                { "kmem:kmalloc",               evsel__process_alloc_event, },
                { "kmem:kmem_cache_alloc",      evsel__process_alloc_event, },
-               { "kmem:kmalloc_node",          evsel__process_alloc_node_event, },
-               { "kmem:kmem_cache_alloc_node", evsel__process_alloc_node_event, },
+               { "kmem:kmalloc_node",          evsel__process_alloc_event, },
+               { "kmem:kmem_cache_alloc_node", evsel__process_alloc_event, },
                { "kmem:kfree",                 evsel__process_free_event, },
                { "kmem:kmem_cache_free",       evsel__process_free_event, },
                /* page allocator */
@@ -1824,6 +1836,19 @@ static int parse_line_opt(const struct option *opt __maybe_unused,
        return 0;
 }
 
+static bool slab_legacy_tp_is_exposed(void)
+{
+       /*
+        * The tracepoints "kmem:kmalloc_node" and
+        * "kmem:kmem_cache_alloc_node" have been removed on the latest
+        * "kmem:kmem_cache_alloc_node" have been removed in recent
+        * kernels, so if the tracepoint "kmem:kmalloc_node" exists, the
+        * tool is running on an old kernel and needs to fall back to
+        * these legacy tracepoints.
+       return IS_ERR(trace_event__tp_format("kmem", "kmalloc_node")) ?
+               false : true;
+}
+
 static int __cmd_record(int argc, const char **argv)
 {
        const char * const record_args[] = {
@@ -1831,22 +1856,28 @@ static int __cmd_record(int argc, const char **argv)
        };
        const char * const slab_events[] = {
        "-e", "kmem:kmalloc",
-       "-e", "kmem:kmalloc_node",
        "-e", "kmem:kfree",
        "-e", "kmem:kmem_cache_alloc",
-       "-e", "kmem:kmem_cache_alloc_node",
        "-e", "kmem:kmem_cache_free",
        };
+       const char * const slab_legacy_events[] = {
+       "-e", "kmem:kmalloc_node",
+       "-e", "kmem:kmem_cache_alloc_node",
+       };
        const char * const page_events[] = {
        "-e", "kmem:mm_page_alloc",
        "-e", "kmem:mm_page_free",
        };
        unsigned int rec_argc, i, j;
        const char **rec_argv;
+       unsigned int slab_legacy_tp_exposed = slab_legacy_tp_is_exposed();
 
        rec_argc = ARRAY_SIZE(record_args) + argc - 1;
-       if (kmem_slab)
+       if (kmem_slab) {
                rec_argc += ARRAY_SIZE(slab_events);
+               if (slab_legacy_tp_exposed)
+                       rec_argc += ARRAY_SIZE(slab_legacy_events);
+       }
        if (kmem_page)
                rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */
 
@@ -1861,6 +1892,10 @@ static int __cmd_record(int argc, const char **argv)
        if (kmem_slab) {
                for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
                        rec_argv[i] = strdup(slab_events[j]);
+               if (slab_legacy_tp_exposed) {
+                       for (j = 0; j < ARRAY_SIZE(slab_legacy_events); j++, i++)
+                               rec_argv[i] = strdup(slab_legacy_events[j]);
+               }
        }
        if (kmem_page) {
                rec_argv[i++] = strdup("-g");
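
The cross-allocation accounting that the hunks above fold into evsel__process_alloc_event() reduces to a small predicate: an allocation counts as "cross" only when the tracepoint carries a usable node that differs from the requesting CPU's node. A standalone sketch (hypothetical helper, not perf code):

#include <stdio.h>

#define NUMA_NO_NODE (-1)

static int is_cross_alloc(int cpu_node, int alloc_node)
{
	/* NUMA_NO_NODE means the node is unknown, so it never counts */
	return alloc_node != NUMA_NO_NODE && cpu_node != alloc_node;
}

int main(void)
{
	printf("%d\n", is_cross_alloc(0, 1));		/* 1: cross-node */
	printf("%d\n", is_cross_alloc(0, 0));		/* 0: local */
	printf("%d\n", is_cross_alloc(0, NUMA_NO_NODE));/* 0: unknown node */
	return 0;
}
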
index 86e06f1..d21fe0f 100644 (file)
@@ -16,7 +16,9 @@
 
 #include "util/record.h"
 #include <api/fs/tracing_path.h>
+#ifdef HAVE_LIBBPF_SUPPORT
 #include <bpf/bpf.h>
+#endif
 #include "util/bpf_map.h"
 #include "util/rlimit.h"
 #include "builtin.h"
index bd83d36..91778b5 100644 (file)
@@ -20,6 +20,8 @@
 # undef if
 #endif
 
+typedef unsigned int __bitwise fmode_t;
+
 #define FMODE_READ             0x1
 #define FMODE_WRITE            0x2
 
index 05e818a..009d6ef 100644 (file)
@@ -222,19 +222,7 @@ installed_files_bin := bin/perf
 installed_files_bin += etc/bash_completion.d/perf
 installed_files_bin += libexec/perf-core/perf-archive
 
-installed_files_plugins := $(lib)/traceevent/plugins/plugin_cfg80211.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_scsi.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_xen.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_function.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_sched_switch.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_mac80211.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_kvm.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_kmem.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_hrtimer.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_jbd2.so
-
 installed_files_all := $(installed_files_bin)
-installed_files_all += $(installed_files_plugins)
 
 test_make_install       := $(call test_dest_files,$(installed_files_all))
 test_make_install_O     := $(call test_dest_files,$(installed_files_all))
index 265d20c..c2e323c 100644 (file)
@@ -2611,7 +2611,7 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
                                *size = sym->start - *start;
                        if (idx > 0) {
                                if (*size)
-                                       return 1;
+                                       return 0;
                        } else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
                                print_duplicate_syms(dso, sym_name);
                                return -EINVAL;
index 4dbf264..c6d21c0 100644 (file)
@@ -4,9 +4,12 @@
 
 #include <linux/list.h>
 #include <sys/resource.h>
+
+#ifdef HAVE_LIBBPF_SUPPORT
 #include <bpf/bpf.h>
 #include <bpf/btf.h>
 #include <bpf/libbpf.h>
+#endif
 
 struct evsel;
 struct target;
@@ -87,6 +90,8 @@ static inline void set_max_rlimit(void)
        setrlimit(RLIMIT_MEMLOCK, &rinf);
 }
 
+#ifdef HAVE_BPF_SKEL
+
 static inline __u32 bpf_link_get_id(int fd)
 {
        struct bpf_link_info link_info = { .id = 0, };
@@ -127,5 +132,6 @@ static inline int bperf_trigger_reading(int prog_fd, int cpu)
 
        return bpf_prog_test_run_opts(prog_fd, &opts);
 }
+#endif /* HAVE_BPF_SKEL */
 
 #endif /* __PERF_BPF_COUNTER_H */
index fdb7f5d..85973e5 100644 (file)
@@ -15,6 +15,10 @@ bool mirrored_kernelcore = false;
 
 struct page {};
 
+void __free_pages_core(struct page *page, unsigned int order)
+{
+}
+
 void memblock_free_pages(struct page *page, unsigned long pfn,
                         unsigned int order)
 {
index b57e91e..532459a 100644 (file)
@@ -124,7 +124,7 @@ void producer(struct sockaddr_un *consumer_addr)
 
        wait_for_signal(pipefd[0]);
        if (connect(cfd, (struct sockaddr *)consumer_addr,
-                    sizeof(struct sockaddr)) != 0) {
+                    sizeof(*consumer_addr)) != 0) {
                perror("Connect failed");
                kill(0, SIGTERM);
                exit(1);
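
The sizeof() fix above is easy to quantify: struct sockaddr is far smaller than struct sockaddr_un, so passing sizeof(struct sockaddr) to connect() truncated the AF_UNIX path. A standalone check (the sizes in the comment are typical Linux values):

#include <stdio.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
	/* 16 bytes vs. 110 bytes (2-byte family + 108-byte sun_path) */
	printf("sizeof(struct sockaddr)    = %zu\n", sizeof(struct sockaddr));
	printf("sizeof(struct sockaddr_un) = %zu\n", sizeof(struct sockaddr_un));
	return 0;
}
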
index dca1e6f..f11756e 100755 (executable)
 # In addition this script also checks if forcing a specific field in the
 # outer header is working.
 
+# Return 4 by default (Kselftest SKIP code)
+ERR=4
+
 if [ "$(id -u)" != "0" ]; then
        echo "Please run as root."
-       exit 0
+       exit $ERR
 fi
 if ! which tcpdump > /dev/null 2>&1; then
        echo "No tcpdump found. Required for this test."
-       exit 0
+       exit $ERR
 fi
 
 expected_tos="0x00"
 expected_ttl="0"
 failed=false
 
+readonly NS0=$(mktemp -u ns0-XXXXXXXX)
+readonly NS1=$(mktemp -u ns1-XXXXXXXX)
+
+RUN_NS0="ip netns exec ${NS0}"
+
 get_random_tos() {
        # Get a random hex tos value between 0x00 and 0xfc, a multiple of 4
        echo "0x$(tr -dc '0-9a-f' < /dev/urandom | head -c 1)\
@@ -61,7 +69,6 @@ setup() {
        local vlan="$5"
        local test_tos="0x00"
        local test_ttl="0"
-       local ns="ip netns exec testing"
 
        # We don't want a test-tos of 0x00,
        # because this is the value that we get when no tos is set.
@@ -94,14 +101,15 @@ setup() {
        printf "│%7s │%6s │%6s │%13s │%13s │%6s │" \
        "$type" "$outer" "$inner" "$tos" "$ttl" "$vlan"
 
-       # Create 'testing' netns, veth pair and connect main ns with testing ns
-       ip netns add testing
-       ip link add type veth
-       ip link set veth1 netns testing
-       ip link set veth0 up
-       $ns ip link set veth1 up
-       ip addr flush dev veth0
-       $ns ip addr flush dev veth1
+       # Create netns NS0 and NS1 and connect them with a veth pair
+       ip netns add "${NS0}"
+       ip netns add "${NS1}"
+       ip link add name veth0 netns "${NS0}" type veth \
+               peer name veth1 netns "${NS1}"
+       ip -netns "${NS0}" link set dev veth0 up
+       ip -netns "${NS1}" link set dev veth1 up
+       ip -netns "${NS0}" address flush dev veth0
+       ip -netns "${NS1}" address flush dev veth1
 
        local local_addr1=""
        local local_addr2=""
@@ -127,51 +135,59 @@ setup() {
                if [ "$type" = "gre" ]; then
                        type="gretap"
                fi
-               ip addr add 198.18.0.1/24 dev veth0
-               $ns ip addr add 198.18.0.2/24 dev veth1
-               ip link add name tep0 type $type $local_addr1 remote \
-               198.18.0.2 tos $test_tos ttl $test_ttl $vxlan $geneve
-               $ns ip link add name tep1 type $type $local_addr2 remote \
-               198.18.0.1 tos $test_tos ttl $test_ttl $vxlan $geneve
+               ip -netns "${NS0}" address add 198.18.0.1/24 dev veth0
+               ip -netns "${NS1}" address add 198.18.0.2/24 dev veth1
+               ip -netns "${NS0}" link add name tep0 type $type $local_addr1 \
+                       remote 198.18.0.2 tos $test_tos ttl $test_ttl         \
+                       $vxlan $geneve
+               ip -netns "${NS1}" link add name tep1 type $type $local_addr2 \
+                       remote 198.18.0.1 tos $test_tos ttl $test_ttl         \
+                       $vxlan $geneve
        elif [ "$outer" = "6" ]; then
                if [ "$type" = "gre" ]; then
                        type="ip6gretap"
                fi
-               ip addr add fdd1:ced0:5d88:3fce::1/64 dev veth0
-               $ns ip addr add fdd1:ced0:5d88:3fce::2/64 dev veth1
-               ip link add name tep0 type $type $local_addr1 \
-               remote fdd1:ced0:5d88:3fce::2 tos $test_tos ttl $test_ttl \
-               $vxlan $geneve
-               $ns ip link add name tep1 type $type $local_addr2 \
-               remote fdd1:ced0:5d88:3fce::1 tos $test_tos ttl $test_ttl \
-               $vxlan $geneve
+               ip -netns "${NS0}" address add fdd1:ced0:5d88:3fce::1/64 \
+                       dev veth0 nodad
+               ip -netns "${NS1}" address add fdd1:ced0:5d88:3fce::2/64 \
+                       dev veth1 nodad
+               ip -netns "${NS0}" link add name tep0 type $type $local_addr1 \
+                       remote fdd1:ced0:5d88:3fce::2 tos $test_tos           \
+                       ttl $test_ttl $vxlan $geneve
+               ip -netns "${NS1}" link add name tep1 type $type $local_addr2 \
+                       remote fdd1:ced0:5d88:3fce::1 tos $test_tos           \
+                       ttl $test_ttl $vxlan $geneve
        fi
 
        # Bring L2-tunnel link up and create VLAN on top
-       ip link set tep0 up
-       $ns ip link set tep1 up
-       ip addr flush dev tep0
-       $ns ip addr flush dev tep1
+       ip -netns "${NS0}" link set tep0 up
+       ip -netns "${NS1}" link set tep1 up
+       ip -netns "${NS0}" address flush dev tep0
+       ip -netns "${NS1}" address flush dev tep1
        local parent
        if $vlan; then
                parent="vlan99-"
-               ip link add link tep0 name ${parent}0 type vlan id 99
-               $ns ip link add link tep1 name ${parent}1 type vlan id 99
-               ip link set ${parent}0 up
-               $ns ip link set ${parent}1 up
-               ip addr flush dev ${parent}0
-               $ns ip addr flush dev ${parent}1
+               ip -netns "${NS0}" link add link tep0 name ${parent}0 \
+                       type vlan id 99
+               ip -netns "${NS1}" link add link tep1 name ${parent}1 \
+                       type vlan id 99
+               ip -netns "${NS0}" link set dev ${parent}0 up
+               ip -netns "${NS1}" link set dev ${parent}1 up
+               ip -netns "${NS0}" address flush dev ${parent}0
+               ip -netns "${NS1}" address flush dev ${parent}1
        else
                parent="tep"
        fi
 
        # Assign inner IPv4/IPv6 addresses
        if [ "$inner" = "4" ] || [ "$inner" = "other" ]; then
-               ip addr add 198.19.0.1/24 brd + dev ${parent}0
-               $ns ip addr add 198.19.0.2/24 brd + dev ${parent}1
+               ip -netns "${NS0}" address add 198.19.0.1/24 brd + dev ${parent}0
+               ip -netns "${NS1}" address add 198.19.0.2/24 brd + dev ${parent}1
        elif [ "$inner" = "6" ]; then
-               ip addr add fdd4:96cf:4eae:443b::1/64 dev ${parent}0
-               $ns ip addr add fdd4:96cf:4eae:443b::2/64 dev ${parent}1
+               ip -netns "${NS0}" address add fdd4:96cf:4eae:443b::1/64 \
+                       dev ${parent}0 nodad
+               ip -netns "${NS1}" address add fdd4:96cf:4eae:443b::2/64 \
+                       dev ${parent}1 nodad
        fi
 }
 
@@ -192,10 +208,10 @@ verify() {
                ping_dst="198.19.0.3" # Generates ARPs which are not IPv4/IPv6
        fi
        if [ "$tos_ttl" = "inherit" ]; then
-               ping -i 0.1 $ping_dst -Q "$expected_tos" -t "$expected_ttl" \
-               2>/dev/null 1>&2 & ping_pid="$!"
+               ${RUN_NS0} ping -i 0.1 $ping_dst -Q "$expected_tos"          \
+                        -t "$expected_ttl" 2>/dev/null 1>&2 & ping_pid="$!"
        else
-               ping -i 0.1 $ping_dst 2>/dev/null 1>&2 & ping_pid="$!"
+               ${RUN_NS0} ping -i 0.1 $ping_dst 2>/dev/null 1>&2 & ping_pid="$!"
        fi
        local tunnel_type_offset tunnel_type_proto req_proto_offset req_offset
        if [ "$type" = "gre" ]; then
@@ -216,10 +232,12 @@ verify() {
                                req_proto_offset="$((req_proto_offset + 4))"
                                req_offset="$((req_offset + 4))"
                        fi
-                       out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-                       ip[$tunnel_type_offset] = $tunnel_type_proto and \
-                       ip[$req_proto_offset] = 0x01 and \
-                       ip[$req_offset] = 0x08 2>/dev/null | head -n 1)"
+                       out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+                               -i veth0 -n                                   \
+                               ip[$tunnel_type_offset] = $tunnel_type_proto and \
+                               ip[$req_proto_offset] = 0x01 and              \
+                               ip[$req_offset] = 0x08 2>/dev/null            \
+                               | head -n 1)"
                elif [ "$inner" = "6" ]; then
                        req_proto_offset="44"
                        req_offset="78"
@@ -231,10 +249,12 @@ verify() {
                                req_proto_offset="$((req_proto_offset + 4))"
                                req_offset="$((req_offset + 4))"
                        fi
-                       out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-                       ip[$tunnel_type_offset] = $tunnel_type_proto and \
-                       ip[$req_proto_offset] = 0x3a and \
-                       ip[$req_offset] = 0x80 2>/dev/null | head -n 1)"
+                       out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+                               -i veth0 -n                                   \
+                               ip[$tunnel_type_offset] = $tunnel_type_proto and \
+                               ip[$req_proto_offset] = 0x3a and              \
+                               ip[$req_offset] = 0x80 2>/dev/null            \
+                               | head -n 1)"
                elif [ "$inner" = "other" ]; then
                        req_proto_offset="36"
                        req_offset="45"
@@ -250,11 +270,13 @@ verify() {
                                expected_tos="0x00"
                                expected_ttl="64"
                        fi
-                       out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-                       ip[$tunnel_type_offset] = $tunnel_type_proto and \
-                       ip[$req_proto_offset] = 0x08 and \
-                       ip[$((req_proto_offset + 1))] = 0x06 and \
-                       ip[$req_offset] = 0x01 2>/dev/null | head -n 1)"
+                       out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+                               -i veth0 -n                                   \
+                               ip[$tunnel_type_offset] = $tunnel_type_proto and \
+                               ip[$req_proto_offset] = 0x08 and              \
+                               ip[$((req_proto_offset + 1))] = 0x06 and      \
+                               ip[$req_offset] = 0x01 2>/dev/null            \
+                               | head -n 1)"
                fi
        elif [ "$outer" = "6" ]; then
                if [ "$type" = "gre" ]; then
@@ -273,10 +295,12 @@ verify() {
                                req_proto_offset="$((req_proto_offset + 4))"
                                req_offset="$((req_offset + 4))"
                        fi
-                       out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-                       ip6[$tunnel_type_offset] = $tunnel_type_proto and \
-                       ip6[$req_proto_offset] = 0x01 and \
-                       ip6[$req_offset] = 0x08 2>/dev/null | head -n 1)"
+                       out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+                               -i veth0 -n                                   \
+                               ip6[$tunnel_type_offset] = $tunnel_type_proto and \
+                               ip6[$req_proto_offset] = 0x01 and             \
+                               ip6[$req_offset] = 0x08 2>/dev/null           \
+                               | head -n 1)"
                elif [ "$inner" = "6" ]; then
                        local req_proto_offset="72"
                        local req_offset="106"
@@ -288,10 +312,12 @@ verify() {
                                req_proto_offset="$((req_proto_offset + 4))"
                                req_offset="$((req_offset + 4))"
                        fi
-                       out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-                       ip6[$tunnel_type_offset] = $tunnel_type_proto and \
-                       ip6[$req_proto_offset] = 0x3a and \
-                       ip6[$req_offset] = 0x80 2>/dev/null | head -n 1)"
+                       out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+                               -i veth0 -n                                   \
+                               ip6[$tunnel_type_offset] = $tunnel_type_proto and \
+                               ip6[$req_proto_offset] = 0x3a and             \
+                               ip6[$req_offset] = 0x80 2>/dev/null           \
+                               | head -n 1)"
                elif [ "$inner" = "other" ]; then
                        local req_proto_offset="64"
                        local req_offset="73"
@@ -307,15 +333,17 @@ verify() {
                                expected_tos="0x00"
                                expected_ttl="64"
                        fi
-                       out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-                       ip6[$tunnel_type_offset] = $tunnel_type_proto and \
-                       ip6[$req_proto_offset] = 0x08 and \
-                       ip6[$((req_proto_offset + 1))] = 0x06 and \
-                       ip6[$req_offset] = 0x01 2>/dev/null | head -n 1)"
+                       out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+                               -i veth0 -n                                   \
+                               ip6[$tunnel_type_offset] = $tunnel_type_proto and \
+                               ip6[$req_proto_offset] = 0x08 and             \
+                               ip6[$((req_proto_offset + 1))] = 0x06 and     \
+                               ip6[$req_offset] = 0x01 2>/dev/null           \
+                               | head -n 1)"
                fi
        fi
        kill -9 $ping_pid
-       wait $ping_pid 2>/dev/null
+       wait $ping_pid 2>/dev/null || true
        result="FAIL"
        if [ "$outer" = "4" ]; then
                captured_ttl="$(get_field "ttl" "$out")"
@@ -351,11 +379,35 @@ verify() {
 }
 
 cleanup() {
-       ip link del veth0 2>/dev/null
-       ip netns del testing 2>/dev/null
-       ip link del tep0 2>/dev/null
+       ip netns del "${NS0}" 2>/dev/null
+       ip netns del "${NS1}" 2>/dev/null
 }
 
+exit_handler() {
+       # Don't exit immediately if one of the intermediate commands fails.
+       # We might be called at the end of the script, when the network
+       # namespaces have already been deleted. So cleanup() may fail, but we
+       # still need to run until 'exit $ERR' or the script won't return the
+       # correct error code.
+       set +e
+
+       cleanup
+
+       exit $ERR
+}
+
+# Restore the default SIGINT handler (just in case) and exit.
+# The exit handler will take care of cleaning everything up.
+interrupted() {
+       trap - INT
+
+       exit $ERR
+}
+
+set -e
+trap exit_handler EXIT
+trap interrupted INT
+
 printf "┌────────┬───────┬───────┬──────────────┬"
 printf "──────────────┬───────┬────────┐\n"
 for type in gre vxlan geneve; do
@@ -385,6 +437,10 @@ done
 printf "└────────┴───────┴───────┴──────────────┴"
 printf "──────────────┴───────┴────────┘\n"
 
+# All tests done.
+# Set ERR appropriately: it will be returned by the exit handler.
 if $failed; then
-       exit 1
+       ERR=1
+else
+       ERR=0
 fi
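
Taken together, the trap hunks above implement a common shell pattern: run under "set -e" so any unexpected failure aborts the script, funnel every exit through an EXIT trap that performs best-effort cleanup, and report the test verdict through a single ERR variable. A minimal sketch of the pattern on its own (the namespace name is a placeholder):

	#!/bin/sh
	ERR=1	# pessimistic default: a premature exit reports failure

	cleanup() {
		ip netns del "ns-demo" 2>/dev/null
	}

	exit_handler() {
		# Undo 'set -e': cleanup is best-effort (the namespace may
		# already be gone) and must not keep us from 'exit $ERR'.
		set +e
		cleanup
		exit $ERR
	}

	# On Ctrl-C, restore the default SIGINT handler and exit; the
	# EXIT trap still runs and does the cleanup.
	interrupted() {
		trap - INT
		exit $ERR
	}

	set -e
	trap exit_handler EXIT
	trap interrupted INT

	ip netns add "ns-demo"	# any failure from here on hits the EXIT trap
	# ... test body ...
	ERR=0			# only reached when everything passed

The "|| true" appended to "wait $ping_pid" earlier in the diff follows the same discipline: ping was killed with SIGKILL, so wait returns non-zero by design, and under "set -e" that would otherwise abort the script.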
index 13e8829..9c60384 100644
@@ -3954,6 +3954,13 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
        }
 
        mutex_lock(&kvm->lock);
+
+#ifdef CONFIG_LOCKDEP
+       /* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */
+       mutex_lock(&vcpu->mutex);
+       mutex_unlock(&vcpu->mutex);
+#endif
+
        if (kvm_get_vcpu_by_id(kvm, id)) {
                r = -EEXIST;
                goto unlock_vcpu_destroy;
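
The kvm_vm_ioctl_create_vcpu() hunk acquires and immediately releases vcpu->mutex while holding kvm->lock. No data is protected by that pair; its only purpose is to record the kvm->lock -> vcpu->mutex ordering in lockdep's dependency graph early, so a later attempt to take the locks in the opposite order is flagged even if the two paths never actually race. A minimal sketch of the same priming idiom with generic locks (outer_lock/inner_lock are illustrative names, not kernel API):

	#include <linux/mutex.h>

	static DEFINE_MUTEX(outer_lock);	/* plays the role of kvm->lock */
	static DEFINE_MUTEX(inner_lock);	/* plays the role of vcpu->mutex */

	static void prime_lock_order(void)
	{
	#ifdef CONFIG_LOCKDEP
		/*
		 * Take and drop inner_lock under outer_lock purely to teach
		 * lockdep the outer -> inner ordering.  The pair is a no-op
		 * for correctness; it only seeds the dependency graph.
		 */
		mutex_lock(&outer_lock);
		mutex_lock(&inner_lock);
		mutex_unlock(&inner_lock);
		mutex_unlock(&outer_lock);
	#endif
	}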