Merge tag 'sched-core-2022-08-01' of git://git.kernel.org/pub/scm/linux/kernel/git...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 1 Aug 2022 18:49:06 +0000 (11:49 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 1 Aug 2022 18:49:06 +0000 (11:49 -0700)
Pull scheduler updates from Ingo Molnar:
"Load-balancing improvements:

   - Improve NUMA balancing on AMD Zen systems for affine workloads.

   - Improve the handling of reduced-capacity CPUs in load-balancing.

   - Energy Model improvements: fix & refine all the energy fairness
     metrics (PELT), and remove the conservative threshold requiring 6%
     energy savings to migrate a task. Doing this improves power
     efficiency for most workloads, and also increases the reliability
     of energy-efficiency scheduling. (See the first sketch after this
     list.)

   - Optimize/tweak select_idle_cpu() to spend (much) less time
     searching for an idle CPU on overloaded systems. There are reports
     of several milliseconds spent there on large systems with large
     workloads. (See the second sketch after this list.)

      [ Since the search logic changed, there might be behavioral side
        effects. ]

   - Improve NUMA imbalance behavior. On certain systems with spare
     capacity, initial placement of tasks is non-deterministic, and such
     an artificial placement imbalance can persist for a long time,
     hurting (and sometimes helping) performance.

     The fix is to make fork-time task placement consistent with runtime
     NUMA balancing placement.

     Note that some performance regressions were reported against this,
     caused by workloads that are not memory bandwidth limited, which
     benefit from the artificial locality of the placement bug(s). Mel
     Gorman's conclusion, with which we concur, was that consistency is
     better than random workload benefits from non-deterministic bugs:

        "Given there is no crystal ball and it's a tradeoff, I think
         it's better to be consistent and use similar logic at both fork
         time and runtime even if it doesn't have universal benefit."

   - Improve core scheduling by fixing a bug in
     sched_core_update_cookie() that caused unnecessary forced idling.

   - Improve wakeup-balancing by allowing same-LLC wakeup of idle CPUs
     for newly woken tasks.

   - Fix a newidle balancing bug that introduced unnecessary wakeup
     latencies.
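
      [ First sketch: an illustrative (not verbatim) view of removing
        the feec() energy margin. The names and the exact margin
        formula below are assumptions for illustration only. ]

        /* Before: only migrate when the estimated saving cleared a
         * conservative margin (~6%) of the current energy estimate: */
        if (cur_energy - best_energy > (cur_energy >> 4))
                migrate_to(best_cpu);   /* migrate_to() is a stand-in */

        /* After: any strictly lower energy estimate wins, trusting
         * the refined PELT-based estimates instead of a margin: */
        if (best_energy < cur_energy)
                migrate_to(best_cpu);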
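
      [ Second sketch: the idea behind the bounded idle-CPU search
        (SIS_UTIL) -- derive a scan budget from the domain's average
        utilization so an overloaded LLC gives up quickly instead of
        scanning every CPU on each wakeup. A minimal sketch only;
        budget_from_util() is a made-up helper. ]

        static int sketch_select_idle_cpu(struct sched_domain *sd, int target)
        {
                int cpu, nr = budget_from_util(sd);  /* fewer slots when busy */

                for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
                        if (--nr <= 0)
                                return -1;           /* budget exhausted */
                        if (available_idle_cpu(cpu))
                                return cpu;
                }
                return -1;
        }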

  ABI improvements/fixes:

   - Do not check capabilities and do not issue capability-check denial
     messages when a scheduler syscall doesn't require privileges, such
     as increasing niceness. (See the first sketch after this list.)

   - Add forced-idle accounting to cgroups too.

   - Fix/improve the RSEQ ABI to not just silently accept unknown
     flags. (No existing tooling is known to rely on the previous
     behavior; see the second sketch after this list.)

   - Deprecate the (unused) RSEQ_CS_FLAG_NO_RESTART_ON_* flags.
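
      [ First sketch: the capability-check change in spirit. can_nice()
        and task_nice() are real scheduler helpers, but the function
        below is an illustration, not the actual patch. ]

        static int sketch_nice_permission(struct task_struct *p, int nice)
        {
                /* Raising the nice value lowers priority and needs no
                 * privilege, so don't call capable() -- which would
                 * log a spurious audit denial -- for that case: */
                if (nice < task_nice(p) && !can_nice(p, nice))
                        return -EPERM;
                return 0;
        }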
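
      [ Second sketch: the stricter RSEQ flag handling in spirit.
        RSEQ_CS_FLAGS_KNOWN is a made-up mask name; the point is that
        unknown bits, previously ignored, now terminate the task so
        those bits can later be given a meaning safely. ]

        static int sketch_rseq_check_flags(u32 flags)
        {
                if (flags & ~RSEQ_CS_FLAGS_KNOWN) {
                        force_sig(SIGSEGV);     /* kill the offender */
                        return -EINVAL;
                }
                return 0;
        }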

  Optimizations:

   - Optimize & simplify leaf_cfs_rq_list()

   - Micro-optimize set_nr_{and_not,if}_polling() via try_cmpxchg().
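
      [ A sketch of the try_cmpxchg() pattern in general, not the exact
        set_nr_and_not_polling() code; 'word' and MY_FLAG are
        placeholders. try_cmpxchg() stores the freshly observed value
        back into 'old' on failure, which shrinks the retry loop: ]

        unsigned long old = READ_ONCE(word), new, cur;

        /* before: classic cmpxchg() loop */
        for (;;) {
                new = old | MY_FLAG;
                cur = cmpxchg(&word, old, new);
                if (cur == old)
                        break;
                old = cur;
        }

        /* after: try_cmpxchg() reloads 'old' itself on failure */
        do {
                new = old | MY_FLAG;
        } while (!try_cmpxchg(&word, &old, new));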

  Misc fixes & cleanups:

   - Fix the RSEQ self-tests on RISC-V and Glibc 2.35 systems.

   - Fix a full-NOHZ bug that can in some cases result in the tick not
     being re-enabled when the last SCHED_RT task is gone from a
     runqueue but there are still SCHED_OTHER tasks around.

   - Various PREEMPT_RT related fixes.

   - Misc cleanups & smaller fixes"

* tag 'sched-core-2022-08-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (32 commits)
  rseq: Kill process when unknown flags are encountered in ABI structures
  rseq: Deprecate RSEQ_CS_FLAG_NO_RESTART_ON_* flags
  sched/core: Fix the bug that task won't enqueue into core tree when update cookie
  nohz/full, sched/rt: Fix missed tick-reenabling bug in dequeue_task_rt()
  sched/core: Always flush pending blk_plug
  sched/fair: fix case with reduced capacity CPU
  sched/core: Use try_cmpxchg in set_nr_{and_not,if}_polling
  sched/core: add forced idle accounting for cgroups
  sched/fair: Remove the energy margin in feec()
  sched/fair: Remove task_util from effective utilization in feec()
  sched/fair: Use the same cpumask per-PD throughout find_energy_efficient_cpu()
  sched/fair: Rename select_idle_mask to select_rq_mask
  sched, drivers: Remove max param from effective_cpu_util()/sched_cpu_util()
  sched/fair: Decay task PELT values during wakeup migration
  sched/fair: Provide u64 read for 32-bits arch helper
  sched/fair: Introduce SIS_UTIL to search idle CPU based on sum of util_avg
  sched: only perform capability check on privileged operation
  sched: Remove unused function group_first_cpu()
  sched/fair: Remove redundant word " *"
  selftests/rseq: check if libc rseq support is registered
  ...

1842 files changed:
.mailmap
CREDITS
Documentation/ABI/testing/sysfs-bus-iio-vf610
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/admin-guide/hw-vuln/index.rst
Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst [new file with mode: 0644]
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/perf/hns3-pmu.rst [new file with mode: 0644]
Documentation/admin-guide/perf/index.rst
Documentation/admin-guide/pm/cpuidle.rst
Documentation/arm64/elf_hwcaps.rst
Documentation/arm64/memory.rst
Documentation/arm64/silicon-errata.rst
Documentation/core-api/kernel-api.rst
Documentation/core-api/protection-keys.rst
Documentation/core-api/symbol-namespaces.rst
Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml
Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
Documentation/devicetree/bindings/hwmon/ti,tmp401.yaml
Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.yaml
Documentation/devicetree/bindings/net/ethernet-controller.yaml
Documentation/devicetree/bindings/net/fsl,fec.yaml
Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
Documentation/devicetree/bindings/sound/qcom,lpass-cpu.yaml
Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.yaml
Documentation/devicetree/bindings/usb/generic-ehci.yaml
Documentation/devicetree/bindings/usb/generic-ohci.yaml
Documentation/driver-api/firmware/other_interfaces.rst
Documentation/driver-api/gpio/board.rst
Documentation/driver-api/gpio/consumer.rst
Documentation/driver-api/gpio/intro.rst
Documentation/features/vm/ioremap_prot/arch-support.txt
Documentation/filesystems/btrfs.rst
Documentation/filesystems/ext4/attributes.rst
Documentation/filesystems/ext4/bigalloc.rst
Documentation/filesystems/ext4/bitmaps.rst
Documentation/filesystems/ext4/blockgroup.rst
Documentation/filesystems/ext4/blockmap.rst
Documentation/filesystems/ext4/checksums.rst
Documentation/filesystems/ext4/directory.rst
Documentation/filesystems/ext4/eainode.rst
Documentation/filesystems/ext4/group_descr.rst
Documentation/filesystems/ext4/ifork.rst
Documentation/filesystems/ext4/inlinedata.rst
Documentation/filesystems/ext4/inodes.rst
Documentation/filesystems/ext4/journal.rst
Documentation/filesystems/ext4/mmp.rst
Documentation/filesystems/ext4/overview.rst
Documentation/filesystems/ext4/special_inodes.rst
Documentation/filesystems/ext4/super.rst
Documentation/filesystems/netfs_library.rst
Documentation/kbuild/llvm.rst
Documentation/livepatch/module-elf-format.rst
Documentation/loongarch/introduction.rst
Documentation/loongarch/irq-chip-model.rst
Documentation/memory-barriers.txt
Documentation/networking/dsa/dsa.rst
Documentation/networking/ip-sysctl.rst
Documentation/networking/phy.rst
Documentation/process/maintainer-netdev.rst
Documentation/sound/soc/dai.rst
Documentation/translations/it_IT/core-api/symbol-namespaces.rst
Documentation/translations/zh_CN/core-api/kernel-api.rst
Documentation/translations/zh_CN/core-api/symbol-namespaces.rst
Documentation/translations/zh_CN/loongarch/introduction.rst
Documentation/translations/zh_CN/loongarch/irq-chip-model.rst
Documentation/virt/kvm/api.rst
Documentation/virt/kvm/arm/hyp-abi.rst
Documentation/vm/hwpoison.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/aspeed-bmc-qcom-dc-scm-v1.dts [moved from arch/arm/boot/dts/aspeed-bmc-nuvia-dc-scm.dts with 97% similarity]
arch/arm/boot/dts/at91-sam9x60ek.dts
arch/arm/boot/dts/at91-sama5d2_icp.dts
arch/arm/boot/dts/at91-sama5d3_ksz9477_evb.dts
arch/arm/boot/dts/bcm2711-rpi-400.dts
arch/arm/boot/dts/imx6qdl-colibri.dtsi
arch/arm/boot/dts/imx6qdl-ts7970.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6ull-colibri.dtsi
arch/arm/boot/dts/imx7d-smegw01.dts
arch/arm/boot/dts/imx7s.dtsi
arch/arm/boot/dts/lan966x-kontron-kswitch-d10-mmt.dtsi
arch/arm/boot/dts/lan966x.dtsi
arch/arm/boot/dts/qcom-msm8974.dtsi
arch/arm/boot/dts/sama5d2.dtsi
arch/arm/boot/dts/stm32mp15-scmi.dtsi [new file with mode: 0644]
arch/arm/boot/dts/stm32mp151.dtsi
arch/arm/boot/dts/stm32mp157a-dk1-scmi.dts
arch/arm/boot/dts/stm32mp157c-dk2-scmi.dts
arch/arm/boot/dts/stm32mp157c-ed1-scmi.dts
arch/arm/boot/dts/stm32mp157c-ev1-scmi.dts
arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
arch/arm/configs/mxs_defconfig
arch/arm/include/asm/dma.h
arch/arm/include/asm/domain.h
arch/arm/include/asm/io.h
arch/arm/include/asm/mach/map.h
arch/arm/include/asm/ptrace.h
arch/arm/kernel/entry-common.S
arch/arm/lib/findbit.S
arch/arm/mach-at91/pm.c
arch/arm/mach-axxia/platsmp.c
arch/arm/mach-cns3xxx/core.c
arch/arm/mach-exynos/exynos.c
arch/arm/mach-meson/platsmp.c
arch/arm/mach-pxa/corgi.c
arch/arm/mach-pxa/hx4700.c
arch/arm/mach-pxa/icontrol.c
arch/arm/mach-pxa/littleton.c
arch/arm/mach-pxa/magician.c
arch/arm/mach-pxa/spitz.c
arch/arm/mach-pxa/z2.c
arch/arm/mach-rockchip/pm.c
arch/arm/mach-spear/time.c
arch/arm/mm/Kconfig
arch/arm/mm/alignment.c
arch/arm/mm/ioremap.c
arch/arm/mm/mmu.c
arch/arm/mm/nommu.c
arch/arm/mm/proc-v7-bugs.c
arch/arm/probes/decode.h
arch/arm/xen/p2m.c
arch/arm64/Kconfig
arch/arm64/boot/Makefile
arch/arm64/boot/dts/broadcom/bcm4908/bcm4906.dtsi
arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
arch/arm64/boot/dts/exynos/exynos7885.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/freescale/imx8mp-evk.dts
arch/arm64/boot/dts/freescale/imx8mp-icore-mx8mp-edimm2.2.dts
arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
arch/arm64/boot/dts/freescale/imx8mp.dtsi
arch/arm64/boot/dts/freescale/s32g2.dtsi
arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
arch/arm64/boot/dts/qcom/msm8994.dtsi
arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi
arch/arm64/boot/dts/qcom/sdm845.dtsi
arch/arm64/boot/dts/qcom/sm8450.dtsi
arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
arch/arm64/boot/dts/ti/k3-am64-main.dtsi
arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
arch/arm64/include/asm/asm-extable.h
arch/arm64/include/asm/asm-uaccess.h
arch/arm64/include/asm/asm_pointer_auth.h
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/barrier.h
arch/arm64/include/asm/cache.h
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/cpu.h
arch/arm64/include/asm/cpu_ops.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cpuidle.h
arch/arm64/include/asm/el2_setup.h
arch/arm64/include/asm/fixmap.h
arch/arm64/include/asm/hwcap.h
arch/arm64/include/asm/io.h
arch/arm64/include/asm/kernel-pgtable.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/uaccess.h
arch/arm64/include/asm/virt.h
arch/arm64/include/uapi/asm/hwcap.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/acpi.c
arch/arm64/kernel/acpi_numa.c
arch/arm64/kernel/alternative.c
arch/arm64/kernel/armv8_deprecated.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuidle.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/entry-ftrace.S
arch/arm64/kernel/entry.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/head.S
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/hyp-stub.S
arch/arm64/kernel/idreg-override.c
arch/arm64/kernel/image-vars.h
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/kuser32.S
arch/arm64/kernel/mte.c
arch/arm64/kernel/pi/Makefile [new file with mode: 0644]
arch/arm64/kernel/pi/kaslr_early.c [new file with mode: 0644]
arch/arm64/kernel/setup.c
arch/arm64/kernel/signal.c
arch/arm64/kernel/sigreturn32.S
arch/arm64/kernel/sleep.S
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/suspend.c
arch/arm64/kernel/traps.c
arch/arm64/kernel/vdso/Makefile
arch/arm64/kernel/vdso/vdso.lds.S
arch/arm64/kernel/vdso32/Makefile
arch/arm64/kernel/vdso32/vdso.lds.S
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/kvm/arch_timer.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/fpsimd.c
arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/sys_regs.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/vgic/vgic-mmio-v2.c
arch/arm64/kvm/vgic/vgic-mmio-v3.c
arch/arm64/kvm/vgic/vgic-mmio.c
arch/arm64/kvm/vgic/vgic-mmio.h
arch/arm64/kvm/vmid.c
arch/arm64/lib/mte.S
arch/arm64/mm/cache.S
arch/arm64/mm/copypage.c
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/extable.c
arch/arm64/mm/fault.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/init.c
arch/arm64/mm/ioremap.c
arch/arm64/mm/kasan_init.c
arch/arm64/mm/mmu.c
arch/arm64/mm/mteswap.c
arch/arm64/mm/proc.S
arch/arm64/tools/cpucaps
arch/arm64/tools/gen-sysreg.awk
arch/arm64/tools/sysreg
arch/csky/include/asm/tlb.h
arch/loongarch/Kconfig
arch/loongarch/include/asm/asmmacro.h
arch/loongarch/include/asm/atomic.h
arch/loongarch/include/asm/barrier.h
arch/loongarch/include/asm/branch.h
arch/loongarch/include/asm/cmpxchg.h
arch/loongarch/include/asm/compiler.h [deleted file]
arch/loongarch/include/asm/elf.h
arch/loongarch/include/asm/fpregdef.h
arch/loongarch/include/asm/futex.h
arch/loongarch/include/asm/irqflags.h
arch/loongarch/include/asm/local.h
arch/loongarch/include/asm/loongson.h
arch/loongarch/include/asm/page.h
arch/loongarch/include/asm/pgtable.h
arch/loongarch/include/asm/processor.h
arch/loongarch/include/asm/stacktrace.h
arch/loongarch/include/asm/thread_info.h
arch/loongarch/include/asm/tlb.h
arch/loongarch/include/asm/uaccess.h
arch/loongarch/kernel/asm-offsets.c
arch/loongarch/kernel/cacheinfo.c
arch/loongarch/kernel/cpu-probe.c
arch/loongarch/kernel/entry.S
arch/loongarch/kernel/env.c
arch/loongarch/kernel/fpu.S
arch/loongarch/kernel/genex.S
arch/loongarch/kernel/head.S
arch/loongarch/kernel/numa.c
arch/loongarch/kernel/ptrace.c
arch/loongarch/kernel/reset.c
arch/loongarch/kernel/setup.c
arch/loongarch/kernel/smp.c
arch/loongarch/kernel/switch.S
arch/loongarch/kernel/traps.c
arch/loongarch/kernel/vmlinux.lds.S
arch/loongarch/lib/clear_user.S
arch/loongarch/lib/copy_user.S
arch/loongarch/lib/delay.c
arch/loongarch/mm/page.S
arch/loongarch/mm/tlb.c
arch/loongarch/mm/tlbex.S
arch/loongarch/vdso/Makefile
arch/m68k/Kconfig.cpu
arch/m68k/Kconfig.debug
arch/m68k/Kconfig.machine
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/bitops.h
arch/m68k/include/asm/processor.h
arch/m68k/include/uapi/asm/bootinfo-virt.h
arch/m68k/kernel/traps.c
arch/m68k/mac/iop.c
arch/m68k/mac/macints.c
arch/m68k/q40/q40ints.c
arch/m68k/sun3/mmu_emu.c
arch/m68k/virt/config.c
arch/m68k/virt/ints.c
arch/m68k/virt/platform.c
arch/mips/boot/dts/ingenic/x1000.dtsi
arch/mips/boot/dts/ingenic/x1830.dtsi
arch/mips/generic/board-ranchu.c
arch/mips/lantiq/falcon/sysctrl.c
arch/mips/lantiq/irq.c
arch/mips/lantiq/xway/sysctrl.c
arch/mips/mti-malta/malta-time.c
arch/mips/pic32/pic32mzda/init.c
arch/mips/pic32/pic32mzda/time.c
arch/mips/ralink/of.c
arch/mips/vr41xx/common/icu.c
arch/openrisc/kernel/unwinder.c
arch/parisc/Kconfig
arch/parisc/include/asm/fb.h
arch/parisc/kernel/asm-offsets.c
arch/parisc/kernel/cache.c
arch/parisc/kernel/unaligned.c
arch/parisc/math-emu/decode_exc.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/bpf_perf_event.h [new file with mode: 0644]
arch/powerpc/include/asm/tlb.h
arch/powerpc/include/uapi/asm/bpf_perf_event.h [deleted file]
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/prom_init_check.sh
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/nohash/book3e_pgtable.c
arch/powerpc/platforms/microwatt/microwatt.h [new file with mode: 0644]
arch/powerpc/platforms/microwatt/rng.c
arch/powerpc/platforms/microwatt/setup.c
arch/powerpc/platforms/powernv/powernv.h
arch/powerpc/platforms/powernv/rng.c
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/pseries/pseries.h
arch/powerpc/platforms/pseries/rng.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/xive/spapr.c
arch/riscv/Kconfig
arch/riscv/Kconfig.erratas
arch/riscv/Makefile
arch/riscv/boot/dts/canaan/canaan_kd233.dts
arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
arch/riscv/boot/dts/microchip/mpfs.dtsi
arch/riscv/errata/sifive/errata.c
arch/riscv/include/asm/errata_list.h
arch/riscv/include/asm/pgtable-64.h
arch/riscv/include/asm/pgtable.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/cpufeature.c
arch/riscv/kernel/elf_kexec.c
arch/riscv/kvm/mmu.c
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vmid.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/crypto/arch_random.c
arch/s390/include/asm/archrandom.h
arch/s390/include/asm/nospec-insn.h
arch/s390/include/asm/qdio.h
arch/s390/include/asm/tlb.h
arch/s390/kernel/crash_dump.c
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/perf_pai_crypto.c
arch/s390/kernel/setup.c
arch/s390/lib/Makefile
arch/s390/lib/expoline/Makefile [new file with mode: 0644]
arch/s390/lib/expoline/expoline.S [moved from arch/s390/lib/expoline.S with 100% similarity]
arch/s390/purgatory/Makefile
arch/sh/include/asm/io.h
arch/sparc/Kconfig
arch/sparc/include/asm/tlb_64.h
arch/um/include/asm/page.h
arch/um/include/shared/mem.h
arch/um/kernel/um_arch.c
arch/um/os-Linux/skas/process.c
arch/x86/.gitignore
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/Makefile
arch/x86/boot/compressed/ident_map_64.c
arch/x86/coco/tdx/tdx.c
arch/x86/entry/Makefile
arch/x86/entry/calling.h
arch/x86/entry/entry.S [new file with mode: 0644]
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/entry/vdso/Makefile
arch/x86/entry/vsyscall/vsyscall_emu_64.S
arch/x86/events/intel/lbr.c
arch/x86/hyperv/hv_init.c
arch/x86/hyperv/ivm.c
arch/x86/include/asm/alternative.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/disabled-features.h
arch/x86/include/asm/e820/api.h
arch/x86/include/asm/efi.h
arch/x86/include/asm/fpu/api.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/linkage.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/mwait.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/pci_x86.h
arch/x86/include/asm/setup.h
arch/x86/include/asm/sev.h
arch/x86/include/asm/special_insns.h
arch/x86/include/asm/static_call.h
arch/x86/include/asm/tlb.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/unwind_hints.h
arch/x86/include/uapi/asm/bootparam.h
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/cppc.c
arch/x86/kernel/alternative.c
arch/x86/kernel/amd_nb.c
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/hygon.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mce/inject.c
arch/x86/kernel/cpu/mce/internal.h
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/cpu/vmware.c
arch/x86/kernel/e820.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/ftrace_64.S
arch/x86/kernel/head64.c
arch/x86/kernel/head_32.S
arch/x86/kernel/head_64.S
arch/x86/kernel/kexec-bzimage64.c
arch/x86/kernel/module.c
arch/x86/kernel/pmem.c
arch/x86/kernel/process.c
arch/x86/kernel/relocate_kernel_32.S
arch/x86/kernel/relocate_kernel_64.S
arch/x86/kernel/resource.c
arch/x86/kernel/setup.c
arch/x86/kernel/sev-shared.c
arch/x86/kernel/sev.c
arch/x86/kernel/static_call.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kvm/emulate.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/svm/vmenter.S
arch/x86/kvm/vmx/capabilities.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/run_flags.h [new file with mode: 0644]
arch/x86/kvm/vmx/vmenter.S
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/vmx/vmx_ops.h
arch/x86/kvm/x86.c
arch/x86/lib/memmove_64.S
arch/x86/lib/retpoline.S
arch/x86/mm/extable.c
arch/x86/mm/init.c
arch/x86/mm/mem_encrypt_boot.S
arch/x86/mm/pkeys.c
arch/x86/mm/tlb.c
arch/x86/net/bpf_jit_comp.c
arch/x86/pci/acpi.c
arch/x86/platform/efi/efi_thunk_64.S
arch/x86/purgatory/Makefile
arch/x86/purgatory/kexec-purgatory.S [new file with mode: 0644]
arch/x86/xen/enlighten_pv.c
arch/x86/xen/setup.c
arch/x86/xen/xen-asm.S
arch/x86/xen/xen-head.S
arch/x86/xen/xen-ops.h
arch/xtensa/kernel/entry.S
arch/xtensa/kernel/time.c
arch/xtensa/platforms/xtfpga/setup.c
block/bfq-iosched.c
block/blk-core.c
block/blk-ia-ranges.c
block/blk-merge.c
block/blk-mq-debugfs.c
block/blk-mq-debugfs.h
block/blk-mq-sched.c
block/blk-mq.c
block/blk-rq-qos.c
block/blk-rq-qos.h
block/blk-sysfs.c
block/genhd.c
block/holder.c
block/kyber-iosched.c
block/mq-deadline.c
certs/.gitignore
certs/Kconfig
certs/Makefile
certs/blacklist.c
certs/blacklist_hashes.c
certs/common.h [deleted file]
certs/system_keyring.c
crypto/Kconfig
crypto/Makefile
crypto/asymmetric_keys/Kconfig
crypto/asymmetric_keys/Makefile
crypto/asymmetric_keys/selftest.c [new file with mode: 0644]
crypto/asymmetric_keys/x509_loader.c [moved from certs/common.c with 87% similarity]
crypto/asymmetric_keys/x509_parser.h
crypto/asymmetric_keys/x509_public_key.c
drivers/acpi/acpi_video.c
drivers/acpi/bus.c
drivers/acpi/cppc_acpi.c
drivers/amba/bus.c
drivers/ata/pata_cs5535.c
drivers/base/core.c
drivers/base/cpu.c
drivers/base/init.c
drivers/base/memory.c
drivers/base/power/runtime.c
drivers/base/regmap/regmap-irq.c
drivers/base/regmap/regmap.c
drivers/block/xen-blkfront.c
drivers/bus/bt1-apb.c
drivers/bus/bt1-axi.c
drivers/bus/fsl-mc/fsl-mc-bus.c
drivers/char/lp.c
drivers/char/random.c
drivers/clk/clk-lan966x.c
drivers/clk/stm32/reset-stm32.c
drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
drivers/clocksource/hyperv_timer.c
drivers/comedi/drivers/vmk80xx.c
drivers/cpufreq/amd-pstate.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/mediatek-cpufreq.c
drivers/cpufreq/pmac32-cpufreq.c
drivers/cpufreq/qcom-cpufreq-hw.c
drivers/cpufreq/qoriq-cpufreq.c
drivers/cpuidle/Kconfig.arm
drivers/crypto/Kconfig
drivers/crypto/ccp/sp-platform.c
drivers/cxl/core/hdm.c
drivers/cxl/core/mbox.c
drivers/cxl/core/port.c
drivers/cxl/cxl.h
drivers/cxl/cxlmem.h
drivers/cxl/mem.c
drivers/cxl/pmem.c
drivers/devfreq/devfreq.c
drivers/devfreq/event/exynos-ppmu.c
drivers/devfreq/exynos-bus.c
drivers/devfreq/governor_passive.c
drivers/dma-buf/dma-resv.c
drivers/dma-buf/udmabuf.c
drivers/dma/at_xdmac.c
drivers/dma/dmatest.c
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
drivers/dma/idxd/device.c
drivers/dma/idxd/init.c
drivers/dma/imx-sdma.c
drivers/dma/lgm/lgm-dma.c
drivers/dma/pl330.c
drivers/dma/qcom/bam_dma.c
drivers/dma/ti/dma-crossbar.c
drivers/edac/ghes_edac.c
drivers/edac/synopsys_edac.c
drivers/firewire/core-cdev.c
drivers/firewire/core-device.c
drivers/firmware/arm_scmi/base.c
drivers/firmware/arm_scmi/bus.c
drivers/firmware/arm_scmi/clock.c
drivers/firmware/arm_scmi/driver.c
drivers/firmware/arm_scmi/optee.c
drivers/firmware/arm_scmi/perf.c
drivers/firmware/arm_scmi/power.c
drivers/firmware/arm_scmi/protocols.h
drivers/firmware/arm_scmi/reset.c
drivers/firmware/arm_scmi/sensors.c
drivers/firmware/arm_scmi/voltage.c
drivers/firmware/efi/reboot.c
drivers/firmware/efi/sysfb_efi.c
drivers/firmware/sysfb.c
drivers/firmware/sysfb_simplefb.c
drivers/gpio/gpio-grgpio.c
drivers/gpio/gpio-mxs.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-realtek-otto.c
drivers/gpio/gpio-sim.c
drivers/gpio/gpio-vf610.c
drivers/gpio/gpio-vr41xx.c
drivers/gpio/gpio-winbond.c
drivers/gpio/gpio-xilinx.c
drivers/gpio/gpiolib-cdev.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h [deleted file]
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/display/Kconfig
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/bridge/fsl-ldb.c
drivers/gpu/drm/drm_aperture.c
drivers/gpu/drm/drm_gem_ttm_helper.c
drivers/gpu/drm/drm_panel_orientation_quirks.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_mic.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_region.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/i915/gem/i915_gem_wait.c
drivers/gpu/drm/i915/gt/intel_context_types.h
drivers/gpu/drm/i915/gt/intel_engine.h
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
drivers/gpu/drm/i915/gt/intel_gt.c
drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
drivers/gpu/drm/i915/gt/intel_gt_types.h
drivers/gpu/drm/i915/gt/intel_lrc.h
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/selftest_lrc.c
drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
drivers/gpu/drm/i915/gt/uc/intel_guc.c
drivers/gpu/drm/i915/gt/uc/intel_guc.h
drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/i915_driver.c
drivers/gpu/drm/i915/i915_drm_client.c
drivers/gpu/drm/i915/i915_scatterlist.c
drivers/gpu/drm/i915/i915_scatterlist.h
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_region_ttm.c
drivers/gpu/drm/i915/intel_region_ttm.h
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
drivers/gpu/drm/i915/selftests/intel_memory_region.c
drivers/gpu/drm/i915/selftests/mock_region.c
drivers/gpu/drm/imx/dcss/dcss-dev.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/dp/dp_ctrl.c
drivers/gpu/drm/msm/dp/dp_ctrl.h
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fence.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_prime.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gem_vma.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/msm/msm_ringbuffer.c
drivers/gpu/drm/nouveau/nouveau_dmem.c
drivers/gpu/drm/panel/panel-edp.c
drivers/gpu/drm/panfrost/panfrost_drv.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/solomon/ssd130x.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun4i_layer.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
drivers/gpu/drm/tiny/simpledrm.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/ttm/ttm_resource.c
drivers/gpu/drm/vc4/vc4_bo.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_gem.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_hvs.c
drivers/gpu/drm/vc4/vc4_irq.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/vc4/vc4_perfmon.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/vc4/vc4_render_cl.c
drivers/gpu/drm/vc4/vc4_v3d.c
drivers/gpu/drm/vc4/vc4_validate.c
drivers/gpu/drm/vc4/vc4_validate_shaders.c
drivers/gpu/drm/xen/xen_drm_front_gem.c
drivers/hid/hid-hyperv.c
drivers/hv/channel_mgmt.c
drivers/hv/hv_kvp.c
drivers/hv/vmbus_drv.c
drivers/hwmon/asus-ec-sensors.c
drivers/hwmon/ibmaem.c
drivers/hwmon/k10temp.c
drivers/hwmon/occ/common.c
drivers/hwmon/occ/common.h
drivers/hwmon/occ/p8_i2c.c
drivers/hwmon/occ/p9_sbe.c
drivers/hwmon/pmbus/ucd9200.c
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-designware-common.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-mlxcpld.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-npcm7xx.c
drivers/i2c/busses/i2c-piix4.c
drivers/idle/intel_idle.c
drivers/iio/accel/bma180.c
drivers/iio/accel/kxcjk-1013.c
drivers/iio/accel/mma8452.c
drivers/iio/accel/mxc4005.c
drivers/iio/adc/adi-axi-adc.c
drivers/iio/adc/aspeed_adc.c
drivers/iio/adc/axp288_adc.c
drivers/iio/adc/rzg2l_adc.c
drivers/iio/adc/stm32-adc-core.c
drivers/iio/adc/stm32-adc.c
drivers/iio/adc/ti-ads131e08.c
drivers/iio/adc/xilinx-ams.c
drivers/iio/afe/iio-rescale.c
drivers/iio/chemical/ccs811.c
drivers/iio/frequency/admv1014.c
drivers/iio/gyro/mpu3050-core.c
drivers/iio/humidity/hts221_buffer.c
drivers/iio/imu/inv_icm42600/inv_icm42600.h
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
drivers/iio/magnetometer/yamaha-yas530.c
drivers/iio/proximity/sx9324.c
drivers/iio/test/Kconfig
drivers/iio/test/Makefile
drivers/iio/trigger/iio-trig-sysfs.c
drivers/infiniband/core/cm.c
drivers/infiniband/hw/irdma/cm.c
drivers/infiniband/hw/irdma/i40iw_hw.c
drivers/infiniband/hw/irdma/icrdma_hw.c
drivers/infiniband/hw/irdma/irdma.h
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/hw/qedr/qedr.h
drivers/infiniband/hw/qedr/verbs.c
drivers/input/touchscreen/goodix.c
drivers/input/touchscreen/usbtouchscreen.c
drivers/input/touchscreen/wm97xx-core.c
drivers/iommu/intel/dmar.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/pasid.c
drivers/iommu/intel/pasid.h
drivers/iommu/ipmmu-vmsa.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-apple-aic.c
drivers/irqchip/irq-gic-realview.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-loongson-liointc.c
drivers/irqchip/irq-or1k-pic.c
drivers/irqchip/irq-realtek-rtl.c
drivers/irqchip/irq-uniphier-aidet.c
drivers/md/dm-core.h
drivers/md/dm-era-target.c
drivers/md/dm-log.c
drivers/md/dm-raid.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/md.h
drivers/md/raid5-ppl.c
drivers/md/raid5.c
drivers/memory/Kconfig
drivers/memory/mtk-smi.c
drivers/memory/samsung/exynos5422-dmc.c
drivers/misc/atmel-ssc.c
drivers/misc/cardreader/rts5261.c
drivers/misc/cardreader/rtsx_usb.c
drivers/misc/eeprom/at25.c
drivers/misc/lkdtm/Makefile
drivers/misc/mei/hbm.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/hw-me.c
drivers/misc/mei/pci-me.c
drivers/mmc/host/mtk-sd.c
drivers/mmc/host/sdhci-omap.c
drivers/mmc/host/sdhci-pci-o2micro.c
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/raw/nand_ids.c
drivers/net/Kconfig
drivers/net/amt.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/caif/caif_virtio.c
drivers/net/can/grcan.c
drivers/net/can/m_can/m_can.c
drivers/net/can/rcar/rcar_canfd.c
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/kvaser_usb/kvaser_usb.h
drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
drivers/net/can/xilinx_can.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/hirschmann/hellcreek_ptp.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/qca8k.c
drivers/net/dsa/qca8k.h
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/dsa/vitesse-vsc73xx-spi.c
drivers/net/ethernet/amd/xgbe/xgbe-platform.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/broadcom/bgmac-bcma.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/fungible/funeth/funeth_rx.c
drivers/net/ethernet/fungible/funeth/funeth_tx.c
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
drivers/net/ethernet/huawei/hinic/hinic_devlink.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_register.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_txrx.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice_devids.h
drivers/net/ethernet/intel/ice/ice_devlink.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_fw_update.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_ptp.c
drivers/net/ethernet/intel/ice/ice_ptp.h
drivers/net/ethernet/intel/ice/ice_sriov.c
drivers/net/ethernet/intel/ice/ice_tc_lib.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_vf_lib.c
drivers/net/ethernet/intel/ice/ice_virtchnl.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/igc/igc_regs.h
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
drivers/net/ethernet/marvell/prestera/prestera_flower.c
drivers/net/ethernet/marvell/prestera/prestera_router.c
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
drivers/net/ethernet/mediatek/mtk_wed.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
drivers/net/ethernet/microchip/lan966x/lan966x_main.h
drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
drivers/net/ethernet/mscc/ocelot_fdma.c
drivers/net/ethernet/netronome/nfp/bpf/jit.c
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
drivers/net/ethernet/netronome/nfp/nfdk/dp.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/ef10_sriov.c
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/smsc/epic100.c
drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
drivers/net/ethernet/stmicro/stmmac/dwmac4.h
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ethernet/xilinx/xilinx_axienet.h
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/hamradio/6pack.c
drivers/net/ipa/ipa_qmi_msg.h
drivers/net/macsec.c
drivers/net/pcs/pcs-xpcs.c
drivers/net/phy/aquantia_main.c
drivers/net/phy/at803x.c
drivers/net/phy/ax88796b.c
drivers/net/phy/dp83822.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/sfp.c
drivers/net/phy/smsc.c
drivers/net/sungem_phy.c
drivers/net/tun.c
drivers/net/usb/asix.h
drivers/net/usb/asix_common.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/catc.c
drivers/net/usb/r8152.c
drivers/net/usb/usbnet.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/wireless/ath/ath11k/wmi.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/realtek/rtw88/main.h
drivers/net/wireless/realtek/rtw88/rtw8821c.c
drivers/net/xen-netback/rx.c
drivers/net/xen-netfront.c
drivers/nfc/nfcmrvl/i2c.c
drivers/nfc/nfcmrvl/spi.c
drivers/nfc/nxp-nci/i2c.c
drivers/nvdimm/bus.c
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/host/trace.h
drivers/nvme/target/configfs.c
drivers/nvme/target/core.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/passthru.c
drivers/nvme/target/tcp.c
drivers/of/kexec.c
drivers/perf/arm-cci.c
drivers/perf/arm-ccn.c
drivers/perf/arm_spe_pmu.c
drivers/perf/fsl_imx8_ddr_perf.c
drivers/perf/hisilicon/Kconfig
drivers/perf/hisilicon/Makefile
drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
drivers/perf/hisilicon/hisi_uncore_pmu.c
drivers/perf/hisilicon/hisi_uncore_pmu.h
drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
drivers/perf/hisilicon/hns3_pmu.c [new file with mode: 0644]
drivers/perf/marvell_cn10k_tad_pmu.c
drivers/perf/riscv_pmu.c
drivers/perf/riscv_pmu_sbi.c
drivers/pinctrl/Kconfig
drivers/pinctrl/aspeed/pinctrl-aspeed.c
drivers/pinctrl/freescale/pinctrl-imx93.c
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
drivers/pinctrl/pinctrl-ocelot.c
drivers/pinctrl/ralink/pinctrl-ralink.c
drivers/pinctrl/stm32/pinctrl-stm32.c
drivers/pinctrl/sunplus/sppctl.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/platform/mellanox/nvsw-sn2201.c
drivers/platform/x86/Kconfig
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel/atomisp2/led.c
drivers/platform/x86/intel/ifs/Kconfig
drivers/platform/x86/intel/pmc/core.c
drivers/platform/x86/panasonic-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/x86-android-tablets.c
drivers/power/reset/arm-versatile-reboot.c
drivers/power/supply/ab8500_fg.c
drivers/power/supply/power_supply_core.c
drivers/ptp/Kconfig
drivers/regulator/qcom_smd-regulator.c
drivers/s390/char/sclp.c
drivers/s390/crypto/ap_bus.c
drivers/s390/net/qeth_core_main.c
drivers/s390/virtio/virtio_ccw.c
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm80xx_hwi.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_ioctl.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/storvsc_drv.c
drivers/soc/atmel/soc.c
drivers/soc/bcm/brcmstb/pm/pm-arm.c
drivers/soc/imx/imx8m-blk-ctrl.c
drivers/soc/ixp4xx/ixp4xx-npe.c
drivers/soc/qcom/smem.c
drivers/spi/spi-amd.c
drivers/spi/spi-aspeed-smc.c
drivers/spi/spi-bcm2835.c
drivers/spi/spi-cadence-quadspi.c
drivers/spi/spi-cadence.c
drivers/spi/spi-mem.c
drivers/spi/spi-rockchip.c
drivers/spi/spi-rspi.c
drivers/staging/olpc_dcon/Kconfig
drivers/staging/r8188eu/core/rtw_xmit.c
drivers/staging/r8188eu/os_dep/ioctl_linux.c
drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
drivers/staging/wlan-ng/hfa384x_usb.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_sbc.c
drivers/tee/optee/optee_smc.h
drivers/tee/optee/smc_abi.c
drivers/tee/tee_core.c
drivers/thermal/intel/intel_tcc_cooling.c
drivers/tty/goldfish.c
drivers/tty/n_gsm.c
drivers/tty/pty.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_dma.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/mvebu-uart.c
drivers/tty/serial/qcom_geni_serial.c
drivers/tty/serial/samsung_tty.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/stm32-usart.c
drivers/tty/sysrq.c
drivers/tty/tty.h
drivers/tty/tty_buffer.c
drivers/tty/vt/vt.c
drivers/ufs/core/ufshcd.c
drivers/ufs/host/ufshcd-pltfrm.c
drivers/usb/cdns3/cdnsp-ring.c
drivers/usb/chipidea/udc.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/dwc3-am62.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/function/uvc_configfs.c
drivers/usb/gadget/function/uvc_video.c
drivers/usb/gadget/legacy/raw_gadget.c
drivers/usb/gadget/udc/lpc32xx_udc.c
drivers/usb/host/ehci-fsl.c
drivers/usb/host/fsl-mph-dr-of.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/io_ti.c
drivers/usb/serial/io_usbvend.h
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/typec/class.c
drivers/usb/typec/tcpm/Kconfig
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa_user/vduse_dev.c
drivers/vfio/vfio.c
drivers/vhost/vdpa.c
drivers/video/console/sticore.c
drivers/video/fbdev/au1100fb.c
drivers/video/fbdev/cirrusfb.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/core/fbmem.c
drivers/video/fbdev/intelfb/intelfbdrv.c
drivers/video/fbdev/intelfb/intelfbhw.c
drivers/video/fbdev/omap/sossi.c
drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
drivers/video/fbdev/pxa3xx-gcu.c
drivers/video/fbdev/simplefb.c
drivers/video/fbdev/skeletonfb.c
drivers/virt/coco/sev-guest/sev-guest.c
drivers/virtio/Kconfig
drivers/virtio/virtio.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_pci_modern_dev.c
drivers/virtio/virtio_ring.c
drivers/watchdog/gxp-wdt.c
drivers/xen/features.c
drivers/xen/gntdev-common.h
drivers/xen/gntdev.c
fs/9p/fid.c
fs/9p/vfs_addr.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/afs/file.c
fs/afs/inode.c
fs/attr.c
fs/btrfs/block-group.h
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/locking.c
fs/btrfs/reflink.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/btrfs/tests/btrfs-tests.c
fs/btrfs/transaction.c
fs/btrfs/zoned.c
fs/btrfs/zoned.h
fs/cachefiles/ondemand.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/cifs/cifs_debug.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/cifs/misc.c
fs/cifs/sess.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/trace.h
fs/dlm/Kconfig
fs/dlm/Makefile
fs/dlm/ast.c
fs/dlm/config.c
fs/dlm/config.h
fs/dlm/dlm_internal.h
fs/dlm/lock.c
fs/dlm/lock.h
fs/dlm/lockspace.c
fs/dlm/lowcomms.c
fs/dlm/member.c
fs/dlm/plock.c
fs/dlm/recoverd.c
fs/dlm/user.c
fs/erofs/compress.h
fs/erofs/data.c
fs/erofs/decompressor.c
fs/erofs/decompressor_lzma.c
fs/erofs/dir.c
fs/erofs/zdata.c
fs/erofs/zdata.h
fs/erofs/zpvec.h [deleted file]
fs/exec.c
fs/exfat/namei.c
fs/ext2/dir.c
fs/ext2/inode.c
fs/ext2/super.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/migrate.c
fs/ext4/namei.c
fs/ext4/page-io.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/f2fs/file.c
fs/f2fs/iostat.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/recovery.c
fs/fat/file.c
fs/fscache/cookie.c
fs/fscache/volume.c
fs/hugetlbfs/inode.c
fs/io_uring.c
fs/jbd2/transaction.c
fs/jfs/file.c
fs/ksmbd/smb2pdu.c
fs/ksmbd/transport_rdma.c
fs/ksmbd/transport_tcp.c
fs/ksmbd/vfs.c
fs/ksmbd/vfs.h
fs/lockd/svcsubs.c
fs/locks.c
fs/netfs/buffered_read.c
fs/nfs/callback_proc.c
fs/nfs/dir.c
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfsd/nfs4xdr.c
fs/nfsd/nfsd.h
fs/nfsd/vfs.c
fs/nilfs2/nilfs.h
fs/notify/fanotify/fanotify.c
fs/notify/fanotify/fanotify.h
fs/notify/fanotify/fanotify_user.c
fs/notify/fdinfo.c
fs/notify/fsnotify.c
fs/notify/inotify/inotify_user.c
fs/ntfs/attrib.c
fs/ocfs2/file.c
fs/ocfs2/ocfs2.h
fs/ocfs2/slot_map.c
fs/ocfs2/super.c
fs/open.c
fs/overlayfs/copy_up.c
fs/overlayfs/inode.c
fs/overlayfs/overlayfs.h
fs/posix_acl.c
fs/quota/dquot.c
fs/read_write.c
fs/reiserfs/inode.c
fs/remap_range.c
fs/tracefs/inode.c
fs/userfaultfd.c
fs/xattr.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_attr.h
fs/xfs/libxfs/xfs_attr_leaf.c
fs/xfs/libxfs/xfs_attr_leaf.h
fs/xfs/libxfs/xfs_da_btree.h
fs/xfs/xfs_attr_item.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_icache.h
fs/xfs/xfs_inode.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_log.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_super.c
fs/xfs/xfs_trace.h
fs/xfs/xfs_xattr.c
fs/zonefs/super.c
include/acpi/cppc_acpi.h
include/asm-generic/barrier.h
include/asm-generic/io.h
include/asm-generic/tlb.h
include/drm/drm_atomic.h
include/drm/gpu_scheduler.h
include/drm/ttm/ttm_resource.h
include/keys/asymmetric-type.h
include/linux/acpi.h
include/linux/backing-dev.h
include/linux/blkdev.h
include/linux/cgroup-defs.h
include/linux/compiler_types.h
include/linux/console.h
include/linux/cpu.h
include/linux/cpuhotplug.h
include/linux/devfreq.h
include/linux/dim.h
include/linux/evm.h
include/linux/fanotify.h
include/linux/fbcon.h
include/linux/fs.h
include/linux/fscache.h
include/linux/fsnotify_backend.h
include/linux/gfp.h
include/linux/gpio/driver.h
include/linux/highmem.h
include/linux/huge_mm.h
include/linux/ima.h
include/linux/intel-iommu.h
include/linux/kexec.h
include/linux/kvm_host.h
include/linux/lockref.h
include/linux/memregion.h
include/linux/mm.h
include/linux/mnt_idmapping.h
include/linux/netdevice.h
include/linux/netfs.h
include/linux/nvme.h
include/linux/objtool.h
include/linux/of.h
include/linux/once_lite.h
include/linux/pci_ids.h
include/linux/perf/riscv_pmu.h
include/linux/phy.h
include/linux/pm_runtime.h
include/linux/posix_acl.h
include/linux/posix_acl_xattr.h
include/linux/printk.h
include/linux/quotaops.h
include/linux/ratelimit_types.h
include/linux/refcount.h
include/linux/reset.h
include/linux/rtsx_usb.h
include/linux/sched/task.h
include/linux/scmi_protocol.h
include/linux/security.h
include/linux/serial_core.h
include/linux/stmmac.h
include/linux/sysfb.h
include/linux/virtio_config.h
include/linux/visorbus.h [deleted file]
include/linux/vmalloc.h
include/linux/xattr.h
include/net/addrconf.h
include/net/amt.h
include/net/bluetooth/l2cap.h
include/net/cfg80211.h
include/net/flow_offload.h
include/net/inet_connection_sock.h
include/net/inet_hashtables.h
include/net/inet_sock.h
include/net/ip.h
include/net/mac80211.h
include/net/netfilter/nf_tables.h
include/net/protocol.h
include/net/raw.h
include/net/route.h
include/net/sock.h
include/net/tcp.h
include/net/tls.h
include/net/udp.h
include/sound/soc.h
include/trace/events/dlm.h
include/trace/events/io_uring.h
include/trace/events/iocost.h
include/trace/events/kmem.h
include/trace/events/libata.h
include/trace/events/sock.h
include/uapi/asm-generic/fcntl.h
include/uapi/drm/drm_fourcc.h
include/uapi/linux/bpf.h
include/uapi/linux/fanotify.h
include/uapi/linux/input.h
include/uapi/linux/io_uring.h
include/uapi/linux/kvm.h
include/uapi/linux/mptcp.h
include/uapi/linux/tty.h
include/video/of_display_timing.h
ipc/namespace.c
kernel/auditsc.c
kernel/bpf/btf.c
kernel/bpf/core.c
kernel/bpf/helpers.c
kernel/bpf/verifier.c
kernel/cfi.c
kernel/cgroup/cgroup.c
kernel/configs/x86_debug.config
kernel/dma/direct.c
kernel/events/core.c
kernel/exit.c
kernel/hung_task.c
kernel/irq/chip.c
kernel/kexec_file.c
kernel/kthread.c
kernel/locking/lockdep.c
kernel/locking/rwsem.c
kernel/module/internal.h
kernel/module/kallsyms.c
kernel/module/main.c
kernel/panic.c
kernel/power/hibernate.c
kernel/printk/printk.c
kernel/ptrace.c
kernel/rcu/srcutree.c
kernel/rcu/tree_stall.h
kernel/reboot.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/sched.h
kernel/signal.c
kernel/sysctl.c
kernel/time/posix-timers.c
kernel/time/tick-sched.c
kernel/trace/Kconfig
kernel/trace/blktrace.c
kernel/trace/bpf_trace.c
kernel/trace/ftrace.c
kernel/trace/rethook.c
kernel/trace/trace.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_uprobe.c
kernel/watch_queue.c
kernel/watchdog.c
kernel/watchdog_hld.c
kernel/workqueue.c
lib/Kconfig
lib/Kconfig.ubsan
lib/Makefile
lib/crypto/Kconfig
lib/idr.c
lib/lockref.c
lib/memneq.c [moved from crypto/memneq.c with 100% similarity]
lib/sbitmap.c
mm/backing-dev.c
mm/damon/reclaim.c
mm/damon/vaddr.c
mm/filemap.c
mm/gup.c
mm/hmm.c
mm/huge_memory.c
mm/hugetlb.c
mm/hwpoison-inject.c
mm/ioremap.c
mm/kasan/common.c
mm/kfence/core.c
mm/madvise.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/memremap.c
mm/migrate.c
mm/page_alloc.c
mm/page_isolation.c
mm/readahead.c
mm/rmap.c
mm/secretmem.c
mm/shmem.c
mm/slab.c
mm/slab.h
mm/slab_common.c
mm/slob.c
mm/slub.c
mm/sparse-vmemmap.c
mm/swap.c
mm/swap_slots.c
mm/usercopy.c
mm/userfaultfd.c
mm/vmalloc.c
net/8021q/vlan_netlink.c
net/ax25/af_ax25.c
net/bluetooth/hci_core.c
net/bluetooth/hci_sync.c
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_netlink.c
net/caif/caif_socket.c
net/can/bcm.c
net/core/dev.c
net/core/filter.c
net/core/net-sysfs.c
net/core/secure_seq.c
net/core/skmsg.c
net/core/sock_reuseport.c
net/dccp/proto.c
net/decnet/af_decnet.c
net/dsa/port.c
net/dsa/switch.c
net/ethtool/eeprom.c
net/ipv4/af_inet.c
net/ipv4/ah4.c
net/ipv4/cipso_ipv4.c
net/ipv4/esp4.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/inetpeer.c
net/ipv4/ip_forward.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel_core.c
net/ipv4/netfilter/nf_reject_ipv4.c
net/ipv4/nexthop.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_bpf.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_recovery.c
net/ipv4/tcp_timer.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/icmp.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_input.c
net/ipv6/mcast.c
net/ipv6/ping.c
net/ipv6/route.c
net/ipv6/seg6_hmac.c
net/ipv6/seg6_iptunnel.c
net/ipv6/seg6_local.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/mac80211/cfg.c
net/mac80211/iface.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/wme.c
net/mptcp/options.c
net/mptcp/pm.c
net/mptcp/pm_netlink.c
net/mptcp/pm_userspace.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/ncsi/ncsi-manage.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_dup_netdev.c
net/netfilter/nf_log_syslog.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netfilter/nf_tables_trace.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_meta.c
net/netfilter/nft_numgen.c
net/netfilter/nft_queue.c
net/netfilter/nft_set_hash.c
net/netfilter/nft_set_pipapo.c
net/openvswitch/flow.c
net/rose/rose_route.c
net/rose/rose_timer.c
net/sched/act_api.c
net/sched/act_police.c
net/sched/cls_api.c
net/sched/sch_netem.c
net/sctp/associola.c
net/sctp/protocol.c
net/sctp/stream.c
net/sctp/stream_sched.c
net/smc/smc_llc.c
net/socket.c
net/sunrpc/clnt.c
net/sunrpc/xdr.c
net/tipc/core.c
net/tipc/node.c
net/tipc/socket.c
net/tls/tls_device.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/wireless/sme.c
net/xdp/xsk.c
net/xdp/xsk_buff_pool.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
samples/fprobe/fprobe_example.c
samples/kprobes/kprobe_example.c
samples/kprobes/kretprobe_example.c
scripts/Makefile.lib
scripts/Makefile.modinst
scripts/Makefile.vmlinux_o
scripts/clang-tools/gen_compile_commands.py
scripts/faddr2line
scripts/gdb/linux/symbols.py
scripts/gen_autoksyms.sh
scripts/mod/modpost.c
scripts/remove-stale-files
security/Kconfig
security/integrity/evm/evm_crypto.c
security/integrity/evm/evm_main.c
security/integrity/ima/ima_appraise.c
security/integrity/ima/ima_crypto.c
security/integrity/ima/ima_efi.c
security/integrity/ima/ima_kexec.c
security/integrity/ima/ima_policy.c
security/integrity/ima/ima_template_lib.c
security/security.c
security/selinux/hooks.c
sound/core/memalloc.c
sound/hda/hdac_i915.c
sound/hda/intel-dsp-config.c
sound/hda/intel-nhlt.c
sound/pci/cs46xx/cs46xx.c
sound/pci/hda/hda_auto_parser.c
sound/pci/hda/hda_local.h
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/soc/codecs/ak4613.c
sound/soc/codecs/arizona.c
sound/soc/codecs/cs35l41-lib.c
sound/soc/codecs/cs35l41.c
sound/soc/codecs/cs47l15.c
sound/soc/codecs/cs47l92.c
sound/soc/codecs/madera.c
sound/soc/codecs/max98373-sdw.c
sound/soc/codecs/max98396.c
sound/soc/codecs/rt1308-sdw.c
sound/soc/codecs/rt1316-sdw.c
sound/soc/codecs/rt5640.c
sound/soc/codecs/rt5682-sdw.c
sound/soc/codecs/rt700-sdw.c
sound/soc/codecs/rt700.c
sound/soc/codecs/rt711-sdca-sdw.c
sound/soc/codecs/rt711-sdca.c
sound/soc/codecs/rt711-sdw.c
sound/soc/codecs/rt711.c
sound/soc/codecs/rt715-sdca-sdw.c
sound/soc/codecs/rt715-sdw.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/sgtl5000.h
sound/soc/codecs/tas2764.c
sound/soc/codecs/tas2764.h
sound/soc/codecs/tlv320adcx140.c
sound/soc/codecs/wcd9335.c
sound/soc/codecs/wcd938x.c
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm5110.c
sound/soc/codecs/wm8998.c
sound/soc/codecs/wm_adsp.c
sound/soc/generic/audio-graph-card2.c
sound/soc/intel/avs/topology.c
sound/soc/intel/boards/bytcr_wm5102.c
sound/soc/intel/boards/sof_rt5682.c
sound/soc/intel/boards/sof_sdw.c
sound/soc/intel/skylake/skl-nhlt.c
sound/soc/qcom/qdsp6/q6apm-dai.c
sound/soc/qcom/qdsp6/q6apm.c
sound/soc/soc-dapm.c
sound/soc/soc-ops.c
sound/soc/sof/intel/hda-dsp.c
sound/soc/sof/intel/hda-loader.c
sound/soc/sof/intel/hda-pcm.c
sound/soc/sof/intel/hda-stream.c
sound/soc/sof/intel/hda.h
sound/soc/sof/ipc3-topology.c
sound/soc/sof/mediatek/mt8186/mt8186.c
sound/soc/sof/pm.c
sound/soc/sof/sof-priv.h
sound/soc/ti/omap-mcbsp-priv.h
sound/soc/ti/omap-mcbsp-st.c
sound/soc/ti/omap-mcbsp.c
sound/usb/mixer_us16x08.c
sound/usb/quirks-table.h
sound/usb/quirks.c
sound/x86/intel_hdmi_audio.c
tools/arch/arm64/include/asm/cputype.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/disabled-features.h
tools/arch/x86/include/asm/msr-index.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/svm.h
tools/include/linux/objtool.h
tools/include/uapi/asm-generic/fcntl.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/prctl.h
tools/include/uapi/linux/vhost.h
tools/kvm/kvm_stat/kvm_stat
tools/lib/perf/evsel.c
tools/objtool/arch/x86/decode.c
tools/objtool/builtin-check.c
tools/objtool/check.c
tools/objtool/include/objtool/arch.h
tools/objtool/include/objtool/builtin.h
tools/objtool/include/objtool/check.h
tools/objtool/include/objtool/elf.h
tools/objtool/include/objtool/objtool.h
tools/objtool/objtool.c
tools/perf/builtin-inject.c
tools/perf/builtin-stat.c
tools/perf/builtin-trace.c
tools/perf/scripts/python/arm-cs-trace-disasm.py
tools/perf/tests/bp_account.c
tools/perf/tests/expr.c
tools/perf/tests/perf-time-to-tsc.c
tools/perf/tests/shell/lib/perf_csv_output_lint.py [deleted file]
tools/perf/tests/shell/stat+csv_output.sh
tools/perf/tests/shell/test_arm_callgraph_fp.sh
tools/perf/tests/topology.c
tools/perf/trace/beauty/arch_errno_names.sh
tools/perf/trace/beauty/include/linux/socket.h
tools/perf/util/arm-spe.c
tools/perf/util/bpf-loader.c
tools/perf/util/bpf-utils.c
tools/perf/util/bpf_off_cpu.c
tools/perf/util/bpf_skel/off_cpu.bpf.c
tools/perf/util/build-id.c
tools/perf/util/evsel.c
tools/perf/util/expr.l
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/metricgroup.c
tools/perf/util/off_cpu.h
tools/perf/util/symbol-elf.c
tools/perf/util/synthetic-events.c
tools/perf/util/unwind-libunwind-local.c
tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
tools/testing/selftests/bpf/prog_tests/tailcalls.c
tools/testing/selftests/bpf/progs/dynptr_fail.c
tools/testing/selftests/bpf/progs/dynptr_success.c
tools/testing/selftests/bpf/progs/kprobe_multi.c
tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c [new file with mode: 0644]
tools/testing/selftests/bpf/verifier/jmp32.c
tools/testing/selftests/bpf/verifier/jump.c
tools/testing/selftests/dma/Makefile
tools/testing/selftests/dma/dma_map_benchmark.c
tools/testing/selftests/gpio/Makefile
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/dirty_log_perf_test.c
tools/testing/selftests/kvm/include/perf_test_util.h
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/include/x86_64/vmx.h
tools/testing/selftests/kvm/lib/aarch64/ucall.c
tools/testing/selftests/kvm/lib/perf_test_util.c
tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c [new file with mode: 0644]
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/lib/x86_64/vmx.c
tools/testing/selftests/kvm/max_guest_memory_test.c
tools/testing/selftests/kvm/rseq_test.c
tools/testing/selftests/kvm/x86_64/mmu_role_test.c
tools/testing/selftests/lib.mk
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/bind_bhash_test.c [deleted file]
tools/testing/selftests/net/bpf/Makefile
tools/testing/selftests/net/fcnal-test.sh
tools/testing/selftests/net/fib_nexthop_nongw.sh [new file with mode: 0755]
tools/testing/selftests/net/forwarding/Makefile
tools/testing/selftests/net/forwarding/lib.sh
tools/testing/selftests/net/mptcp/Makefile
tools/testing/selftests/net/mptcp/diag.sh
tools/testing/selftests/net/mptcp/mptcp_connect.c
tools/testing/selftests/net/mptcp/mptcp_inq.c
tools/testing/selftests/net/mptcp/mptcp_sockopt.c
tools/testing/selftests/net/mptcp/pm_nl_ctl.c
tools/testing/selftests/net/mptcp/userspace_pm.sh
tools/testing/selftests/net/tun.c [new file with mode: 0644]
tools/testing/selftests/net/udpgro.sh
tools/testing/selftests/net/udpgro_bench.sh
tools/testing/selftests/net/udpgro_frglist.sh
tools/testing/selftests/net/udpgro_fwd.sh
tools/testing/selftests/net/udpgso_bench.sh
tools/testing/selftests/net/veth.sh
tools/testing/selftests/netfilter/nft_concat_range.sh
tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
tools/testing/selftests/vm/gup_test.c
tools/testing/selftests/vm/ksm_tests.c
tools/testing/selftests/wireguard/qemu/Makefile
tools/testing/selftests/wireguard/qemu/arch/arm.config
tools/testing/selftests/wireguard/qemu/arch/armeb.config
tools/testing/selftests/wireguard/qemu/arch/i686.config
tools/testing/selftests/wireguard/qemu/arch/m68k.config
tools/testing/selftests/wireguard/qemu/arch/mips.config
tools/testing/selftests/wireguard/qemu/arch/mipsel.config
tools/testing/selftests/wireguard/qemu/arch/powerpc.config
tools/testing/selftests/wireguard/qemu/arch/x86_64.config
tools/testing/selftests/wireguard/qemu/init.c
tools/vm/slabinfo.c
virt/kvm/kvm_main.c

index 825fae8..71577c3 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -10,6 +10,8 @@
 # Please keep this list dictionary sorted.
 #
 Aaron Durbin <adurbin@google.com>
+Abel Vesa <abelvesa@kernel.org> <abel.vesa@nxp.com>
+Abel Vesa <abelvesa@kernel.org> <abelvesa@gmail.com>
 Abhinav Kumar <quic_abhinavk@quicinc.com> <abhinavk@codeaurora.org>
 Adam Oldham <oldhamca@gmail.com>
 Adam Radford <aradford@gmail.com>
@@ -58,10 +60,17 @@ Arnd Bergmann <arnd@arndb.de>
 Atish Patra <atishp@atishpatra.org> <atish.patra@wdc.com>
 Axel Dyks <xl@xlsigned.net>
 Axel Lin <axel.lin@gmail.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@linaro.org>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@spreadtrum.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang7@gmail.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
+Ben Widawsky <bwidawsk@kernel.org> <ben@bwidawsk.net>
+Ben Widawsky <bwidawsk@kernel.org> <ben.widawsky@intel.com>
+Ben Widawsky <bwidawsk@kernel.org> <benjamin.widawsky@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
 Björn Töpel <bjorn@kernel.org> <bjorn.topel@gmail.com>
 Björn Töpel <bjorn@kernel.org> <bjorn.topel@intel.com>
@@ -85,6 +94,7 @@ Christian Borntraeger <borntraeger@linux.ibm.com> <borntrae@de.ibm.com>
 Christian Brauner <brauner@kernel.org> <christian@brauner.io>
 Christian Brauner <brauner@kernel.org> <christian.brauner@canonical.com>
 Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
+Christian Marangi <ansuelsmth@gmail.com>
 Christophe Ricard <christophe.ricard@gmail.com>
 Christoph Hellwig <hch@lst.de>
 Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
@@ -129,6 +139,8 @@ Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
 Frank Zago <fzago@systemfabricworks.com>
 Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
+Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
+Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
@@ -165,6 +177,7 @@ Jan Glauber <jan.glauber@gmail.com> <jang@de.ibm.com>
 Jan Glauber <jan.glauber@gmail.com> <jang@linux.vnet.ibm.com>
 Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
 Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@linux.intel.com>
+Jarkko Sakkinen <jarkko@kernel.org> <jarkko@profian.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
@@ -364,6 +377,7 @@ Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
 Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
 Sebastian Reichel <sre@kernel.org> <sre@debian.org>
 Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
+Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
 Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
 Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
diff --git a/CREDITS b/CREDITS
index 7e85a53..40d3c65 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -627,6 +627,10 @@ S: 48287 Sawleaf
 S: Fremont, California 94539
 S: USA
 
+N: Tomas Cech
+E: sleep_walker@suse.com
+D: arm/palm treo support
+
 N: Florent Chabaud
 E: florent.chabaud@polytechnique.org
 D: software suspend
index 308a675..491ead8 100644 (file)
@@ -1,4 +1,4 @@
-What:          /sys/bus/iio/devices/iio:deviceX/conversion_mode
+What:          /sys/bus/iio/devices/iio:deviceX/in_conversion_mode
 KernelVersion: 4.2
 Contact:       linux-iio@vger.kernel.org
 Description:
index 2ad01ca..df79e12 100644 (file)
@@ -493,12 +493,13 @@ What:             /sys/devices/system/cpu/cpuX/regs/
                /sys/devices/system/cpu/cpuX/regs/identification/
                /sys/devices/system/cpu/cpuX/regs/identification/midr_el1
                /sys/devices/system/cpu/cpuX/regs/identification/revidr_el1
+               /sys/devices/system/cpu/cpuX/regs/identification/smidr_el1
 Date:          June 2016
 Contact:       Linux ARM Kernel Mailing list <linux-arm-kernel@lists.infradead.org>
 Description:   AArch64 CPU registers
 
                'identification' directory exposes the CPU ID registers for
-               identifying model and revision of the CPU.
+               identifying model and revision of the CPU and SMCU.
 
 What:          /sys/devices/system/cpu/aarch32_el0
 Date:          May 2021
@@ -526,6 +527,7 @@ What:               /sys/devices/system/cpu/vulnerabilities
                /sys/devices/system/cpu/vulnerabilities/srbds
                /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
                /sys/devices/system/cpu/vulnerabilities/itlb_multihit
+               /sys/devices/system/cpu/vulnerabilities/mmio_stale_data
 Date:          January 2018
 Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:   Information about CPU vulnerabilities
index 8cbc711..4df436e 100644 (file)
@@ -17,3 +17,4 @@ are configurable at compile, boot or run time.
    special-register-buffer-data-sampling.rst
    core-scheduling.rst
    l1d_flush.rst
+   processor_mmio_stale_data.rst
diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
new file mode 100644 (file)
index 0000000..9393c50
--- /dev/null
@@ -0,0 +1,246 @@
+=========================================
+Processor MMIO Stale Data Vulnerabilities
+=========================================
+
+Processor MMIO Stale Data Vulnerabilities are a class of memory-mapped I/O
+(MMIO) vulnerabilities that can expose data. The sequences of operations for
+exposing data range from simple to very complex. Because most of the
+vulnerabilities require the attacker to have access to MMIO, many environments
+are not affected. System environments using virtualization where MMIO access is
+provided to untrusted guests may need mitigation. These vulnerabilities are
+not transient execution attacks. However, these vulnerabilities may propagate
+stale data into core fill buffers where the data can subsequently be inferred
+by an unmitigated transient execution attack. Mitigation for these
+vulnerabilities includes a combination of microcode update and software
+changes, depending on the platform and usage model. Some of these mitigations
+are similar to those used to mitigate Microarchitectural Data Sampling (MDS) or
+those used to mitigate Special Register Buffer Data Sampling (SRBDS).
+
+Data Propagators
+================
+Propagators are operations that result in stale data being copied or moved from
+one microarchitectural buffer or register to another. Processor MMIO Stale Data
+Vulnerabilities involve operations that may result in stale data being directly
+read into an architectural, software-visible state or sampled from a buffer or
+register.
+
+Fill Buffer Stale Data Propagator (FBSDP)
+-----------------------------------------
+Stale data may propagate from fill buffers (FB) into the non-coherent portion
+of the uncore on some non-coherent writes. Fill buffer propagation by itself
+does not make stale data architecturally visible. Stale data must be propagated
+to a location where it is subject to reading or sampling.
+
+Sideband Stale Data Propagator (SSDP)
+-------------------------------------
+The sideband stale data propagator (SSDP) is limited to the client (including
+Intel Xeon server E3) uncore implementation. The sideband response buffer is
+shared by all client cores. For non-coherent reads that go to sideband
+destinations, the uncore logic returns 64 bytes of data to the core, including
+both requested data and unrequested stale data, from a transaction buffer and
+the sideband response buffer. As a result, stale data from the sideband
+response and transaction buffers may now reside in a core fill buffer.
+
+Primary Stale Data Propagator (PSDP)
+------------------------------------
+The primary stale data propagator (PSDP) is limited to the client (including
+Intel Xeon server E3) uncore implementation. Similar to the sideband response
+buffer, the primary response buffer is shared by all client cores. For some
+processors, MMIO primary reads will return 64 bytes of data to the core fill
+buffer including both requested data and unrequested stale data. This is
+similar to the sideband stale data propagator.
+
+Vulnerabilities
+===============
+Device Register Partial Write (DRPW) (CVE-2022-21166)
+-----------------------------------------------------
+Some endpoint MMIO registers incorrectly handle writes that are smaller than
+the register size. Instead of aborting the write or only copying the correct
+subset of bytes (for example, 2 bytes for a 2-byte write), more bytes than
+specified by the write transaction may be written to the register. On
+processors affected by FBSDP, this may expose stale data from the fill buffers
+of the core that created the write transaction.
+
+Shared Buffers Data Sampling (SBDS) (CVE-2022-21125)
+----------------------------------------------------
+After propagators have moved data around the uncore and copied stale data
+into client core fill buffers, processors affected by MFBDS can leak data from
+the fill buffer. It is limited to the client (including Intel Xeon server E3)
+uncore implementation.
+
+Shared Buffers Data Read (SBDR) (CVE-2022-21123)
+------------------------------------------------
+It is similar to Shared Buffer Data Sampling (SBDS) except that the data is
+directly read into the architectural software-visible state. It is limited to
+the client (including Intel Xeon server E3) uncore implementation.
+
+Affected Processors
+===================
+Not all CPUs are affected by all variants. For instance, most processors for
+the server market (excluding Intel Xeon E3 processors) are impacted only by
+Device Register Partial Write (DRPW).
+
+Below is the list of affected Intel processors [#f1]_:
+
+   ===================  ============  =========
+   Common name          Family_Model  Steppings
+   ===================  ============  =========
+   HASWELL_X            06_3FH        2,4
+   SKYLAKE_L            06_4EH        3
+   BROADWELL_X          06_4FH        All
+   SKYLAKE_X            06_55H        3,4,6,7,11
+   BROADWELL_D          06_56H        3,4,5
+   SKYLAKE              06_5EH        3
+   ICELAKE_X            06_6AH        4,5,6
+   ICELAKE_D            06_6CH        1
+   ICELAKE_L            06_7EH        5
+   ATOM_TREMONT_D       06_86H        All
+   LAKEFIELD            06_8AH        1
+   KABYLAKE_L           06_8EH        9 to 12
+   ATOM_TREMONT         06_96H        1
+   ATOM_TREMONT_L       06_9CH        0
+   KABYLAKE             06_9EH        9 to 13
+   COMETLAKE            06_A5H        2,3,5
+   COMETLAKE_L          06_A6H        0,1
+   ROCKETLAKE           06_A7H        1
+   ===================  ============  =========
+
+If a CPU is in the affected processor list but is not affected by a variant,
+this is indicated by new bits in the IA32_ARCH_CAPABILITIES MSR. As described
+in a later section, the mitigation largely remains the same for all variants,
+i.e. clearing the CPU fill buffers via the VERW instruction.
+
+New bits in MSRs
+================
+Newer processors, and microcode updates on existing affected processors, add
+new bits to the IA32_ARCH_CAPABILITIES MSR. These bits can be used to
+enumerate the specific variants of Processor MMIO Stale Data vulnerabilities
+and the mitigation capability.
+
+MSR IA32_ARCH_CAPABILITIES
+--------------------------
+Bit 13 - SBDR_SSDP_NO - When set, the processor is not affected by either the
+        Shared Buffers Data Read (SBDR) vulnerability or the sideband stale
+        data propagator (SSDP).
+Bit 14 - FBSDP_NO - When set, the processor is not affected by the Fill Buffer
+        Stale Data Propagator (FBSDP).
+Bit 15 - PSDP_NO - When set, the processor is not affected by the Primary
+        Stale Data Propagator (PSDP).
+Bit 17 - FB_CLEAR - When set, the VERW instruction will overwrite CPU fill
+        buffer values as part of MD_CLEAR operations. Processors that do not
+        enumerate MDS_NO (meaning they are affected by MDS) but that do
+        enumerate support for both L1D_FLUSH and MD_CLEAR implicitly enumerate
+        FB_CLEAR as part of their MD_CLEAR support.
+Bit 18 - FB_CLEAR_CTRL - The processor supports read and write access to MSR
+        IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]. On such processors, the FB_CLEAR_DIS
+        bit can be set to cause the VERW instruction to not perform the
+        FB_CLEAR action. Not all processors that support FB_CLEAR will support
+        FB_CLEAR_CTRL.
+
+MSR IA32_MCU_OPT_CTRL
+---------------------
+Bit 3 - FB_CLEAR_DIS - When set, the VERW instruction does not perform the
+FB_CLEAR action. This may be useful to reduce the performance impact of
+FB_CLEAR in cases where system software deems it warranted (for example, when
+performance is more critical, or the untrusted software has no MMIO access).
+Note that FB_CLEAR_DIS has no impact on enumeration (for example, it does not
+change FB_CLEAR or MD_CLEAR enumeration) and it may not be supported on all
+processors that enumerate FB_CLEAR.
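+
+As a minimal userspace sketch (an illustration, not part of this interface),
+these enumeration bits can be inspected via the ``msr`` module. The MSR index
+0x10a for IA32_ARCH_CAPABILITIES is an assumption taken from the Intel SDM;
+the bit positions are the ones listed above::
+
+  /* sketch: decode the MMIO Stale Data enumeration bits; requires
+   * "modprobe msr" and root privileges */
+  #include <fcntl.h>
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          uint64_t caps;
+          int fd = open("/dev/cpu/0/msr", O_RDONLY);
+
+          if (fd < 0 || pread(fd, &caps, sizeof(caps), 0x10a) != sizeof(caps)) {
+                  perror("rdmsr IA32_ARCH_CAPABILITIES");
+                  return 1;
+          }
+          printf("SBDR_SSDP_NO=%u FBSDP_NO=%u PSDP_NO=%u FB_CLEAR=%u FB_CLEAR_CTRL=%u\n",
+                 (unsigned)(caps >> 13) & 1, (unsigned)(caps >> 14) & 1,
+                 (unsigned)(caps >> 15) & 1, (unsigned)(caps >> 17) & 1,
+                 (unsigned)(caps >> 18) & 1);
+          close(fd);
+          return 0;
+  }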
+
+Mitigation
+==========
+Like MDS, all variants of Processor MMIO Stale Data vulnerabilities share the
+same mitigation strategy: force the CPU to clear the affected buffers before
+an attacker can extract the secrets.
+
+This is achieved by using the otherwise unused and obsolete VERW instruction in
+combination with a microcode update. The microcode clears the affected CPU
+buffers when the VERW instruction is executed.
+
+The kernel reuses the MDS function to invoke the buffer clearing::
+
+       mds_clear_cpu_buffers()
+
+On MDS affected CPUs, the kernel already invokes CPU buffer clear on
+kernel/userspace, hypervisor/guest and C-state (idle) transitions. No
+additional mitigation is needed on such CPUs.
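+
+As a hedged illustration of what this boils down to (the actual helper lives
+in the kernel's x86 headers and may differ in detail), the clearing is a
+single VERW executed with a valid selector operand::
+
+  /* sketch, not the authoritative implementation: with updated
+   * microcode, VERW also flushes the affected CPU buffers */
+  static inline void mds_clear_cpu_buffers(void)
+  {
+          static const u16 ds = __KERNEL_DS;
+
+          /* "cc" because VERW writes ZF */
+          asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
+  }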
+
+For CPUs not affected by MDS or TAA, mitigation is needed only against
+attackers with MMIO capability. Therefore, VERW is not required for
+kernel/userspace transitions. For the virtualization case, VERW is only
+needed at VMENTER for a guest with MMIO capability.
+
+Mitigation points
+-----------------
+Return to user space
+^^^^^^^^^^^^^^^^^^^^
+Same mitigation as MDS when affected by MDS/TAA, otherwise no mitigation
+needed.
+
+C-State transition
+^^^^^^^^^^^^^^^^^^
+Control register writes by the CPU during a C-state transition can propagate
+data from the fill buffer to uncore buffers. Execute VERW before the C-state
+transition to clear the CPU fill buffers.
+
+Guest entry point
+^^^^^^^^^^^^^^^^^
+Same mitigation as MDS when the processor is also affected by MDS/TAA;
+otherwise execute VERW at VMENTER only for MMIO-capable guests. On CPUs not
+affected by MDS/TAA, a guest without MMIO access cannot extract secrets using
+Processor MMIO Stale Data vulnerabilities, so there is no need to execute VERW
+for such guests.
+
+Mitigation control on the kernel command line
+---------------------------------------------
+The kernel command line allows controlling the Processor MMIO Stale Data
+mitigations at boot time with the "mmio_stale_data=" option. The valid
+arguments for this option are:
+
+  ==========  =================================================================
+  full        If the CPU is vulnerable, enable the mitigation: CPU buffers are
+              cleared on exit to userspace and when entering a VM. Idle
+              transitions are protected as well. It does not automatically
+              disable SMT.
+  full,nosmt  Same as full, with SMT disabled on vulnerable CPUs. This is the
+              complete mitigation.
+  off         Disables mitigation completely.
+  ==========  =================================================================
+
+If the CPU is affected and mmio_stale_data=off is not supplied on the kernel
+command line, then the kernel selects the appropriate mitigation.
+
+Mitigation status information
+-----------------------------
+The Linux kernel provides a sysfs interface to enumerate the current
+vulnerability status of the system: whether the system is vulnerable, and
+which mitigations are active. The relevant sysfs file is:
+
+       /sys/devices/system/cpu/vulnerabilities/mmio_stale_data
+
+The possible values in this file are:
+
+  .. list-table::
+
+     * - 'Not affected'
+       - The processor is not vulnerable
+     * - 'Vulnerable'
+       - The processor is vulnerable, but no mitigation enabled
+     * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
+       - The processor is vulnerable, but microcode is not updated. The
+         mitigation is enabled on a best effort basis.
+     * - 'Mitigation: Clear CPU buffers'
+       - The processor is vulnerable and the CPU buffer clearing mitigation is
+         enabled.
+
+If the processor is vulnerable then the following information is appended to
+the above information:
+
+  ========================  ===========================================
+  'SMT vulnerable'          SMT is enabled
+  'SMT disabled'            SMT is disabled
+  'SMT Host state unknown'  Kernel runs in a VM, Host SMT state unknown
+  ========================  ===========================================
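+
+As a convenience illustration (equivalent to a plain ``cat`` of the file,
+and only a sketch), the status can also be consumed programmatically::
+
+  /* sketch: print the current MMIO Stale Data mitigation status */
+  #include <stdio.h>
+
+  int main(void)
+  {
+          char buf[256];
+          FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mmio_stale_data", "r");
+
+          if (!f || !fgets(buf, sizeof(buf), f)) {
+                  perror("mmio_stale_data");
+                  return 1;
+          }
+          fputs(buf, stdout);
+          fclose(f);
+          return 0;
+  }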
+
+References
+----------
+.. [#f1] Affected Processors
+   https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
index 8090130..5e9147f 100644 (file)
        arm64.nomte     [ARM64] Unconditionally disable Memory Tagging Extension
                        support
 
+       arm64.nosve     [ARM64] Unconditionally disable Scalable Vector
+                       Extension support
+
+       arm64.nosme     [ARM64] Unconditionally disable Scalable Matrix
+                       Extension support
+
        ataflop=        [HW,M68k]
 
        atarimouse=     [HW,MOUSE] Atari Mouse
 
                        protected: nVHE-based mode with support for guests whose
                                   state is kept private from the host.
-                                  Not valid if the kernel is running in EL2.
 
                        Defaults to VHE/nVHE based on hardware support. Setting
                        mode to "protected" will disable kexec and hibernation
                                improves system performance, but it may also
                                expose users to several CPU vulnerabilities.
                                Equivalent to: nopti [X86,PPC]
-                                              kpti=0 [ARM64]
+                                              if nokaslr then kpti=0 [ARM64]
                                               nospectre_v1 [X86,PPC]
                                               nobp=0 [S390]
                                               nospectre_v2 [X86,PPC,S390,ARM64]
                                               srbds=off [X86,INTEL]
                                               no_entry_flush [PPC]
                                               no_uaccess_flush [PPC]
+                                              mmio_stale_data=off [X86]
+                                              retbleed=off [X86]
 
                                Exceptions:
                                               This does not have any effect on
                                Equivalent to: l1tf=flush,nosmt [X86]
                                               mds=full,nosmt [X86]
                                               tsx_async_abort=full,nosmt [X86]
+                                              mmio_stale_data=full,nosmt [X86]
+                                              retbleed=auto,nosmt [X86]
 
        mminit_loglevel=
                        [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
                        log everything. Information is printed at KERN_DEBUG
                        so loglevel=8 may also need to be specified.
 
+       mmio_stale_data=
+                       [X86,INTEL] Control mitigation for the Processor
+                       MMIO Stale Data vulnerabilities.
+
+                       Processor MMIO Stale Data is a class of
+                       vulnerabilities that may expose data after an MMIO
+                       operation. Exposed data may originate from or end up
+                       in the same CPU buffers that are affected by MDS and
+                       TAA. Therefore, similar to MDS and TAA, the mitigation
+                       is to clear the affected CPU buffers.
+
+                       This parameter controls the mitigation. The
+                       options are:
+
+                       full       - Enable mitigation on vulnerable CPUs
+
+                       full,nosmt - Enable mitigation and disable SMT on
+                                    vulnerable CPUs.
+
+                       off        - Unconditionally disable mitigation
+
+                       On MDS- or TAA-affected machines,
+                       mmio_stale_data=off can be overridden by an active
+                       MDS or TAA mitigation, as these vulnerabilities are
+                       mitigated with the same mechanism. To fully disable
+                       this mitigation, you therefore need to specify
+                       mds=off and tsx_async_abort=off as well.
+
+                       Not specifying this option is equivalent to
+                       mmio_stale_data=full.
+
+                       For details see:
+                       Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+
        module.sig_enforce
                        [KNL] When CONFIG_MODULE_SIG is set, this means that
                        modules without (valid) signatures will fail to load.
 
        retain_initrd   [RAM] Keep initrd memory after extraction
 
+       retbleed=       [X86] Control mitigation of RETBleed (Arbitrary
+                       Speculative Code Execution with Return Instructions)
+                       vulnerability.
+
+                       off          - no mitigation
+                       auto         - automatically select a mitigation
+                       auto,nosmt   - automatically select a mitigation,
+                                      disabling SMT if necessary for
+                                      the full mitigation (only on Zen1
+                                      and older without STIBP).
+                       ibpb         - mitigate short speculation windows on
+                                      basic block boundaries too. Safe, highest
+                                      perf impact.
+                       unret        - force enable untrained return thunks,
+                                      only effective on AMD f15h-f17h
+                                      based systems.
+                       unret,nosmt  - like unret, will disable SMT when STIBP
+                                      is not available.
+
+                       Selecting 'auto' will choose a mitigation method at run
+                       time according to the CPU.
+
+                       Not specifying this option is equivalent to retbleed=auto.
+
        rfkill.default_state=
                0       "airplane mode".  All wifi, bluetooth, wimax, gps, fm,
                        etc. communication is blocked by default.
                        eibrs             - enhanced IBRS
                        eibrs,retpoline   - enhanced IBRS + Retpolines
                        eibrs,lfence      - enhanced IBRS + LFENCE
+                       ibrs              - use IBRS to protect kernel
 
                        Not specifying this option is equivalent to
                        spectre_v2=auto.
                        expediting.  Set to zero to disable automatic
                        expediting.
 
+       srcutree.srcu_max_nodelay [KNL]
+                       Specifies the number of no-delay instances
+                       per jiffy for which the SRCU grace period
+                       worker thread will be rescheduled with zero
+                       delay. Beyond this limit, the worker thread
+                       will be rescheduled with a sleep delay of
+                       one jiffy.
+
+       srcutree.srcu_max_nodelay_phase [KNL]
+                       Specifies the per-grace-period-phase number of
+                       non-sleeping polls of readers. Beyond this limit,
+                       the grace-period worker thread will be rescheduled
+                       with a sleep delay of one jiffy between each
+                       rescan of the readers for a grace period phase.
+
+       srcutree.srcu_retry_check_delay [KNL]
+                       Specifies the number of microseconds of
+                       non-sleeping delay between each non-sleeping
+                       poll of readers.
+
        srcutree.small_contention_lim [KNL]
                        Specifies the number of update-side contention
                        events per jiffy that will be tolerated before
diff --git a/Documentation/admin-guide/perf/hns3-pmu.rst b/Documentation/admin-guide/perf/hns3-pmu.rst
new file mode 100644 (file)
index 0000000..578407e
--- /dev/null
@@ -0,0 +1,136 @@
+======================================
+HNS3 Performance Monitoring Unit (PMU)
+======================================
+
+HNS3 (HiSilicon network system 3) Performance Monitoring Unit (PMU) is an
+endpoint device that collects performance statistics of the HiSilicon SoC
+NIC. On Hip09, each SICL (super I/O cluster) has one PMU device.
+
+HNS3 PMU supports collection of performance statistics such as bandwidth,
+latency, packet rate and interrupt rate.
+
+Each HNS3 PMU supports 8 hardware events.
+
+HNS3 PMU driver
+===============
+
+The HNS3 PMU driver registers a perf PMU named after its SICL id::
+
+  /sys/devices/hns3_pmu_sicl_<sicl_id>
+
+The PMU driver provides descriptions of the available events, filter modes,
+formats, identifier and cpumask in sysfs.
+
+The "events" directory describes the event code of all supported events
+shown in perf list.
+
+The "filtermode" directory describes the supported filter modes of each
+event.
+
+The "format" directory describes all formats of the config (events) and
+config1 (filter options) fields of the perf_event_attr structure.
+
+The "identifier" file shows version of PMU hardware device.
+
+The "bdf_min" and "bdf_max" files show the supported bdf range of each
+pmu device.
+
+The "hw_clk_freq" file shows the hardware clock frequency of each pmu
+device.
+
+Example usage of checking event code and subevent code::
+
+  $# cat /sys/devices/hns3_pmu_sicl_0/events/dly_tx_normal_to_mac_time
+  config=0x00204
+  $# cat /sys/devices/hns3_pmu_sicl_0/events/dly_tx_normal_to_mac_packet_num
+  config=0x10204
+
+Each performance statistic has a pair of events whose two values are
+combined in userspace to calculate the real performance data.
+
+Bits 0~15 of config (here 0x0204) are the actual hardware event code. Two
+events whose config values share the same bits 0~15 form an event pair,
+and bit 16 of config selects counter 0 or counter 1 of the hardware
+event.
+
+After getting the two values of an event pair in userspace, the formula to
+calculate the real performance data is::
+
+  counter 0 / counter 1
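+
+A small sketch of the config layout just described (the decoding below is
+illustrative only, not part of the driver)::
+
+  /* sketch: split an HNS3 PMU config value into the hardware event
+   * code (bits 0~15) and the counter index (bit 16) */
+  #include <stdint.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          uint32_t config = 0x10204;              /* example from above */
+          uint16_t event = config & 0xffff;       /* hardware event code */
+          unsigned int counter = (config >> 16) & 1;
+
+          printf("event=0x%04x counter=%u\n", event, counter);
+          return 0;
+  }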
+
+Example usage of checking supported filter mode::
+
+  $# cat /sys/devices/hns3_pmu_sicl_0/filtermode/bw_ssu_rpu_byte_num
+  filter mode supported: global/port/port-tc/func/func-queue/
+
+Example usage of perf::
+
+  $# perf list
+  hns3_pmu_sicl_0/bw_ssu_rpu_byte_num/ [kernel PMU event]
+  hns3_pmu_sicl_0/bw_ssu_rpu_time/     [kernel PMU event]
+  ------------------------------------------
+
+  $# perf stat -g -e hns3_pmu_sicl_0/bw_ssu_rpu_byte_num,global=1/ -e hns3_pmu_sicl_0/bw_ssu_rpu_time,global=1/ -I 1000
+  or
+  $# perf stat -g -e hns3_pmu_sicl_0/config=0x00002,global=1/ -e hns3_pmu_sicl_0/config=0x10002,global=1/ -I 1000
+
+
+Filter modes
+--------------
+
+1. global mode
+
+The PMU collects performance statistics for all HNS3 PCIe functions of the
+IO DIE. Setting the "global" filter option to 1 enables this mode.
+
+Example usage of perf::
+
+  $# perf stat -a -e hns3_pmu_sicl_0/config=0x1020F,global=1/ -I 1000
+
+2. port mode
+
+The PMU collects performance statistics for one whole physical port. The
+port id is the same as the mac id. The "tc" filter option must be set to
+0xF in this mode; here tc stands for traffic class.
+
+Example usage of perf::
+
+  $# perf stat -a -e hns3_pmu_sicl_0/config=0x1020F,port=0,tc=0xF/ -I 1000
+
+3. port-tc mode
+
+The PMU collects performance statistics for one tc of a physical port. The
+port id is the same as the mac id. The "tc" filter option must be set to
+0 ~ 7 in this mode.
+
+Example usage of perf::
+
+  $# perf stat -a -e hns3_pmu_sicl_0/config=0x1020F,port=0,tc=0/ -I 1000
+
+4. func mode
+
+The PMU collects performance statistics for one PF/VF. The function id is
+the BDF of the PF/VF; the conversion formula is::
+
+  func = (bus << 8) + (device << 3) + (function)
+
+For example::
+
+  BDF         func
+  35:00.0    0x3500
+  35:00.1    0x3501
+  35:01.0    0x3508
+
+In this mode, the "queue" filter option must be set to 0xFFFF.
+Example usage of perf::
+
+  $# perf stat -a -e hns3_pmu_sicl_0/config=0x1020F,bdf=0x3500,queue=0xFFFF/ -I 1000
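+
+A hedged sketch of the conversion formula above (``bdf_to_func`` is a
+hypothetical helper name, not part of the driver)::
+
+  /* sketch: compute the "bdf" filter value from bus/device/function,
+   * e.g. 35:00.1 -> 0x3501 */
+  #include <stdio.h>
+
+  static unsigned int bdf_to_func(unsigned int bus, unsigned int dev,
+                                  unsigned int fn)
+  {
+          return (bus << 8) | (dev << 3) | fn;
+  }
+
+  int main(void)
+  {
+          printf("0x%04x\n", bdf_to_func(0x35, 0x00, 0x1));
+          return 0;
+  }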
+
+5. func-queue mode
+
+The PMU collects performance statistics for one queue of a PF/VF. The
+function id is the BDF of the PF/VF; the "queue" filter option must be set
+to the exact queue id of the function.
+
+Example usage of perf::
+
+  $# perf stat -a -e hns3_pmu_sicl_0/config=0x1020F,bdf=0x3500,queue=0/ -I 1000
+
+6. func-intr mode
+
+The PMU collects performance statistics for one interrupt of a PF/VF. The
+function id is the BDF of the PF/VF; the "intr" filter option must be set
+to the exact interrupt id of the function.
+
+Example usage of perf::
+
+  $# perf stat -a -e hns3_pmu_sicl_0/config=0x00301,bdf=0x3500,intr=0/ -I 1000
index 69b23f0..9c9ece8 100644 (file)
@@ -9,6 +9,7 @@ Performance monitor support
 
    hisi-pmu
    hisi-pcie-pmu
+   hns3-pmu
    imx-ddr
    qcom_l2_pmu
    qcom_l3_pmu
index aec2cd2..19754be 100644 (file)
@@ -612,8 +612,8 @@ the ``menu`` governor to be used on the systems that use the ``ladder`` governor
 by default this way, for example.
 
 The other kernel command line parameters controlling CPU idle time management
-described below are only relevant for the *x86* architecture and some of
-them affect Intel processors only.
+described below are only relevant for the *x86* architecture and references
+to ``intel_idle`` affect Intel processors only.
 
 The *x86* architecture support code recognizes three kernel command line
 options related to CPU idle time management: ``idle=poll``, ``idle=halt``,
@@ -635,10 +635,13 @@ idle, so it very well may hurt single-thread computations performance as well as
 energy-efficiency.  Thus using it for performance reasons may not be a good idea
 at all.]
 
-The ``idle=nomwait`` option disables the ``intel_idle`` driver and causes
-``acpi_idle`` to be used (as long as all of the information needed by it is
-there in the system's ACPI tables), but it is not allowed to use the
-``MWAIT`` instruction of the CPUs to ask the hardware to enter idle states.
+The ``idle=nomwait`` option prevents the use of the ``MWAIT`` instruction
+of the CPU to enter idle states. When this option is used, the ``acpi_idle``
+driver will use the ``HLT`` instruction instead of ``MWAIT``. On systems
+running Intel processors, this option disables the ``intel_idle`` driver
+and forces the use of the ``acpi_idle`` driver instead. Note that in either
+case, the ``acpi_idle`` driver will function only if all the information
+needed by it is in the system's ACPI tables.
 
 In addition to the architecture-level kernel command line options affecting CPU
 idle time management, there are parameters affecting individual ``CPUIdle``
index 3d116fb..31fc10b 100644 (file)
@@ -301,6 +301,10 @@ HWCAP2_WFXT
 
     Functionality implied by ID_AA64ISAR2_EL1.WFXT == 0b0010.
 
+HWCAP2_EBF16
+
+    Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0010.
+
 4. Unused AT_HWCAP bits
 -----------------------
 
index 901cd09..2a641ba 100644 (file)
@@ -33,9 +33,8 @@ AArch64 Linux memory layout with 4KB pages + 4 levels (48-bit)::
   0000000000000000     0000ffffffffffff         256TB          user
   ffff000000000000     ffff7fffffffffff         128TB          kernel logical memory map
  [ffff600000000000     ffff7fffffffffff]         32TB          [kasan shadow region]
-  ffff800000000000     ffff800007ffffff         128MB          bpf jit region
-  ffff800008000000     ffff80000fffffff         128MB          modules
-  ffff800010000000     fffffbffefffffff         124TB          vmalloc
+  ffff800000000000     ffff800007ffffff         128MB          modules
+  ffff800008000000     fffffbffefffffff         124TB          vmalloc
   fffffbfff0000000     fffffbfffdffffff         224MB          fixed mappings (top down)
   fffffbfffe000000     fffffbfffe7fffff           8MB          [guard region]
   fffffbfffe800000     fffffbffff7fffff          16MB          PCI I/O space
@@ -51,9 +50,8 @@ AArch64 Linux memory layout with 64KB pages + 3 levels (52-bit with HW support):
   0000000000000000     000fffffffffffff           4PB          user
   fff0000000000000     ffff7fffffffffff          ~4PB          kernel logical memory map
  [fffd800000000000     ffff7fffffffffff]        512TB          [kasan shadow region]
-  ffff800000000000     ffff800007ffffff         128MB          bpf jit region
-  ffff800008000000     ffff80000fffffff         128MB          modules
-  ffff800010000000     fffffbffefffffff         124TB          vmalloc
+  ffff800000000000     ffff800007ffffff         128MB          modules
+  ffff800008000000     fffffbffefffffff         124TB          vmalloc
   fffffbfff0000000     fffffbfffdffffff         224MB          fixed mappings (top down)
   fffffbfffe000000     fffffbfffe7fffff           8MB          [guard region]
   fffffbfffe800000     fffffbffff7fffff          16MB          PCI I/O space
index d27db84..33b04db 100644 (file)
@@ -82,10 +82,14 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A57      | #1319537        | ARM64_ERRATUM_1319367       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A57      | #1742098        | ARM64_ERRATUM_1742098       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A72      | #853709         | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A72      | #1319367        | ARM64_ERRATUM_1319367       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A72      | #1655431        | ARM64_ERRATUM_1742098       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A73      | #858921         | ARM64_ERRATUM_858921        |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A76      | #1188873,1418040| ARM64_ERRATUM_1418040       |
@@ -102,6 +106,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A510     | #2077057        | ARM64_ERRATUM_2077057       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A510     | #2441009        | ARM64_ERRATUM_2441009       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2054223        | ARM64_ERRATUM_2054223       |
index d6b3f94..0793c40 100644 (file)
@@ -223,7 +223,7 @@ Module Loading
 Inter Module support
 --------------------
 
-Refer to the file kernel/module.c for more information.
+Refer to the files in kernel/module/ for more information.
 
 Hardware Interfaces
 ===================
index ec575e7..bf28ac0 100644 (file)
@@ -4,31 +4,29 @@
 Memory Protection Keys
 ======================
 
-Memory Protection Keys for Userspace (PKU aka PKEYs) is a feature
-which is found on Intel's Skylake (and later) "Scalable Processor"
-Server CPUs. It will be available in future non-server Intel parts
-and future AMD processors.
-
-For anyone wishing to test or use this feature, it is available in
-Amazon's EC2 C5 instances and is known to work there using an Ubuntu
-17.04 image.
-
-Memory Protection Keys provides a mechanism for enforcing page-based
-protections, but without requiring modification of the page tables
-when an application changes protection domains.  It works by
-dedicating 4 previously ignored bits in each page table entry to a
-"protection key", giving 16 possible keys.
-
-There is also a new user-accessible register (PKRU) with two separate
-bits (Access Disable and Write Disable) for each key.  Being a CPU
-register, PKRU is inherently thread-local, potentially giving each
+Memory Protection Keys provide a mechanism for enforcing page-based
+protections, but without requiring modification of the page tables when an
+application changes protection domains.
+
+Pkeys Userspace (PKU) is a feature which can be found on:
+        * Intel server CPUs, Skylake and later
+        * Intel client CPUs, Tiger Lake (11th Gen Core) and later
+        * Future AMD CPUs
+
+Pkeys work by dedicating 4 previously Reserved bits in each page table entry to
+a "protection key", giving 16 possible keys.
+
+Protections for each key are defined with a per-CPU user-accessible register
+(PKRU).  Each of these is a 32-bit register storing two bits (Access Disable
+and Write Disable) for each of 16 keys.
+
+Being a CPU register, PKRU is inherently thread-local, potentially giving each
 thread a different set of protections from every other thread.
 
-There are two new instructions (RDPKRU/WRPKRU) for reading and writing
-to the new register.  The feature is only available in 64-bit mode,
-even though there is theoretically space in the PAE PTEs.  These
-permissions are enforced on data access only and have no effect on
-instruction fetches.
+There are two instructions (RDPKRU/WRPKRU) for reading and writing to the
+register.  The feature is only available in 64-bit mode, even though there is
+theoretically space in the PAE PTEs.  These permissions are enforced on data
+access only and have no effect on instruction fetches.
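+
+A minimal userspace sketch of reading PKRU (assuming a compiler built with
+``-mpku`` support for the ``_rdpkru_u32()`` intrinsic, and a kernel/CPU with
+pkeys enabled; RDPKRU faults otherwise)::
+
+  /* sketch: dump the Access Disable / Write Disable bits per key */
+  #include <immintrin.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned int pkru = _rdpkru_u32();
+          int key;
+
+          for (key = 0; key < 16; key++)
+                  printf("pkey %2d: AD=%u WD=%u\n", key,
+                         (pkru >> (2 * key)) & 1,       /* Access Disable */
+                         (pkru >> (2 * key + 1)) & 1);  /* Write Disable */
+          return 0;
+  }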
 
 Syscalls
 ========
index 5ad9e0a..12e4aec 100644 (file)
@@ -51,8 +51,8 @@ namespace ``USB_STORAGE``, use::
 The corresponding ksymtab entry struct ``kernel_symbol`` will have the member
 ``namespace`` set accordingly. A symbol that is exported without a namespace will
 refer to ``NULL``. There is no default namespace if none is defined. ``modpost``
-and kernel/module.c make use the namespace at build time or module load time,
-respectively.
+and kernel/module/main.c make use of the namespace at build time or module
+load time, respectively.
 
 2.2 Using the DEFAULT_SYMBOL_NAMESPACE define
 =============================================
index c388ae5..c9c346e 100644 (file)
@@ -94,6 +94,7 @@ if:
           - allwinner,sun8i-a83t-display-engine
           - allwinner,sun8i-r40-display-engine
           - allwinner,sun9i-a80-display-engine
+          - allwinner,sun20i-d1-display-engine
           - allwinner,sun50i-a64-display-engine
 
 then:
index ff0a5c5..e712444 100644 (file)
@@ -67,7 +67,7 @@ if:
 then:
   properties:
     clocks:
-      maxItems: 2
+      minItems: 2
 
   required:
     - clock-names
index fe0ac08..0e8ddf0 100644 (file)
@@ -40,9 +40,8 @@ properties:
       value to be used for converting remote channel measurements to
       temperature.
     $ref: /schemas/types.yaml#/definitions/int32
-    items:
-      minimum: -128
-      maximum: 127
+    minimum: -128
+    maximum: 127
 
   ti,beta-compensation:
     description:
index f89ebde..de7c5e5 100644 (file)
@@ -30,6 +30,7 @@ properties:
       - socionext,uniphier-ld11-aidet
       - socionext,uniphier-ld20-aidet
       - socionext,uniphier-pxs3-aidet
+      - socionext,uniphier-nx1-aidet
 
   reg:
     maxItems: 1
index 4f15463..170cd20 100644 (file)
@@ -167,70 +167,65 @@ properties:
       - in-band-status
 
   fixed-link:
-    allOf:
-      - if:
-          type: array
-        then:
-          deprecated: true
-          items:
-            - minimum: 0
-              maximum: 31
-              description:
-                Emulated PHY ID, choose any but unique to the all
-                specified fixed-links
-
-            - enum: [0, 1]
-              description:
-                Duplex configuration. 0 for half duplex or 1 for
-                full duplex
-
-            - enum: [10, 100, 1000, 2500, 10000]
-              description:
-                Link speed in Mbits/sec.
-
-            - enum: [0, 1]
-              description:
-                Pause configuration. 0 for no pause, 1 for pause
-
-            - enum: [0, 1]
-              description:
-                Asymmetric pause configuration. 0 for no asymmetric
-                pause, 1 for asymmetric pause
-
-
-      - if:
-          type: object
-        then:
-          properties:
-            speed:
-              description:
-                Link speed.
-              $ref: /schemas/types.yaml#/definitions/uint32
-              enum: [10, 100, 1000, 2500, 10000]
-
-            full-duplex:
-              $ref: /schemas/types.yaml#/definitions/flag
-              description:
-                Indicates that full-duplex is used. When absent, half
-                duplex is assumed.
-
-            pause:
-              $ref: /schemas/types.yaml#definitions/flag
-              description:
-                Indicates that pause should be enabled.
-
-            asym-pause:
-              $ref: /schemas/types.yaml#/definitions/flag
-              description:
-                Indicates that asym_pause should be enabled.
-
-            link-gpios:
-              maxItems: 1
-              description:
-                GPIO to determine if the link is up
-
-          required:
-            - speed
+    oneOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32-array
+        deprecated: true
+        items:
+          - minimum: 0
+            maximum: 31
+            description:
+              Emulated PHY ID, choose any but unique to the all
+              specified fixed-links
+
+          - enum: [0, 1]
+            description:
+              Duplex configuration. 0 for half duplex or 1 for
+              full duplex
+
+          - enum: [10, 100, 1000, 2500, 10000]
+            description:
+              Link speed in Mbits/sec.
+
+          - enum: [0, 1]
+            description:
+              Pause configuration. 0 for no pause, 1 for pause
+
+          - enum: [0, 1]
+            description:
+              Asymmetric pause configuration. 0 for no asymmetric
+              pause, 1 for asymmetric pause
+      - type: object
+        additionalProperties: false
+        properties:
+          speed:
+            description:
+              Link speed.
+            $ref: /schemas/types.yaml#/definitions/uint32
+            enum: [10, 100, 1000, 2500, 10000]
+
+          full-duplex:
+            $ref: /schemas/types.yaml#/definitions/flag
+            description:
+              Indicates that full-duplex is used. When absent, half
+              duplex is assumed.
+
+          pause:
+            $ref: /schemas/types.yaml#definitions/flag
+            description:
+              Indicates that pause should be enabled.
+
+          asym-pause:
+            $ref: /schemas/types.yaml#/definitions/flag
+            description:
+              Indicates that asym_pause should be enabled.
+
+          link-gpios:
+            maxItems: 1
+            description:
+              GPIO to determine if the link is up
+
+        required:
+          - speed
 
 additionalProperties: true
 
index daa2f79..1b18530 100644 (file)
@@ -183,6 +183,7 @@ properties:
       Should specify the gpio for phy reset.
 
   phy-reset-duration:
+    $ref: /schemas/types.yaml#/definitions/uint32
     deprecated: true
     description:
       Reset duration in milliseconds.  Should present only if property
@@ -191,12 +192,14 @@ properties:
       and 1 millisecond will be used instead.
 
   phy-reset-active-high:
+    type: boolean
     deprecated: true
     description:
       If present then the reset sequence using the GPIO specified in the
       "phy-reset-gpios" property is reversed (H=reset state, L=operation state).
 
   phy-reset-post-delay:
+    $ref: /schemas/types.yaml#/definitions/uint32
     deprecated: true
     description:
       Post reset delay in milliseconds. If present then a delay of phy-reset-post-delay
index 8cd0adb..7029cb1 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Atheros ath9k wireless devices Generic Binding
 
 maintainers:
-  - Kalle Valo <kvalo@codeaurora.org>
+  - Toke Høiland-Jørgensen <toke@toke.dk>
 
 description: |
   This node provides properties for configuring the ath9k wireless device.
index 8c01fdb..a677b05 100644 (file)
@@ -9,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Technologies ath11k wireless devices Generic Binding
 
 maintainers:
-  - Kalle Valo <kvalo@codeaurora.org>
+  - Kalle Valo <kvalo@kernel.org>
 
 description: |
   These are dt entries for Qualcomm Technologies, Inc. IEEE 802.11ax
index e9a5330..ef18a57 100644 (file)
@@ -25,12 +25,12 @@ properties:
       - qcom,sc7280-lpass-cpu
 
   reg:
-    minItems: 2
+    minItems: 1
     maxItems: 6
     description: LPAIF core registers
 
   reg-names:
-    minItems: 2
+    minItems: 1
     maxItems: 6
 
   clocks:
@@ -42,12 +42,12 @@ properties:
     maxItems: 10
 
   interrupts:
-    minItems: 2
+    minItems: 1
     maxItems: 4
     description: LPAIF DMA buffer interrupt
 
   interrupt-names:
-    minItems: 2
+    minItems: 1
     maxItems: 4
 
   qcom,adsp:
index ece261b..7326c0a 100644 (file)
@@ -47,6 +47,5 @@ examples:
         clocks = <&clkcfg CLK_SPI0>;
         interrupt-parent = <&plic>;
         interrupts = <54>;
-        spi-max-frequency = <25000000>;
     };
 ...
index e2c7b93..78ceb9d 100644 (file)
@@ -110,7 +110,6 @@ examples:
         pinctrl-names = "default";
         pinctrl-0 = <&qup_spi1_default>;
         interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
-        spi-max-frequency = <50000000>;
         #address-cells = <1>;
         #size-cells = <0>;
     };
index 0b4524b..1e84e1b 100644 (file)
@@ -136,7 +136,8 @@ properties:
       Phandle of a companion.
 
   phys:
-    maxItems: 1
+    minItems: 1
+    maxItems: 3
 
   phy-names:
     const: usb
index e2ac846..bb6bbd5 100644 (file)
@@ -103,7 +103,8 @@ properties:
       Overrides the detected port count
 
   phys:
-    maxItems: 1
+    minItems: 1
+    maxItems: 3
 
   phy-names:
     const: usb
index b81794e..06ac89a 100644 (file)
@@ -13,6 +13,12 @@ EDD Interfaces
 .. kernel-doc:: drivers/firmware/edd.c
    :internal:
 
+Generic System Framebuffers Interface
+-------------------------------------
+
+.. kernel-doc:: drivers/firmware/sysfb.c
+   :export:
+
 Intel Stratix10 SoC Service Layer
 ---------------------------------
 Some features of the Intel Stratix10 SoC require a level of privilege
index 4e3adf3..b33aa04 100644 (file)
@@ -6,7 +6,7 @@ This document explains how GPIOs can be assigned to given devices and functions.
 
 Note that it only applies to the new descriptor-based interface. For a
 description of the deprecated integer-based GPIO interface please refer to
-gpio-legacy.txt (actually, there is no real mapping possible with the old
+legacy.rst (actually, there is no real mapping possible with the old
 interface; you just fetch an integer from somewhere and request the
 corresponding GPIO).
 
index 47869ca..72bcf5f 100644 (file)
@@ -4,7 +4,7 @@ GPIO Descriptor Consumer Interface
 
 This document describes the consumer interface of the GPIO framework. Note that
 it describes the new descriptor-based interface. For a description of the
-deprecated integer-based GPIO interface please refer to gpio-legacy.txt.
+deprecated integer-based GPIO interface please refer to legacy.rst.
 
 
 Guidelines for GPIOs consumers
@@ -78,7 +78,7 @@ whether the line is configured active high or active low (see
 
 The two last flags are used for use cases where open drain is mandatory, such
 as I2C: if the line is not already configured as open drain in the mappings
-(see board.txt), then open drain will be enforced anyway and a warning will be
+(see board.rst), then open drain will be enforced anyway and a warning will be
 printed that the board configuration needs to be updated to match the use case.
 
 Both functions return either a valid GPIO descriptor, or an error code checkable
@@ -270,7 +270,7 @@ driven.
 The same is applicable for open drain or open source output lines: those do not
 actively drive their output high (open drain) or low (open source), they just
 switch their output to a high impedance value. The consumer should not need to
-care. (For details read about open drain in driver.txt.)
+care. (For details read about open drain in driver.rst.)
 
 With this, all the gpiod_set_(array)_value_xxx() functions interpret the
 parameter "value" as "asserted" ("1") or "de-asserted" ("0"). The physical line
index 2e924fb..c9c1924 100644 (file)
@@ -14,12 +14,12 @@ Due to the history of GPIO interfaces in the kernel, there are two different
 ways to obtain and use GPIOs:
 
   - The descriptor-based interface is the preferred way to manipulate GPIOs,
-    and is described by all the files in this directory excepted gpio-legacy.txt.
+    and is described by all the files in this directory except legacy.rst.
   - The legacy integer-based interface which is considered deprecated (but still
-    usable for compatibility reasons) is documented in gpio-legacy.txt.
+    usable for compatibility reasons) is documented in legacy.rst.
 
 The remainder of this document applies to the new descriptor-based interface.
-gpio-legacy.txt contains the same information applied to the legacy
+legacy.rst contains the same information applied to the legacy
 integer-based interface.
 
 
index b01bf7b..6bd78eb 100644 (file)
@@ -9,7 +9,7 @@
     |       alpha: | TODO |
     |         arc: |  ok  |
     |         arm: | TODO |
-    |       arm64: | TODO |
+    |       arm64: |  ok  |
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
index d0904f6..992eddb 100644 (file)
@@ -19,13 +19,23 @@ The main Btrfs features include:
     * Subvolumes (separate internal filesystem roots)
     * Object level mirroring and striping
     * Checksums on data and metadata (multiple algorithms available)
-    * Compression
+    * Compression (multiple algorithms available)
+    * Reflink, deduplication
+    * Scrub (on-line checksum verification)
+    * Hierarchical quota groups (subvolume and snapshot support)
     * Integrated multiple device support, with several raid algorithms
     * Offline filesystem check
-    * Efficient incremental backup and FS mirroring
+    * Efficient incremental backup and FS mirroring (send/receive)
+    * Trim/discard
     * Online filesystem defragmentation
+    * Swapfile support
+    * Zoned mode
+    * Read/write metadata verification
+    * Online resize (shrink, grow)
 
-For more information please refer to the wiki
+For more information please refer to the documentation site or wiki
+
+  https://btrfs.readthedocs.io
 
   https://btrfs.wiki.kernel.org
 
index 871d2da..8781469 100644 (file)
@@ -13,8 +13,8 @@ disappeared as of Linux 3.0.
 
 There are two places where extended attributes can be found. The first
 place is between the end of each inode entry and the beginning of the
-next inode entry. For example, if inode.i\_extra\_isize = 28 and
-sb.inode\_size = 256, then there are 256 - (128 + 28) = 100 bytes
+next inode entry. For example, if inode.i_extra_isize = 28 and
+sb.inode_size = 256, then there are 256 - (128 + 28) = 100 bytes
 available for in-inode extended attribute storage. The second place
 where extended attributes can be found is in the block pointed to by
 ``inode.i_file_acl``. As of Linux 3.11, it is not possible for this
@@ -38,8 +38,8 @@ Extended attributes, when stored after the inode, have a header
      - Name
      - Description
    * - 0x0
-     - \_\_le32
-     - h\_magic
+     - __le32
+     - h_magic
      - Magic number for identification, 0xEA020000. This value is set by the
        Linux driver, though e2fsprogs doesn't seem to check it(?)
 
@@ -55,28 +55,28 @@ The beginning of an extended attribute block is in
      - Name
      - Description
    * - 0x0
-     - \_\_le32
-     - h\_magic
+     - __le32
+     - h_magic
      - Magic number for identification, 0xEA020000.
    * - 0x4
-     - \_\_le32
-     - h\_refcount
+     - __le32
+     - h_refcount
      - Reference count.
    * - 0x8
-     - \_\_le32
-     - h\_blocks
+     - __le32
+     - h_blocks
      - Number of disk blocks used.
    * - 0xC
-     - \_\_le32
-     - h\_hash
+     - __le32
+     - h_hash
      - Hash value of all attributes.
    * - 0x10
-     - \_\_le32
-     - h\_checksum
+     - __le32
+     - h_checksum
      - Checksum of the extended attribute block.
    * - 0x14
-     - \_\_u32
-     - h\_reserved[3]
+     - __u32
+     - h_reserved[3]
      - Zero.
 
 The checksum is calculated against the FS UUID, the 64-bit block number
@@ -100,46 +100,46 @@ Attributes stored inside an inode do not need be stored in sorted order.
      - Name
      - Description
    * - 0x0
-     - \_\_u8
-     - e\_name\_len
+     - __u8
+     - e_name_len
      - Length of name.
    * - 0x1
-     - \_\_u8
-     - e\_name\_index
+     - __u8
+     - e_name_index
      - Attribute name index. There is a discussion of this below.
    * - 0x2
-     - \_\_le16
-     - e\_value\_offs
+     - __le16
+     - e_value_offs
      - Location of this attribute's value on the disk block where it is stored.
        Multiple attributes can share the same value. For an inode attribute
        this value is relative to the start of the first entry; for a block this
        value is relative to the start of the block (i.e. the header).
    * - 0x4
-     - \_\_le32
-     - e\_value\_inum
+     - __le32
+     - e_value_inum
      - The inode where the value is stored. Zero indicates the value is in the
        same block as this entry. This field is only used if the
-       INCOMPAT\_EA\_INODE feature is enabled.
+       INCOMPAT_EA_INODE feature is enabled.
    * - 0x8
-     - \_\_le32
-     - e\_value\_size
+     - __le32
+     - e_value_size
      - Length of attribute value.
    * - 0xC
-     - \_\_le32
-     - e\_hash
+     - __le32
+     - e_hash
      - Hash value of attribute name and attribute value. The kernel doesn't
        update the hash for in-inode attributes, so for that case this value
        must be zero, because e2fsck validates any non-zero hash regardless of
        where the xattr lives.
    * - 0x10
      - char
-     - e\_name[e\_name\_len]
+     - e_name[e_name_len]
      - Attribute name. Does not include trailing NULL.
 
 Attribute values can follow the end of the entry table. There appears to
 be a requirement that they be aligned to 4-byte boundaries. The values
 are stored starting at the end of the block and grow towards the
-xattr\_header/xattr\_entry table. When the two collide, the overflow is
+xattr_header/xattr_entry table. When the two collide, the overflow is
 put into a separate disk block. If the disk block fills up, the
 filesystem returns -ENOSPC.
 
@@ -167,15 +167,15 @@ the key name. Here is a map of name index values to key prefixes:
    * - 1
      - “user.”
    * - 2
-     - “system.posix\_acl\_access”
+     - “system.posix_acl_access”
    * - 3
-     - “system.posix\_acl\_default”
+     - “system.posix_acl_default”
    * - 4
      - “trusted.”
    * - 6
      - “security.”
    * - 7
-     - “system.” (inline\_data only?)
+     - “system.” (inline_data only?)
    * - 8
      - “system.richacl” (SuSE kernels only?)
 
index 72075aa..976a180 100644 (file)
@@ -23,7 +23,7 @@ means that a block group addresses 32 gigabytes instead of 128 megabytes,
 also shrinking the amount of file system overhead for metadata.
 
 The administrator can set a block cluster size at mkfs time (which is
-stored in the s\_log\_cluster\_size field in the superblock); from then
+stored in the s_log_cluster_size field in the superblock); from then
 on, the block bitmaps track clusters, not individual blocks. This means
 that block groups can be several gigabytes in size (instead of just
 128MiB); however, the minimum allocation unit becomes a cluster, not a
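
Assuming ``s_log_cluster_size`` uses the same ``1024 << log`` encoding
as ``s_log_block_size`` (an assumption, not spelled out in this
excerpt), the cluster-to-block ratio is a simple shift::

    unsigned int blocks_per_cluster(unsigned int s_log_block_size,
                                    unsigned int s_log_cluster_size)
    {
            /* e.g. log_block = 2 (4 KiB) and log_cluster = 6 (64 KiB)
             * give a ratio of 16 blocks per cluster. */
            return 1u << (s_log_cluster_size - s_log_block_size);
    }
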
index c7546db..91c45d8 100644 (file)
@@ -9,15 +9,15 @@ group.
 The inode bitmap records which entries in the inode table are in use.
 
 As with most bitmaps, one bit represents the usage status of one data
-block or inode table entry. This implies a block group size of 8 \*
-number\_of\_bytes\_in\_a\_logical\_block.
+block or inode table entry. This implies a block group size of 8 *
+number_of_bytes_in_a_logical_block.
 
 NOTE: If ``BLOCK_UNINIT`` is set for a given block group, various parts
 of the kernel and e2fsprogs code pretend that the block bitmap contains
 zeros (i.e. all blocks in the group are free). However, it is not
 necessarily the case that no blocks are in use -- if ``meta_bg`` is set,
 the bitmaps and group descriptor live inside the group. Unfortunately,
-ext2fs\_test\_block\_bitmap2() will return '0' for those locations,
+ext2fs_test_block_bitmap2() will return '0' for those locations,
 which produces confusing debugfs output.
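
A one-line restatement of the bitmap arithmetic above (one bit per
block, one block worth of bitmap per group)::

    unsigned long blocks_per_group(unsigned long block_size_bytes)
    {
            return 8 * block_size_bytes;    /* 32768 for 4 KiB blocks */
    }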
 
 Inode Table
index d5d652a..46d78f8 100644 (file)
@@ -56,39 +56,39 @@ established that the super block and the group descriptor table, if
 present, will be at the beginning of the block group. The bitmaps and
 the inode table can be anywhere, and it is quite possible for the
 bitmaps to come after the inode table, or for both to be in different
-groups (flex\_bg). Leftover space is used for file data blocks, indirect
+groups (flex_bg). Leftover space is used for file data blocks, indirect
 block maps, extent tree blocks, and extended attributes.
 
 Flexible Block Groups
 ---------------------
 
 Starting in ext4, there is a new feature called flexible block groups
-(flex\_bg). In a flex\_bg, several block groups are tied together as one
+(flex_bg). In a flex_bg, several block groups are tied together as one
 logical block group; the bitmap spaces and the inode table space in the
-first block group of the flex\_bg are expanded to include the bitmaps
-and inode tables of all other block groups in the flex\_bg. For example,
-if the flex\_bg size is 4, then group 0 will contain (in order) the
+first block group of the flex_bg are expanded to include the bitmaps
+and inode tables of all other block groups in the flex_bg. For example,
+if the flex_bg size is 4, then group 0 will contain (in order) the
 superblock, group descriptors, data block bitmaps for groups 0-3, inode
 bitmaps for groups 0-3, inode tables for groups 0-3, and the remaining
 space in group 0 is for file data. The effect of this is to group the
 block group metadata close together for faster loading, and to enable
 large files to be contiguous on disk. Backup copies of the superblock
 and group descriptors are always at the beginning of block groups, even
-if flex\_bg is enabled. The number of block groups that make up a
-flex\_bg is given by 2 ^ ``sb.s_log_groups_per_flex``.
+if flex_bg is enabled. The number of block groups that make up a
+flex_bg is given by 2 ^ ``sb.s_log_groups_per_flex``.
 
 Meta Block Groups
 -----------------
 
-Without the option META\_BG, for safety concerns, all block group
+Without the option META_BG, for safety concerns, all block group
 descriptor copies are kept in the first block group. Given the default
 128 MiB (2^27 bytes) block group size and 64-byte group descriptors, ext4
 can have at most 2^27/64 = 2^21 block groups. This limits the entire
 filesystem size to 2^21 * 2^27 = 2^48 bytes or 256 TiB.
 
 The solution to this problem is to use the metablock group feature
-(META\_BG), which is already in ext3 for all 2.6 releases. With the
-META\_BG feature, ext4 filesystems are partitioned into many metablock
+(META_BG), which is already in ext3 for all 2.6 releases. With the
+META_BG feature, ext4 filesystems are partitioned into many metablock
 groups. Each metablock group is a cluster of block groups whose group
 descriptor structures can be stored in a single disk block. For ext4
 filesystems with 4 KB block size, a single metablock group partition
@@ -110,7 +110,7 @@ bytes, a meta-block group contains 32 block groups for filesystems with
 a 1KB block size, and 128 block groups for filesystems with a 4KB
 blocksize. Filesystems can either be created using this new block group
 descriptor layout, or existing filesystems can be resized on-line, and
-the field s\_first\_meta\_bg in the superblock will indicate the first
+the field s_first_meta_bg in the superblock will indicate the first
 block group using this new layout.
 
 Please see an important note about ``BLOCK_UNINIT`` in the section about
@@ -121,15 +121,15 @@ Lazy Block Group Initialization
 
 A new feature in ext4 is a set of three block group descriptor flags that
 enable mkfs to skip initializing other parts of the block group
-metadata. Specifically, the INODE\_UNINIT and BLOCK\_UNINIT flags mean
+metadata. Specifically, the INODE_UNINIT and BLOCK_UNINIT flags mean
 that the inode and block bitmaps for that group can be calculated and
 therefore the on-disk bitmap blocks are not initialized. This is
 generally the case for an empty block group or a block group containing
-only fixed-location block group metadata. The INODE\_ZEROED flag means
+only fixed-location block group metadata. The INODE_ZEROED flag means
 that the inode table has been initialized; mkfs will unset this flag and
 rely on the kernel to initialize the inode tables in the background.
 
 By not writing zeroes to the bitmaps and inode table, mkfs time is
-reduced considerably. Note the feature flag is RO\_COMPAT\_GDT\_CSUM,
-but the dumpe2fs output prints this as “uninit\_bg”. They are the same
+reduced considerably. Note the feature flag is RO_COMPAT_GDT_CSUM,
+but the dumpe2fs output prints this as “uninit_bg”. They are the same
 thing.
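
The two size relationships above reduce to a couple of shifts; a short
sketch of both (the numbers come from the text, not from kernel
headers)::

    #include <stdint.h>

    /* flex_bg: how many groups are tied into one flex group. */
    unsigned int groups_per_flex(unsigned int s_log_groups_per_flex)
    {
            return 1u << s_log_groups_per_flex;
    }

    /* Without META_BG: 2^27-byte groups / 64-byte descriptors gives
     * 2^21 groups, and 2^21 * 2^27 bytes = 2^48 bytes = 256 TiB. */
    uint64_t max_fs_bytes_without_meta_bg(void)
    {
            return (1ULL << 21) * (1ULL << 27);
    }
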
index 30e2575..2bd9904 100644 (file)
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0
 
 +---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| i.i\_block Offset   | Where It Points                                                                                                                                                                                                              |
+| i.i_block Offset    | Where It Points                                                                                                                                                                                                              |
 +=====================+==============================================================================================================================================================================================================================+
 | 0 to 11             | Direct map to file blocks 0 to 11.                                                                                                                                                                                           |
 +---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
index 5519e25..e232749 100644 (file)
@@ -4,7 +4,7 @@ Checksums
 ---------
 
 Starting in early 2012, metadata checksums were added to all major ext4
-and jbd2 data structures. The associated feature flag is metadata\_csum.
+and jbd2 data structures. The associated feature flag is metadata_csum.
 The desired checksum algorithm is indicated in the superblock, though as
 of October 2012 the only supported algorithm is crc32c. Some data
 structures did not have space to fit a full 32-bit checksum, so only the
@@ -20,7 +20,7 @@ encounters directory blocks that lack sufficient empty space to add a
 checksum, it will request that you run ``e2fsck -D`` to have the
 directories rebuilt with checksums. This has the added benefit of
 removing slack space from the directory files and rebalancing the htree
-indexes. If you \_ignore\_ this step, your directories will not be
+indexes. If you _ignore_ this step, your directories will not be
 protected by a checksum!
 
 The following table describes the data elements that go into each type
@@ -35,39 +35,39 @@ of checksum. The checksum function is whatever the superblock describes
      - Length
      - Ingredients
    * - Superblock
-     - \_\_le32
+     - __le32
      - The entire superblock up to the checksum field. The UUID lives inside
        the superblock.
    * - MMP
-     - \_\_le32
+     - __le32
      - UUID + the entire MMP block up to the checksum field.
    * - Extended Attributes
-     - \_\_le32
+     - __le32
      - UUID + the entire extended attribute block. The checksum field is set to
        zero.
    * - Directory Entries
-     - \_\_le32
+     - __le32
      - UUID + inode number + inode generation + the directory block up to the
        fake entry enclosing the checksum field.
    * - HTREE Nodes
-     - \_\_le32
+     - __le32
      - UUID + inode number + inode generation + all valid extents + HTREE tail.
        The checksum field is set to zero.
    * - Extents
-     - \_\_le32
+     - __le32
      - UUID + inode number + inode generation + the entire extent block up to
        the checksum field.
    * - Bitmaps
-     - \_\_le32 or \_\_le16
+     - __le32 or __le16
      - UUID + the entire bitmap. Checksums are stored in the group descriptor,
       and truncated if the group descriptor size is 32 bytes (i.e. the
       64bit feature is not enabled).
    * - Inodes
-     - \_\_le32
+     - __le32
      - UUID + inode number + inode generation + the entire inode. The checksum
        field is set to zero. Each inode has its own checksum.
    * - Group Descriptors
-     - \_\_le16
-     - If metadata\_csum, then UUID + group number + the entire descriptor;
-       else if gdt\_csum, then crc16(UUID + group number + the entire
+     - __le16
+     - If metadata_csum, then UUID + group number + the entire descriptor;
+       else if gdt_csum, then crc16(UUID + group number + the entire
        descriptor). In all cases, only the lower 16 bits are stored.
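
As a self-contained sketch of the scheme described above, here is how
the metadata_csum group descriptor checksum might be computed in user
space. The bitwise ``crc32c()`` helper is illustrative (the kernel uses
its own library implementation), and a little-endian host is assumed
when folding in the group number::

    #include <stddef.h>
    #include <stdint.h>

    /* Minimal bitwise CRC32C (Castagnoli polynomial); slow but small. */
    static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
    {
            const uint8_t *p = buf;

            while (len--) {
                    crc ^= *p++;
                    for (int i = 0; i < 8; i++)
                            crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
            }
            return crc;
    }

    /* UUID + group number + descriptor (checksum field zeroed while
     * computing), truncated to the lower 16 bits. */
    static uint16_t gd_checksum(const uint8_t uuid[16], uint32_t group,
                                const void *desc, size_t desc_len)
    {
            uint32_t crc = crc32c(~0u, uuid, 16);

            crc = crc32c(crc, &group, sizeof(group));
            crc = crc32c(crc, desc, desc_len);
            return (uint16_t)crc;
    }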
 
index 55f618b..6eece8e 100644 (file)
@@ -42,24 +42,24 @@ is at most 263 bytes long, though on disk you'll need to reference
      - Name
      - Description
    * - 0x0
-     - \_\_le32
+     - __le32
      - inode
      - Number of the inode that this directory entry points to.
    * - 0x4
-     - \_\_le16
-     - rec\_len
+     - __le16
+     - rec_len
      - Length of this directory entry. Must be a multiple of 4.
    * - 0x6
-     - \_\_le16
-     - name\_len
+     - __le16
+     - name_len
      - Length of the file name.
    * - 0x8
      - char
-     - name[EXT4\_NAME\_LEN]
+     - name[EXT4_NAME_LEN]
      - File name.
 
 Since file names cannot be longer than 255 bytes, the new directory
-entry format shortens the name\_len field and uses the space for a file
+entry format shortens the name_len field and uses the space for a file
 type flag, probably to avoid having to load every inode during directory
 tree traversal. This format is ``ext4_dir_entry_2``, which is at most
 263 bytes long, though on disk you'll need to reference
@@ -74,24 +74,24 @@ tree traversal. This format is ``ext4_dir_entry_2``, which is at most
      - Name
      - Description
    * - 0x0
-     - \_\_le32
+     - __le32
      - inode
      - Number of the inode that this directory entry points to.
    * - 0x4
-     - \_\_le16
-     - rec\_len
+     - __le16
+     - rec_len
      - Length of this directory entry.
    * - 0x6
-     - \_\_u8
-     - name\_len
+     - __u8
+     - name_len
      - Length of the file name.
    * - 0x7
-     - \_\_u8
-     - file\_type
+     - __u8
+     - file_type
      - File type code, see ftype_ table below.
    * - 0x8
      - char
-     - name[EXT4\_NAME\_LEN]
+     - name[EXT4_NAME_LEN]
      - File name.
 
 .. _ftype:
@@ -137,19 +137,19 @@ entry uses this extension, it may be up to 271 bytes.
      - Name
      - Description
    * - 0x0
-     - \_\_le32
+     - __le32
      - hash
      - The hash of the directory name
    * - 0x4
-     - \_\_le32
-     - minor\_hash
+     - __le32
+     - minor_hash
      - The minor hash of the directory name
 
 
 In order to add checksums to these classic directory blocks, a phony
 ``struct ext4_dir_entry`` is placed at the end of each leaf block to
 hold the checksum. The directory entry is 12 bytes long. The inode
-number and name\_len fields are set to zero to fool old software into
+number and name_len fields are set to zero to fool old software into
 ignoring an apparently empty directory entry, and the checksum is stored
 in the place where the name normally goes. The structure is
 ``struct ext4_dir_entry_tail``:
@@ -163,24 +163,24 @@ in the place where the name normally goes. The structure is
      - Name
      - Description
    * - 0x0
-     - \_\_le32
-     - det\_reserved\_zero1
+     - __le32
+     - det_reserved_zero1
      - Inode number, which must be zero.
    * - 0x4
-     - \_\_le16
-     - det\_rec\_len
+     - __le16
+     - det_rec_len
      - Length of this directory entry, which must be 12.
    * - 0x6
-     - \_\_u8
-     - det\_reserved\_zero2
+     - __u8
+     - det_reserved_zero2
      - Length of the file name, which must be zero.
    * - 0x7
-     - \_\_u8
-     - det\_reserved\_ft
+     - __u8
+     - det_reserved_ft
      - File type, which must be 0xDE.
    * - 0x8
-     - \_\_le32
-     - det\_checksum
+     - __le32
+     - det_checksum
      - Directory leaf block checksum.
 
 The leaf directory block checksum is calculated against the FS UUID, the
@@ -194,7 +194,7 @@ Hash Tree Directories
 A linear array of directory entries isn't great for performance, so a
 new feature was added to ext3 to provide a faster (but peculiar)
 balanced tree keyed off a hash of the directory entry name. If the
-EXT4\_INDEX\_FL (0x1000) flag is set in the inode, this directory uses a
+EXT4_INDEX_FL (0x1000) flag is set in the inode, this directory uses a
 hashed btree (htree) to organize and find directory entries. For
 backwards read-only compatibility with ext2, this tree is actually
 hidden inside the directory file, masquerading as “empty” directory data
@@ -206,14 +206,14 @@ rest of the directory block is empty so that it moves on.
 The root of the tree always lives in the first data block of the
 directory. By ext2 custom, the '.' and '..' entries must appear at the
 beginning of this first block, so they are put here as two
-``struct ext4_dir_entry_2``\ s and not stored in the tree. The rest of
+``struct ext4_dir_entry_2`` entries and not stored in the tree. The rest of
 the root node contains metadata about the tree and finally a hash->block
 map to find nodes that are lower in the htree. If
 ``dx_root.info.indirect_levels`` is non-zero then the htree has two
 levels; the data block pointed to by the root node's map is an interior
 node, which is indexed by a minor hash. Interior nodes in this tree
 contain a zeroed-out ``struct ext4_dir_entry_2`` followed by a
-minor\_hash->block map to find leafe nodes. Leaf nodes contain a linear
+minor_hash->block map to find leaf nodes. Leaf nodes contain a linear
 array of all ``struct ext4_dir_entry_2``; all of these entries
 (presumably) hash to the same value. If there is an overflow, the
 entries simply overflow into the next leaf node, and the
@@ -245,83 +245,83 @@ of a data block:
      - Name
      - Description
    * - 0x0
-     - \_\_le32
+     - __le32
      - dot.inode
      - inode number of this directory.
    * - 0x4
-     - \_\_le16
-     - dot.rec\_len
+     - __le16
+     - dot.rec_len
      - Length of this record, 12.
    * - 0x6
      - u8
-     - dot.name\_len
+     - dot.name_len
      - Length of the name, 1.
    * - 0x7
      - u8
-     - dot.file\_type
+     - dot.file_type
      - File type of this entry, 0x2 (directory) (if the feature flag is set).
    * - 0x8
      - char
      - dot.name[4]
-     - “.\\0\\0\\0”
+     - “.\0\0\0”
    * - 0xC
-     - \_\_le32
+     - __le32
      - dotdot.inode
      - inode number of parent directory.
    * - 0x10
-     - \_\_le16
-     - dotdot.rec\_len
-     - block\_size - 12. The record length is long enough to cover all htree
+     - __le16
+     - dotdot.rec_len
+     - block_size - 12. The record length is long enough to cover all htree
        data.
    * - 0x12
      - u8
-     - dotdot.name\_len
+     - dotdot.name_len
      - Length of the name, 2.
    * - 0x13
      - u8
-     - dotdot.file\_type
+     - dotdot.file_type
      - File type of this entry, 0x2 (directory) (if the feature flag is set).
    * - 0x14
      - char
-     - dotdot\_name[4]
-     - “..\\0\\0”
+     - dotdot_name[4]
+     - “..\0\0”
    * - 0x18
-     - \_\_le32
-     - struct dx\_root\_info.reserved\_zero
+     - __le32
+     - struct dx_root_info.reserved_zero
      - Zero.
    * - 0x1C
      - u8
-     - struct dx\_root\_info.hash\_version
+     - struct dx_root_info.hash_version
      - Hash type, see dirhash_ table below.
    * - 0x1D
      - u8
-     - struct dx\_root\_info.info\_length
+     - struct dx_root_info.info_length
      - Length of the tree information, 0x8.
    * - 0x1E
      - u8
-     - struct dx\_root\_info.indirect\_levels
-     - Depth of the htree. Cannot be larger than 3 if the INCOMPAT\_LARGEDIR
+     - struct dx_root_info.indirect_levels
+     - Depth of the htree. Cannot be larger than 3 if the INCOMPAT_LARGEDIR
        feature is set; cannot be larger than 2 otherwise.
    * - 0x1F
      - u8
-     - struct dx\_root\_info.unused\_flags
+     - struct dx_root_info.unused_flags
      -
    * - 0x20
-     - \_\_le16
+     - __le16
      - limit
-     - Maximum number of dx\_entries that can follow this header, plus 1 for
+     - Maximum number of dx_entries that can follow this header, plus 1 for
        the header itself.
    * - 0x22
-     - \_\_le16
+     - __le16
      - count
-     - Actual number of dx\_entries that follow this header, plus 1 for the
+     - Actual number of dx_entries that follow this header, plus 1 for the
        header itself.
    * - 0x24
-     - \_\_le32
+     - __le32
      - block
      - The block number (within the directory file) that goes with hash=0.
    * - 0x28
-     - struct dx\_entry
+     - struct dx_entry
      - entries[0]
      - As many 8-byte ``struct dx_entry`` as fits in the rest of the data block.
 
@@ -362,38 +362,38 @@ also the full length of a data block:
      - Name
      - Description
    * - 0x0
-     - \_\_le32
+     - __le32
      - fake.inode
      - Zero, to make it look like this entry is not in use.
    * - 0x4
-     - \_\_le16
-     - fake.rec\_len
-     - The size of the block, in order to hide all of the dx\_node data.
+     - __le16
+     - fake.rec_len
+     - The size of the block, in order to hide all of the dx_node data.
    * - 0x6
      - u8
-     - name\_len
+     - name_len
      - Zero. There is no name for this “unused” directory entry.
    * - 0x7
      - u8
-     - file\_type
+     - file_type
      - Zero. There is no file type for this “unused” directory entry.
    * - 0x8
-     - \_\_le16
+     - __le16
      - limit
-     - Maximum number of dx\_entries that can follow this header, plus 1 for
+     - Maximum number of dx_entries that can follow this header, plus 1 for
        the header itself.
    * - 0xA
-     - \_\_le16
+     - __le16
      - count
-     - Actual number of dx\_entries that follow this header, plus 1 for the
+     - Actual number of dx_entries that follow this header, plus 1 for the
        header itself.
    * - 0xE
-     - \_\_le32
+     - __le32
      - block
      - The block number (within the directory file) that goes with the lowest
        hash value of this block. This value is stored in the parent block.
    * - 0x12
-     - struct dx\_entry
+     - struct dx_entry
      - entries[0]
      - As many 8-byte ``struct dx_entry`` as fits in the rest of the data block.
 
@@ -410,11 +410,11 @@ long:
      - Name
      - Description
    * - 0x0
-     - \_\_le32
+     - __le32
      - hash
      - Hash code.
    * - 0x4
-     - \_\_le32
+     - __le32
      - block
      - Block number (within the directory file, not filesystem blocks) of the
        next node in the htree.
@@ -423,13 +423,13 @@ long:
 author.)
 
 If metadata checksums are enabled, the last 8 bytes of the directory
-block (precisely the length of one dx\_entry) are used to store a
+block (precisely the length of one dx_entry) are used to store a
 ``struct dx_tail``, which contains the checksum. The ``limit`` and
-``count`` entries in the dx\_root/dx\_node structures are adjusted as
-necessary to fit the dx\_tail into the block. If there is no space for
-the dx\_tail, the user is notified to run e2fsck -D to rebuild the
+``count`` entries in the dx_root/dx_node structures are adjusted as
+necessary to fit the dx_tail into the block. If there is no space for
+the dx_tail, the user is notified to run e2fsck -D to rebuild the
 directory index (which will ensure that there's space for the checksum).
-The dx\_tail structure is 8 bytes long and looks like this:
+The dx_tail structure is 8 bytes long and looks like this:
 
 .. list-table::
    :widths: 8 8 24 40
@@ -441,13 +441,13 @@ The dx\_tail structure is 8 bytes long and looks like this:
      - Description
    * - 0x0
      - u32
-     - dt\_reserved
+     - dt_reserved
      - Zero.
    * - 0x4
-     - \_\_le32
-     - dt\_checksum
+     - __le32
+     - dt_checksum
      - Checksum of the htree directory block.
 
 The checksum is calculated against the FS UUID, the htree index header
-(dx\_root or dx\_node), all of the htree indices (dx\_entry) that are in
-use, and the tail block (dx\_tail).
+(dx_root or dx_node), all of the htree indices (dx_entry) that are in
+use, and the tail block (dx_tail).
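
Tying the classic (non-htree) format together, a hedged sketch of how a
single directory block is walked, hopping from entry to entry by
``rec_len`` (little-endian host assumed; real code must validate
``rec_len`` much more carefully)::

    #include <stdint.h>

    struct ext4_dir_entry_2 {
            uint32_t inode;         /* 0 means the entry is unused */
            uint16_t rec_len;       /* distance to the next entry  */
            uint8_t  name_len;
            uint8_t  file_type;     /* see the ftype table above   */
            char     name[];        /* name_len bytes, no trailing NULL */
    };

    void walk_dirents(uint8_t *block, uint32_t block_size,
                      void (*visit)(struct ext4_dir_entry_2 *de))
    {
            uint32_t off = 0;

            while (off + 8 <= block_size) {
                    struct ext4_dir_entry_2 *de =
                            (struct ext4_dir_entry_2 *)(block + off);

                    if (de->rec_len < 8)
                            break;          /* corrupt entry */
                    if (de->inode != 0)
                            visit(de);
                    off += de->rec_len;
            }
    }
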
index ecc0d01..7a2ef26 100644 (file)
@@ -5,14 +5,14 @@ Large Extended Attribute Values
 
 To enable ext4 to store extended attribute values that do not fit in the
 inode or in the single extended attribute block attached to an inode,
-the EA\_INODE feature allows us to store the value in the data blocks of
+the EA_INODE feature allows us to store the value in the data blocks of
 a regular file inode. This “EA inode” is linked only from the extended
 attribute name index and must not appear in a directory entry. The
-inode's i\_atime field is used to store a checksum of the xattr value;
-and i\_ctime/i\_version store a 64-bit reference count, which enables
+inode's i_atime field is used to store a checksum of the xattr value;
+and i_ctime/i_version store a 64-bit reference count, which enables
 sharing of large xattr values between multiple owning inodes. For
 backward compatibility with older versions of this feature, the
-i\_mtime/i\_generation *may* store a back-reference to the inode number
-and i\_generation of the **one** owning inode (in cases where the EA
+i_mtime/i_generation *may* store a back-reference to the inode number
+and i_generation of the **one** owning inode (in cases where the EA
 inode is not referenced by multiple inodes) to verify that the EA inode
 is the correct one being accessed.
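
A small sketch of the reference count split described above, following
the field assignments given in the inode documentation (i_ctime holds
the lower 32 bits, the osd1 l_i_version field the upper 32 bits)::

    #include <stdint.h>

    uint64_t ea_inode_refcount(uint32_t i_ctime, uint32_t l_i_version)
    {
            return ((uint64_t)l_i_version << 32) | i_ctime;
    }
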
index 7ba6114..392ec44 100644 (file)
@@ -7,34 +7,34 @@ Each block group on the filesystem has one of these descriptors
 associated with it. As noted in the Layout section above, the group
 descriptors (if present) are the second item in the block group. The
 standard configuration is for each block group to contain a full copy of
-the block group descriptor table unless the sparse\_super feature flag
+the block group descriptor table unless the sparse_super feature flag
 is set.
 
 Notice how the group descriptor records the location of both bitmaps and
 the inode table (i.e. they can float). This means that within a block
 group, the only data structures with fixed locations are the superblock
-and the group descriptor table. The flex\_bg mechanism uses this
+and the group descriptor table. The flex_bg mechanism uses this
 property to group several block groups into a flex group and lay out all
 of the groups' bitmaps and inode tables into one long run in the first
 group of the flex group.
 
-If the meta\_bg feature flag is set, then several block groups are
-grouped together into a meta group. Note that in the meta\_bg case,
+If the meta_bg feature flag is set, then several block groups are
+grouped together into a meta group. Note that in the meta_bg case,
 however, the first and last two block groups within the larger meta
 group contain only group descriptors for the groups inside the meta
 group.
 
-flex\_bg and meta\_bg do not appear to be mutually exclusive features.
+flex_bg and meta_bg do not appear to be mutually exclusive features.
 
 In ext2, ext3, and ext4 (when the 64bit feature is not enabled), the
 block group descriptor was only 32 bytes long and therefore ends at
-bg\_checksum. On an ext4 filesystem with the 64bit feature enabled, the
+bg_checksum. On an ext4 filesystem with the 64bit feature enabled, the
 block group descriptor expands to at least the 64 bytes described below;
 the size is stored in the superblock.
 
-If gdt\_csum is set and metadata\_csum is not set, the block group
+If gdt_csum is set and metadata_csum is not set, the block group
 checksum is the crc16 of the FS UUID, the group number, and the group
-descriptor structure. If metadata\_csum is set, then the block group
+descriptor structure. If metadata_csum is set, then the block group
 checksum is the lower 16 bits of the checksum of the FS UUID, the group
 number, and the group descriptor structure. Both block and inode bitmap
 checksums are calculated against the FS UUID, the group number, and the
@@ -51,59 +51,59 @@ The block group descriptor is laid out in ``struct ext4_group_desc``.
      - Name
      - Description
    * - 0x0
-     - \_\_le32
-     - bg\_block\_bitmap\_lo
+     - __le32
+     - bg_block_bitmap_lo
      - Lower 32-bits of location of block bitmap.
    * - 0x4
-     - \_\_le32
-     - bg\_inode\_bitmap\_lo
+     - __le32
+     - bg_inode_bitmap_lo
      - Lower 32-bits of location of inode bitmap.
    * - 0x8
-     - \_\_le32
-     - bg\_inode\_table\_lo
+     - __le32
+     - bg_inode_table_lo
      - Lower 32-bits of location of inode table.
    * - 0xC
-     - \_\_le16
-     - bg\_free\_blocks\_count\_lo
+     - __le16
+     - bg_free_blocks_count_lo
      - Lower 16-bits of free block count.
    * - 0xE
-     - \_\_le16
-     - bg\_free\_inodes\_count\_lo
+     - __le16
+     - bg_free_inodes_count_lo
      - Lower 16-bits of free inode count.
    * - 0x10
-     - \_\_le16
-     - bg\_used\_dirs\_count\_lo
+     - __le16
+     - bg_used_dirs_count_lo
      - Lower 16-bits of directory count.
    * - 0x12
-     - \_\_le16
-     - bg\_flags
+     - __le16
+     - bg_flags
      - Block group flags. See the bgflags_ table below.
    * - 0x14
-     - \_\_le32
-     - bg\_exclude\_bitmap\_lo
+     - __le32
+     - bg_exclude_bitmap_lo
      - Lower 32-bits of location of snapshot exclusion bitmap.
    * - 0x18
-     - \_\_le16
-     - bg\_block\_bitmap\_csum\_lo
+     - __le16
+     - bg_block_bitmap_csum_lo
      - Lower 16-bits of the block bitmap checksum.
    * - 0x1A
-     - \_\_le16
-     - bg\_inode\_bitmap\_csum\_lo
+     - __le16
+     - bg_inode_bitmap_csum_lo
      - Lower 16-bits of the inode bitmap checksum.
    * - 0x1C
-     - \_\_le16
-     - bg\_itable\_unused\_lo
+     - __le16
+     - bg_itable_unused_lo
      - Lower 16-bits of unused inode count. If set, we needn't scan past the
-       ``(sb.s_inodes_per_group - gdt.bg_itable_unused)``\ th entry in the
+       ``(sb.s_inodes_per_group - gdt.bg_itable_unused)``-th entry in the
        inode table for this group.
    * - 0x1E
-     - \_\_le16
-     - bg\_checksum
-     - Group descriptor checksum; crc16(sb\_uuid+group\_num+bg\_desc) if the
-       RO\_COMPAT\_GDT\_CSUM feature is set, or
-       crc32c(sb\_uuid+group\_num+bg\_desc) & 0xFFFF if the
-       RO\_COMPAT\_METADATA\_CSUM feature is set.  The bg\_checksum
-       field in bg\_desc is skipped when calculating crc16 checksum,
+     - __le16
+     - bg_checksum
+     - Group descriptor checksum; crc16(sb_uuid+group_num+bg_desc) if the
+       RO_COMPAT_GDT_CSUM feature is set, or
+       crc32c(sb_uuid+group_num+bg_desc) & 0xFFFF if the
+       RO_COMPAT_METADATA_CSUM feature is set.  The bg_checksum
+       field in bg_desc is skipped when calculating crc16 checksum,
        and set to zero if crc32c checksum is used.
    * -
      -
@@ -111,48 +111,48 @@ The block group descriptor is laid out in ``struct ext4_group_desc``.
      - These fields only exist if the 64bit feature is enabled and s_desc_size
        > 32.
    * - 0x20
-     - \_\_le32
-     - bg\_block\_bitmap\_hi
+     - __le32
+     - bg_block_bitmap_hi
      - Upper 32-bits of location of block bitmap.
    * - 0x24
-     - \_\_le32
-     - bg\_inode\_bitmap\_hi
+     - __le32
+     - bg_inode_bitmap_hi
      - Upper 32-bits of location of inodes bitmap.
    * - 0x28
-     - \_\_le32
-     - bg\_inode\_table\_hi
+     - __le32
+     - bg_inode_table_hi
      - Upper 32-bits of location of inodes table.
    * - 0x2C
-     - \_\_le16
-     - bg\_free\_blocks\_count\_hi
+     - __le16
+     - bg_free_blocks_count_hi
      - Upper 16-bits of free block count.
    * - 0x2E
-     - \_\_le16
-     - bg\_free\_inodes\_count\_hi
+     - __le16
+     - bg_free_inodes_count_hi
      - Upper 16-bits of free inode count.
    * - 0x30
-     - \_\_le16
-     - bg\_used\_dirs\_count\_hi
+     - __le16
+     - bg_used_dirs_count_hi
      - Upper 16-bits of directory count.
    * - 0x32
-     - \_\_le16
-     - bg\_itable\_unused\_hi
+     - __le16
+     - bg_itable_unused_hi
      - Upper 16-bits of unused inode count.
    * - 0x34
-     - \_\_le32
-     - bg\_exclude\_bitmap\_hi
+     - __le32
+     - bg_exclude_bitmap_hi
      - Upper 32-bits of location of snapshot exclusion bitmap.
    * - 0x38
-     - \_\_le16
-     - bg\_block\_bitmap\_csum\_hi
+     - __le16
+     - bg_block_bitmap_csum_hi
      - Upper 16-bits of the block bitmap checksum.
    * - 0x3A
-     - \_\_le16
-     - bg\_inode\_bitmap\_csum\_hi
+     - __le16
+     - bg_inode_bitmap_csum_hi
      - Upper 16-bits of the inode bitmap checksum.
    * - 0x3C
-     - \_\_u32
-     - bg\_reserved
+     - __u32
+     - bg_reserved
      - Padding to 64 bytes.
 
 .. _bgflags:
@@ -166,8 +166,8 @@ Block group flags can be any combination of the following:
    * - Value
      - Description
    * - 0x1
-     - inode table and bitmap are not initialized (EXT4\_BG\_INODE\_UNINIT).
+     - inode table and bitmap are not initialized (EXT4_BG_INODE_UNINIT).
    * - 0x2
-     - block bitmap is not initialized (EXT4\_BG\_BLOCK\_UNINIT).
+     - block bitmap is not initialized (EXT4_BG_BLOCK_UNINIT).
    * - 0x4
-     - inode table is zeroed (EXT4\_BG\_INODE\_ZEROED).
+     - inode table is zeroed (EXT4_BG_INODE_ZEROED).
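
With the 64bit feature, every location and count above is split into
_lo/_hi halves that must be recombined; a sketch for the block bitmap
location::

    #include <stdint.h>

    uint64_t bg_block_bitmap(uint32_t lo, uint32_t hi, int has_64bit)
    {
            uint64_t loc = lo;

            /* The _hi half only exists if the 64bit feature is
             * enabled and s_desc_size > 32. */
            if (has_64bit)
                    loc |= (uint64_t)hi << 32;
            return loc;
    }
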
index b9816d5..dc31f50 100644 (file)
@@ -1,6 +1,6 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-The Contents of inode.i\_block
+The Contents of inode.i_block
 ------------------------------
 
 Depending on the type of file an inode describes, the 60 bytes of
@@ -47,7 +47,7 @@ In ext4, the file to logical block map has been replaced with an extent
 tree. Under the old scheme, allocating a contiguous run of 1,000 blocks
 requires an indirect block to map all 1,000 entries; with extents, the
 mapping is reduced to a single ``struct ext4_extent`` with
-``ee_len = 1000``. If flex\_bg is enabled, it is possible to allocate
+``ee_len = 1000``. If flex_bg is enabled, it is possible to allocate
 very large files with a single extent, at a considerable reduction in
 metadata block use, and some improvement in disk efficiency. The inode
 must have the extents flag (0x80000) set for this feature to be in
@@ -76,28 +76,28 @@ which is 12 bytes long:
      - Name
      - Description
    * - 0x0
-     - \_\_le16
-     - eh\_magic
+     - __le16
+     - eh_magic
      - Magic number, 0xF30A.
    * - 0x2
-     - \_\_le16
-     - eh\_entries
+     - __le16
+     - eh_entries
      - Number of valid entries following the header.
    * - 0x4
-     - \_\_le16
-     - eh\_max
+     - __le16
+     - eh_max
      - Maximum number of entries that could follow the header.
    * - 0x6
-     - \_\_le16
-     - eh\_depth
+     - __le16
+     - eh_depth
      - Depth of this extent node in the extent tree. 0 = this extent node
        points to data blocks; otherwise, this extent node points to other
        extent nodes. The extent tree can be at most 5 levels deep: a logical
        block number can be at most ``2^32``, and the smallest ``n`` that
        satisfies ``4*(((blocksize - 12)/12)^n) >= 2^32`` is 5.
    * - 0x8
-     - \_\_le32
-     - eh\_generation
+     - __le32
+     - eh_generation
      - Generation of the tree. (Used by Lustre, but not standard ext4).
 
 Internal nodes of the extent tree, also known as index nodes, are
@@ -112,22 +112,22 @@ recorded as ``struct ext4_extent_idx``, and are 12 bytes long:
      - Name
      - Description
    * - 0x0
-     - \_\_le32
-     - ei\_block
+     - __le32
+     - ei_block
      - This index node covers file blocks from 'block' onward.
    * - 0x4
-     - \_\_le32
-     - ei\_leaf\_lo
+     - __le32
+     - ei_leaf_lo
      - Lower 32-bits of the block number of the extent node that is the next
        level lower in the tree. The tree node pointed to can be either another
        internal node or a leaf node, described below.
    * - 0x8
-     - \_\_le16
-     - ei\_leaf\_hi
+     - __le16
+     - ei_leaf_hi
      - Upper 16-bits of the previous field.
    * - 0xA
-     - \_\_u16
-     - ei\_unused
+     - __u16
+     - ei_unused
      -
 
 Leaf nodes of the extent tree are recorded as ``struct ext4_extent``,
@@ -142,24 +142,24 @@ and are also 12 bytes long:
      - Name
      - Description
    * - 0x0
-     - \_\_le32
-     - ee\_block
+     - __le32
+     - ee_block
      - First file block number that this extent covers.
    * - 0x4
-     - \_\_le16
-     - ee\_len
+     - __le16
+     - ee_len
      - Number of blocks covered by extent. If the value of this field is <=
        32768, the extent is initialized. If the value of the field is > 32768,
        the extent is uninitialized and the actual extent length is ``ee_len`` -
        32768. Therefore, the maximum length of an initialized extent is 32768
        blocks, and the maximum length of an uninitialized extent is 32767.
    * - 0x6
-     - \_\_le16
-     - ee\_start\_hi
+     - __le16
+     - ee_start_hi
      - Upper 16-bits of the block number to which this extent points.
    * - 0x8
-     - \_\_le32
-     - ee\_start\_lo
+     - __le32
+     - ee_start_lo
      - Lower 32-bits of the block number to which this extent points.
 
 Prior to the introduction of metadata checksums, the extent header +
@@ -182,8 +182,8 @@ including) the checksum itself.
      - Name
      - Description
    * - 0x0
-     - \_\_le32
-     - eb\_checksum
+     - __le32
+     - eb_checksum
      - Checksum of the extent block, crc32c(uuid+inum+igeneration+extentblock)
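
The extent structures above, restated as a C sketch (host-endian
integer types stand in for the little-endian on-disk fields), together
with the ``ee_len`` decoding rule::

    #include <stdint.h>

    struct ext4_extent_header {
            uint16_t eh_magic;      /* 0xF30A */
            uint16_t eh_entries;
            uint16_t eh_max;
            uint16_t eh_depth;      /* 0 = node points at data blocks */
            uint32_t eh_generation;
    };

    struct ext4_extent {
            uint32_t ee_block;      /* first file block covered */
            uint16_t ee_len;
            uint16_t ee_start_hi;
            uint32_t ee_start_lo;
    };

    /* Values above 32768 mark uninitialized extents. */
    uint16_t extent_length(uint16_t ee_len, int *uninitialized)
    {
            *uninitialized = ee_len > 32768;
            return *uninitialized ? ee_len - 32768 : ee_len;
    }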
 
 Inline Data
index d107517..a728af0 100644 (file)
@@ -11,12 +11,12 @@ file is smaller than 60 bytes, then the data are stored inline in
 attribute space, then it might be found as an extended attribute
 “system.data” within the inode body (“ibody EA”). This of course
 constrains the amount of extended attributes one can attach to an inode.
-If the data size increases beyond i\_block + ibody EA, a regular block
+If the data size increases beyond i_block + ibody EA, a regular block
 is allocated and the contents moved to that block.
 
 Pending a change to compact the extended attribute key used to store
 inline data, one ought to be able to store 160 bytes of data in a
-256-byte inode (as of June 2015, when i\_extra\_isize is 28). Prior to
+256-byte inode (as of June 2015, when i_extra_isize is 28). Prior to
 that, the limit was 156 bytes due to inefficient use of inode space.
 
 The inline data feature requires the presence of an extended attribute
@@ -25,12 +25,12 @@ for “system.data”, even if the attribute value is zero length.
 Inline Directories
 ~~~~~~~~~~~~~~~~~~
 
-The first four bytes of i\_block are the inode number of the parent
+The first four bytes of i_block are the inode number of the parent
 directory. Following that is a 56-byte space for an array of directory
 entries; see ``struct ext4_dir_entry``. If there is a “system.data”
 attribute in the inode body, the EA value is an array of
 ``struct ext4_dir_entry`` as well. Note that for inline directories, the
-i\_block and EA space are treated as separate dirent blocks; directory
+i_block and EA space are treated as separate dirent blocks; directory
 entries cannot span the two.
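
A tiny sketch of the layout just described: the parent inode number is
the first four little-endian bytes of i_block (a little-endian host is
assumed here)::

    #include <stdint.h>
    #include <string.h>

    uint32_t inline_dir_parent(const uint8_t i_block[60])
    {
            uint32_t parent;

            memcpy(&parent, i_block, sizeof(parent));
            return parent;
    }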
 
 Inline directory entries are not checksummed, as the inode checksum
index 6c5ce66..cfc6c16 100644 (file)
@@ -38,138 +38,138 @@ The inode table entry is laid out in ``struct ext4_inode``.
      - Name
      - Description
    * - 0x0
-     - \_\_le16
-     - i\_mode
+     - __le16
+     - i_mode
      - File mode. See the table i_mode_ below.
    * - 0x2
-     - \_\_le16
-     - i\_uid
+     - __le16
+     - i_uid
      - Lower 16-bits of Owner UID.
    * - 0x4
-     - \_\_le32
-     - i\_size\_lo
+     - __le32
+     - i_size_lo
      - Lower 32-bits of size in bytes.
    * - 0x8
-     - \_\_le32
-     - i\_atime
-     - Last access time, in seconds since the epoch. However, if the EA\_INODE
+     - __le32
+     - i_atime
+     - Last access time, in seconds since the epoch. However, if the EA_INODE
        inode flag is set, this inode stores an extended attribute value and
        this field contains the checksum of the value.
    * - 0xC
-     - \_\_le32
-     - i\_ctime
+     - __le32
+     - i_ctime
      - Last inode change time, in seconds since the epoch. However, if the
-       EA\_INODE inode flag is set, this inode stores an extended attribute
+       EA_INODE inode flag is set, this inode stores an extended attribute
        value and this field contains the lower 32 bits of the attribute value's
        reference count.
    * - 0x10
-     - \_\_le32
-     - i\_mtime
+     - __le32
+     - i_mtime
      - Last data modification time, in seconds since the epoch. However, if the
-       EA\_INODE inode flag is set, this inode stores an extended attribute
+       EA_INODE inode flag is set, this inode stores an extended attribute
        value and this field contains the number of the inode that owns the
        extended attribute.
    * - 0x14
-     - \_\_le32
-     - i\_dtime
+     - __le32
+     - i_dtime
      - Deletion Time, in seconds since the epoch.
    * - 0x18
-     - \_\_le16
-     - i\_gid
+     - __le16
+     - i_gid
      - Lower 16-bits of GID.
    * - 0x1A
-     - \_\_le16
-     - i\_links\_count
+     - __le16
+     - i_links_count
      - Hard link count. Normally, ext4 does not permit an inode to have more
        than 65,000 hard links. This applies to files as well as directories,
        which means that there cannot be more than 64,998 subdirectories in a
        directory (each subdirectory's '..' entry counts as a hard link, as does
-       the '.' entry in the directory itself). With the DIR\_NLINK feature
+       the '.' entry in the directory itself). With the DIR_NLINK feature
        enabled, ext4 supports more than 64,998 subdirectories by setting this
        field to 1 to indicate that the number of hard links is not known.
    * - 0x1C
-     - \_\_le32
-     - i\_blocks\_lo
-     - Lower 32-bits of “block” count. If the huge\_file feature flag is not
+     - __le32
+     - i_blocks_lo
+     - Lower 32-bits of “block” count. If the huge_file feature flag is not
        set on the filesystem, the file consumes ``i_blocks_lo`` 512-byte blocks
-       on disk. If huge\_file is set and EXT4\_HUGE\_FILE\_FL is NOT set in
+       on disk. If huge_file is set and EXT4_HUGE_FILE_FL is NOT set in
        ``inode.i_flags``, then the file consumes ``i_blocks_lo + (i_blocks_hi
-       << 32)`` 512-byte blocks on disk. If huge\_file is set and
-       EXT4\_HUGE\_FILE\_FL IS set in ``inode.i_flags``, then this file
+       << 32)`` 512-byte blocks on disk. If huge_file is set and
+       EXT4_HUGE_FILE_FL IS set in ``inode.i_flags``, then this file
        consumes (``i_blocks_lo + i_blocks_hi`` << 32) filesystem blocks on
        disk.
    * - 0x20
-     - \_\_le32
-     - i\_flags
+     - __le32
+     - i_flags
      - Inode flags. See the table i_flags_ below.
    * - 0x24
      - 4 bytes
-     - i\_osd1
+     - i_osd1
      - See the table i_osd1_ for more details.
    * - 0x28
      - 60 bytes
-     - i\_block[EXT4\_N\_BLOCKS=15]
-     - Block map or extent tree. See the section “The Contents of inode.i\_block”.
+     - i_block[EXT4_N_BLOCKS=15]
+     - Block map or extent tree. See the section “The Contents of inode.i_block”.
    * - 0x64
-     - \_\_le32
-     - i\_generation
+     - __le32
+     - i_generation
      - File version (for NFS).
    * - 0x68
-     - \_\_le32
-     - i\_file\_acl\_lo
+     - __le32
+     - i_file_acl_lo
      - Lower 32-bits of extended attribute block. ACLs are of course one of
        many possible extended attributes; I think the name of this field is a
        result of the first use of extended attributes being for ACLs.
    * - 0x6C
-     - \_\_le32
-     - i\_size\_high / i\_dir\_acl
+     - __le32
+     - i_size_high / i_dir_acl
      - Upper 32-bits of file/directory size. In ext2/3 this field was named
-       i\_dir\_acl, though it was usually set to zero and never used.
+       i_dir_acl, though it was usually set to zero and never used.
    * - 0x70
-     - \_\_le32
-     - i\_obso\_faddr
+     - __le32
+     - i_obso_faddr
      - (Obsolete) fragment address.
    * - 0x74
      - 12 bytes
-     - i\_osd2
+     - i_osd2
      - See the table i_osd2_ for more details.
    * - 0x80
-     - \_\_le16
-     - i\_extra\_isize
+     - __le16
+     - i_extra_isize
      - Size of this inode - 128. Alternately, the size of the extended inode
        fields beyond the original ext2 inode, including this field.
    * - 0x82
-     - \_\_le16
-     - i\_checksum\_hi
+     - __le16
+     - i_checksum_hi
      - Upper 16-bits of the inode checksum.
    * - 0x84
-     - \_\_le32
-     - i\_ctime\_extra
+     - __le32
+     - i_ctime_extra
      - Extra change time bits. This provides sub-second precision. See Inode
        Timestamps section.
    * - 0x88
-     - \_\_le32
-     - i\_mtime\_extra
+     - __le32
+     - i_mtime_extra
      - Extra modification time bits. This provides sub-second precision.
    * - 0x8C
-     - \_\_le32
-     - i\_atime\_extra
+     - __le32
+     - i_atime_extra
      - Extra access time bits. This provides sub-second precision.
    * - 0x90
-     - \_\_le32
-     - i\_crtime
+     - __le32
+     - i_crtime
      - File creation time, in seconds since the epoch.
    * - 0x94
-     - \_\_le32
-     - i\_crtime\_extra
+     - __le32
+     - i_crtime_extra
      - Extra file creation time bits. This provides sub-second precision.
    * - 0x98
-     - \_\_le32
-     - i\_version\_hi
+     - __le32
+     - i_version_hi
      - Upper 32-bits for version number.
    * - 0x9C
-     - \_\_le32
-     - i\_projid
+     - __le32
+     - i_projid
      - Project ID.
 
 .. _i_mode:
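
Several fields above defer to the Inode Timestamps section for the
``*_extra`` encoding: the low two bits extend the epoch and the upper
30 bits carry nanoseconds. A hedged sketch of the decoding::

    #include <stdint.h>

    void decode_extra_time(uint32_t seconds, uint32_t extra,
                           int64_t *sec, uint32_t *nsec)
    {
            *sec  = (int64_t)(int32_t)seconds +
                    ((int64_t)(extra & 0x3) << 32);
            *nsec = extra >> 2;
    }
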
@@ -183,45 +183,45 @@ The ``i_mode`` value is a combination of the following flags:
    * - Value
      - Description
    * - 0x1
-     - S\_IXOTH (Others may execute)
+     - S_IXOTH (Others may execute)
    * - 0x2
-     - S\_IWOTH (Others may write)
+     - S_IWOTH (Others may write)
    * - 0x4
-     - S\_IROTH (Others may read)
+     - S_IROTH (Others may read)
    * - 0x8
-     - S\_IXGRP (Group members may execute)
+     - S_IXGRP (Group members may execute)
    * - 0x10
-     - S\_IWGRP (Group members may write)
+     - S_IWGRP (Group members may write)
    * - 0x20
-     - S\_IRGRP (Group members may read)
+     - S_IRGRP (Group members may read)
    * - 0x40
-     - S\_IXUSR (Owner may execute)
+     - S_IXUSR (Owner may execute)
    * - 0x80
-     - S\_IWUSR (Owner may write)
+     - S_IWUSR (Owner may write)
    * - 0x100
-     - S\_IRUSR (Owner may read)
+     - S_IRUSR (Owner may read)
    * - 0x200
-     - S\_ISVTX (Sticky bit)
+     - S_ISVTX (Sticky bit)
    * - 0x400
-     - S\_ISGID (Set GID)
+     - S_ISGID (Set GID)
    * - 0x800
-     - S\_ISUID (Set UID)
+     - S_ISUID (Set UID)
    * -
      - These are mutually-exclusive file types:
    * - 0x1000
-     - S\_IFIFO (FIFO)
+     - S_IFIFO (FIFO)
    * - 0x2000
-     - S\_IFCHR (Character device)
+     - S_IFCHR (Character device)
    * - 0x4000
-     - S\_IFDIR (Directory)
+     - S_IFDIR (Directory)
    * - 0x6000
-     - S\_IFBLK (Block device)
+     - S_IFBLK (Block device)
    * - 0x8000
-     - S\_IFREG (Regular file)
+     - S_IFREG (Regular file)
    * - 0xA000
-     - S\_IFLNK (Symbolic link)
+     - S_IFLNK (Symbolic link)
    * - 0xC000
-     - S\_IFSOCK (Socket)
+     - S_IFSOCK (Socket)
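
Since the file types occupy the top four bits of ``i_mode`` and are
mutually exclusive, a mask-and-compare suffices; a minimal sketch::

    #include <stdint.h>

    int is_regular_file(uint16_t i_mode)
    {
            return (i_mode & 0xF000) == 0x8000;     /* S_IFREG */
    }

    int is_directory(uint16_t i_mode)
    {
            return (i_mode & 0xF000) == 0x4000;     /* S_IFDIR */
    }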
 
 .. _i_flags:
 
@@ -234,56 +234,56 @@ The ``i_flags`` field is a combination of these values:
    * - Value
      - Description
    * - 0x1
-     - This file requires secure deletion (EXT4\_SECRM\_FL). (not implemented)
+     - This file requires secure deletion (EXT4_SECRM_FL). (not implemented)
    * - 0x2
      - This file should be preserved, should undeletion be desired
-       (EXT4\_UNRM\_FL). (not implemented)
+       (EXT4_UNRM_FL). (not implemented)
    * - 0x4
-     - File is compressed (EXT4\_COMPR\_FL). (not really implemented)
+     - File is compressed (EXT4_COMPR_FL). (not really implemented)
    * - 0x8
-     - All writes to the file must be synchronous (EXT4\_SYNC\_FL).
+     - All writes to the file must be synchronous (EXT4_SYNC_FL).
    * - 0x10
-     - File is immutable (EXT4\_IMMUTABLE\_FL).
+     - File is immutable (EXT4_IMMUTABLE_FL).
    * - 0x20
-     - File can only be appended (EXT4\_APPEND\_FL).
+     - File can only be appended (EXT4_APPEND_FL).
    * - 0x40
-     - The dump(1) utility should not dump this file (EXT4\_NODUMP\_FL).
+     - The dump(1) utility should not dump this file (EXT4_NODUMP_FL).
    * - 0x80
-     - Do not update access time (EXT4\_NOATIME\_FL).
+     - Do not update access time (EXT4_NOATIME_FL).
    * - 0x100
-     - Dirty compressed file (EXT4\_DIRTY\_FL). (not used)
+     - Dirty compressed file (EXT4_DIRTY_FL). (not used)
    * - 0x200
-     - File has one or more compressed clusters (EXT4\_COMPRBLK\_FL). (not used)
+     - File has one or more compressed clusters (EXT4_COMPRBLK_FL). (not used)
    * - 0x400
-     - Do not compress file (EXT4\_NOCOMPR\_FL). (not used)
+     - Do not compress file (EXT4_NOCOMPR_FL). (not used)
    * - 0x800
-     - Encrypted inode (EXT4\_ENCRYPT\_FL). This bit value previously was
-       EXT4\_ECOMPR\_FL (compression error), which was never used.
+     - Encrypted inode (EXT4_ENCRYPT_FL). This bit value previously was
+       EXT4_ECOMPR_FL (compression error), which was never used.
    * - 0x1000
-     - Directory has hashed indexes (EXT4\_INDEX\_FL).
+     - Directory has hashed indexes (EXT4_INDEX_FL).
    * - 0x2000
-     - AFS magic directory (EXT4\_IMAGIC\_FL).
+     - AFS magic directory (EXT4_IMAGIC_FL).
    * - 0x4000
      - File data must always be written through the journal
-       (EXT4\_JOURNAL\_DATA\_FL).
+       (EXT4_JOURNAL_DATA_FL).
    * - 0x8000
-     - File tail should not be merged (EXT4\_NOTAIL\_FL). (not used by ext4)
+     - File tail should not be merged (EXT4_NOTAIL_FL). (not used by ext4)
    * - 0x10000
      - All directory entry data should be written synchronously (see
-       ``dirsync``) (EXT4\_DIRSYNC\_FL).
+       ``dirsync``) (EXT4_DIRSYNC_FL).
    * - 0x20000
-     - Top of directory hierarchy (EXT4\_TOPDIR\_FL).
+     - Top of directory hierarchy (EXT4_TOPDIR_FL).
    * - 0x40000
-     - This is a huge file (EXT4\_HUGE\_FILE\_FL).
+     - This is a huge file (EXT4_HUGE_FILE_FL).
    * - 0x80000
-     - Inode uses extents (EXT4\_EXTENTS\_FL).
+     - Inode uses extents (EXT4_EXTENTS_FL).
    * - 0x100000
-     - Verity protected file (EXT4\_VERITY\_FL).
+     - Verity protected file (EXT4_VERITY_FL).
    * - 0x200000
      - Inode stores a large extended attribute value in its data blocks
-       (EXT4\_EA\_INODE\_FL).
+       (EXT4_EA_INODE_FL).
    * - 0x400000
-     - This file has blocks allocated past EOF (EXT4\_EOFBLOCKS\_FL).
+     - This file has blocks allocated past EOF (EXT4_EOFBLOCKS_FL).
        (deprecated)
    * - 0x01000000
      - Inode is a snapshot (``EXT4_SNAPFILE_FL``). (not in mainline)
@@ -294,21 +294,21 @@ The ``i_flags`` field is a combination of these values:
      - Snapshot shrink has completed (``EXT4_SNAPFILE_SHRUNK_FL``). (not in
        mainline)
    * - 0x10000000
-     - Inode has inline data (EXT4\_INLINE\_DATA\_FL).
+     - Inode has inline data (EXT4_INLINE_DATA_FL).
    * - 0x20000000
-     - Create children with the same project ID (EXT4\_PROJINHERIT\_FL).
+     - Create children with the same project ID (EXT4_PROJINHERIT_FL).
    * - 0x80000000
-     - Reserved for ext4 library (EXT4\_RESERVED\_FL).
+     - Reserved for ext4 library (EXT4_RESERVED_FL).
    * -
      - Aggregate flags:
    * - 0x705BDFFF
      - User-visible flags.
    * - 0x604BC0FF
-     - User-modifiable flags. Note that while EXT4\_JOURNAL\_DATA\_FL and
-       EXT4\_EXTENTS\_FL can be set with setattr, they are not in the kernel's
-       EXT4\_FL\_USER\_MODIFIABLE mask, since it needs to handle the setting of
+     - User-modifiable flags. Note that while EXT4_JOURNAL_DATA_FL and
+       EXT4_EXTENTS_FL can be set with setattr, they are not in the kernel's
+       EXT4_FL_USER_MODIFIABLE mask, since it needs to handle the setting of
        these flags in a special manner and they are masked out of the set of
-       flags that are saved directly to i\_flags.
+       flags that are saved directly to i_flags.
 
 .. _i_osd1:
 
@@ -325,9 +325,9 @@ Linux:
      - Name
      - Description
    * - 0x0
-     - \_\_le32
-     - l\_i\_version
-     - Inode version. However, if the EA\_INODE inode flag is set, this inode
+     - __le32
+     - l_i_version
+     - Inode version. However, if the EA_INODE inode flag is set, this inode
        stores an extended attribute value and this field contains the upper 32
        bits of the attribute value's reference count.
 
@@ -342,8 +342,8 @@ Hurd:
      - Name
      - Description
    * - 0x0
-     - \_\_le32
-     - h\_i\_translator
+     - __le32
+     - h_i_translator
      - ??
 
 Masix:
@@ -357,8 +357,8 @@ Masix:
      - Name
      - Description
    * - 0x0
-     - \_\_le32
-     - m\_i\_reserved
+     - __le32
+     - m_i_reserved
      - ??
 
 .. _i_osd2:
@@ -376,30 +376,30 @@ Linux:
      - Name
      - Description
    * - 0x0
-     - \_\_le16
-     - l\_i\_blocks\_high
+     - __le16
+     - l_i_blocks_high
      - Upper 16-bits of the block count. Please see the note attached to
-       i\_blocks\_lo.
+       i_blocks_lo.
    * - 0x2
-     - \_\_le16
-     - l\_i\_file\_acl\_high
+     - __le16
+     - l_i_file_acl_high
      - Upper 16-bits of the extended attribute block (historically, the file
        ACL location). See the Extended Attributes section below.
    * - 0x4
-     - \_\_le16
-     - l\_i\_uid\_high
+     - __le16
+     - l_i_uid_high
      - Upper 16-bits of the Owner UID.
    * - 0x6
-     - \_\_le16
-     - l\_i\_gid\_high
+     - __le16
+     - l_i_gid_high
      - Upper 16-bits of the GID.
    * - 0x8
-     - \_\_le16
-     - l\_i\_checksum\_lo
+     - __le16
+     - l_i_checksum_lo
      - Lower 16-bits of the inode checksum.
    * - 0xA
-     - \_\_le16
-     - l\_i\_reserved
+     - __le16
+     - l_i_reserved
      - Unused.
 
 Hurd:
@@ -413,24 +413,24 @@ Hurd:
      - Name
      - Description
    * - 0x0
-     - \_\_le16
-     - h\_i\_reserved1
+     - __le16
+     - h_i_reserved1
      - ??
    * - 0x2
-     - \_\_u16
-     - h\_i\_mode\_high
+     - __u16
+     - h_i_mode_high
      - Upper 16-bits of the file mode.
    * - 0x4
-     - \_\_le16
-     - h\_i\_uid\_high
+     - __le16
+     - h_i_uid_high
      - Upper 16-bits of the Owner UID.
    * - 0x6
-     - \_\_le16
-     - h\_i\_gid\_high
+     - __le16
+     - h_i_gid_high
      - Upper 16-bits of the GID.
    * - 0x8
-     - \_\_u32
-     - h\_i\_author
+     - __u32
+     - h_i_author
      - Author code?
 
 Masix:
@@ -444,17 +444,17 @@ Masix:
      - Name
      - Description
    * - 0x0
-     - \_\_le16
-     - h\_i\_reserved1
+     - __le16
+     - h_i_reserved1
      - ??
    * - 0x2
-     - \_\_u16
-     - m\_i\_file\_acl\_high
+     - __u16
+     - m_i_file_acl_high
      - Upper 16-bits of the extended attribute block (historically, the file
        ACL location).
    * - 0x4
-     - \_\_u32
-     - m\_i\_reserved2[2]
+     - __u32
+     - m_i_reserved2[2]
      - ??
 
 Inode Size
@@ -466,11 +466,11 @@ In ext2 and ext3, the inode structure size was fixed at 128 bytes
 on-disk inode at format time for all inodes in the filesystem to provide
 space beyond the end of the original ext2 inode. The on-disk inode
 record size is recorded in the superblock as ``s_inode_size``. The
-number of bytes actually used by struct ext4\_inode beyond the original
+number of bytes actually used by struct ext4_inode beyond the original
 128-byte ext2 inode is recorded in the ``i_extra_isize`` field for each
-inode, which allows struct ext4\_inode to grow for a new kernel without
+inode, which allows struct ext4_inode to grow for a new kernel without
 having to upgrade all of the on-disk inodes. Access to fields beyond
-EXT2\_GOOD\_OLD\_INODE\_SIZE should be verified to be within
+EXT2_GOOD_OLD_INODE_SIZE should be verified to be within
 ``i_extra_isize``. By default, ext4 inode records are 256 bytes, and (as
 of August 2019) the inode structure is 160 bytes
 (``i_extra_isize = 32``). The extra space between the end of the inode
@@ -516,7 +516,7 @@ creation time (crtime); this field is 64-bits wide and decoded in the
 same manner as 64-bit [cma]time. Neither crtime nor dtime are accessible
 through the regular stat() interface, though debugfs will report them.
 
-We use the 32-bit signed time value plus (2^32 \* (extra epoch bits)).
+We use the 32-bit signed time value plus (2^32 * (extra epoch bits)).
 In other words:
 
 .. list-table::
@@ -525,8 +525,8 @@ In other words:
 
    * - Extra epoch bits
      - MSB of 32-bit time
-     - Adjustment for signed 32-bit to 64-bit tv\_sec
-     - Decoded 64-bit tv\_sec
+     - Adjustment for signed 32-bit to 64-bit tv_sec
+     - Decoded 64-bit tv_sec
      - valid time range
    * - 0 0
      - 1
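
To make the decode rule concrete, here is a minimal sketch of recovering the
64-bit seconds value, assuming the two "extra epoch bits" are the low two bits
of the inode's extra-time field (the remaining bits carry nanoseconds and are
ignored here)::

    #include <stdint.h>

    static int64_t decode_tv_sec(uint32_t seconds, uint32_t extra)
    {
            uint32_t epoch = extra & 0x3;  /* extra epoch bits (assumed low 2) */

            /* 32-bit signed time value plus (2^32 * extra epoch bits) */
            return (int64_t)(int32_t)seconds + ((int64_t)epoch << 32);
    }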
index 5fad388..a6bef52 100644 (file)
@@ -63,8 +63,8 @@ Generally speaking, the journal has this format:
    :header-rows: 1
 
    * - Superblock
-     - descriptor\_block (data\_blocks or revocation\_block) [more data or
-       revocations] commmit\_block
+     - descriptor_block (data_blocks or revocation_block) [more data or
+       revocations] commit_block
      - [more transactions...]
    * - 
      - One transaction
@@ -93,8 +93,8 @@ superblock.
    * - 1024 bytes of padding
      - ext4 Superblock
      - Journal Superblock
-     - descriptor\_block (data\_blocks or revocation\_block) [more data or
-       revocations] commmit\_block
+     - descriptor_block (data_blocks or revocation_block) [more data or
+       revocations] commit_block
      - [more transactions...]
    * - 
      -
@@ -117,17 +117,17 @@ Every block in the journal starts with a common 12-byte header
      - Name
      - Description
    * - 0x0
-     - \_\_be32
-     - h\_magic
+     - __be32
+     - h_magic
      - jbd2 magic number, 0xC03B3998.
    * - 0x4
-     - \_\_be32
-     - h\_blocktype
+     - __be32
+     - h_blocktype
      - Description of what this block contains. See the jbd2_blocktype_ table
        below.
    * - 0x8
-     - \_\_be32
-     - h\_sequence
+     - __be32
+     - h_sequence
      - The transaction ID that goes with this block.
 
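Transcribed as a C structure (field types as in ``<linux/types.h>``), the
header in the table above corresponds to::

    /* Common 12-byte header at the start of every journal block. */
    typedef struct journal_header_s {
            __be32 h_magic;        /* jbd2 magic number, 0xC03B3998 */
            __be32 h_blocktype;    /* see the jbd2_blocktype table below */
            __be32 h_sequence;     /* transaction ID for this block */
    } journal_header_t;
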
 .. _jbd2_blocktype:
@@ -177,99 +177,99 @@ which is 1024 bytes long:
      -
      - Static information describing the journal.
    * - 0x0
-     - journal\_header\_t (12 bytes)
-     - s\_header
+     - journal_header_t (12 bytes)
+     - s_header
      - Common header identifying this as a superblock.
    * - 0xC
-     - \_\_be32
-     - s\_blocksize
+     - __be32
+     - s_blocksize
      - Journal device block size.
    * - 0x10
-     - \_\_be32
-     - s\_maxlen
+     - __be32
+     - s_maxlen
      - Total number of blocks in this journal.
    * - 0x14
-     - \_\_be32
-     - s\_first
+     - __be32
+     - s_first
      - First block of log information.
    * -
      -
      -
      - Dynamic information describing the current state of the log.
    * - 0x18
-     - \_\_be32
-     - s\_sequence
+     - __be32
+     - s_sequence
      - First commit ID expected in log.
    * - 0x1C
-     - \_\_be32
-     - s\_start
+     - __be32
+     - s_start
      - Block number of the start of log. Contrary to the comments, this field
        being zero does not imply that the journal is clean!
    * - 0x20
-     - \_\_be32
-     - s\_errno
-     - Error value, as set by jbd2\_journal\_abort().
+     - __be32
+     - s_errno
+     - Error value, as set by jbd2_journal_abort().
    * -
      -
      -
      - The remaining fields are only valid in a v2 superblock.
    * - 0x24
-     - \_\_be32
-     - s\_feature\_compat;
+     - __be32
+     - s_feature_compat;
      - Compatible feature set. See the table jbd2_compat_ below.
    * - 0x28
-     - \_\_be32
-     - s\_feature\_incompat
+     - __be32
+     - s_feature_incompat
      - Incompatible feature set. See the table jbd2_incompat_ below.
    * - 0x2C
-     - \_\_be32
-     - s\_feature\_ro\_compat
+     - __be32
+     - s_feature_ro_compat
      - Read-only compatible feature set. There aren't any of these currently.
    * - 0x30
-     - \_\_u8
-     - s\_uuid[16]
+     - __u8
+     - s_uuid[16]
      - 128-bit uuid for journal. This is compared against the copy in the ext4
        super block at mount time.
    * - 0x40
-     - \_\_be32
-     - s\_nr\_users
+     - __be32
+     - s_nr_users
      - Number of file systems sharing this journal.
    * - 0x44
-     - \_\_be32
-     - s\_dynsuper
+     - __be32
+     - s_dynsuper
      - Location of dynamic super block copy. (Not used?)
    * - 0x48
-     - \_\_be32
-     - s\_max\_transaction
+     - __be32
+     - s_max_transaction
      - Limit of journal blocks per transaction. (Not used?)
    * - 0x4C
-     - \_\_be32
-     - s\_max\_trans\_data
+     - __be32
+     - s_max_trans_data
      - Limit of data blocks per transaction. (Not used?)
    * - 0x50
-     - \_\_u8
-     - s\_checksum\_type
+     - __u8
+     - s_checksum_type
      - Checksum algorithm used for the journal.  See jbd2_checksum_type_ for
        more info.
    * - 0x51
-     - \_\_u8[3]
-     - s\_padding2
+     - __u8[3]
+     - s_padding2
      -
    * - 0x54
-     - \_\_be32
-     - s\_num\_fc\_blocks
+     - __be32
+     - s_num_fc_blocks
      - Number of fast commit blocks in the journal.
    * - 0x58
-     - \_\_u32
-     - s\_padding[42]
+     - __u32
+     - s_padding[42]
      -
    * - 0xFC
-     - \_\_be32
-     - s\_checksum
+     - __be32
+     - s_checksum
      - Checksum of the entire superblock, with this field set to zero.
    * - 0x100
-     - \_\_u8
-     - s\_users[16\*48]
+     - __u8
+     - s_users[16*48]
     - IDs of all file systems sharing the log. e2fsprogs/Linux don't allow
        shared external journals, but I imagine Lustre (or ocfs2?), which use
        the jbd2 code, might.
@@ -286,7 +286,7 @@ The journal compat features are any combination of the following:
      - Description
    * - 0x1
      - Journal maintains checksums on the data blocks.
-       (JBD2\_FEATURE\_COMPAT\_CHECKSUM)
+       (JBD2_FEATURE_COMPAT_CHECKSUM)
 
 .. _jbd2_incompat:
 
@@ -299,23 +299,23 @@ The journal incompat features are any combination of the following:
    * - Value
      - Description
    * - 0x1
-     - Journal has block revocation records. (JBD2\_FEATURE\_INCOMPAT\_REVOKE)
+     - Journal has block revocation records. (JBD2_FEATURE_INCOMPAT_REVOKE)
    * - 0x2
      - Journal can deal with 64-bit block numbers.
-       (JBD2\_FEATURE\_INCOMPAT\_64BIT)
+       (JBD2_FEATURE_INCOMPAT_64BIT)
    * - 0x4
-     - Journal commits asynchronously. (JBD2\_FEATURE\_INCOMPAT\_ASYNC\_COMMIT)
+     - Journal commits asynchronously. (JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)
    * - 0x8
      - This journal uses v2 of the checksum on-disk format. Each journal
        metadata block gets its own checksum, and the block tags in the
        descriptor table contain checksums for each of the data blocks in the
-       journal. (JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2)
+       journal. (JBD2_FEATURE_INCOMPAT_CSUM_V2)
    * - 0x10
      - This journal uses v3 of the checksum on-disk format. This is the same as
        v2, but the journal block tag size is fixed regardless of the size of
-       block numbers. (JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3)
+       block numbers. (JBD2_FEATURE_INCOMPAT_CSUM_V3)
    * - 0x20
-     - Journal has fast commit blocks. (JBD2\_FEATURE\_INCOMPAT\_FAST\_COMMIT)
+     - Journal has fast commit blocks. (JBD2_FEATURE_INCOMPAT_FAST_COMMIT)
 
 .. _jbd2_checksum_type:
 
@@ -355,11 +355,11 @@ Descriptor blocks consume at least 36 bytes, but use a full block:
      - Name
      - Descriptor
    * - 0x0
-     - journal\_header\_t
+     - journal_header_t
      - (open coded)
      - Common block header.
    * - 0xC
-     - struct journal\_block\_tag\_s
+     - struct journal_block_tag_s
      - open coded array[]
      - Enough tags either to fill up the block or to describe all the data
        blocks that follow this descriptor block.
@@ -367,7 +367,7 @@ Descriptor blocks consume at least 36 bytes, but use a full block:
 Journal block tags have any of the following formats, depending on which
 journal feature and block tag flags are set.
 
-If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 is set, the journal block tag is
+If JBD2_FEATURE_INCOMPAT_CSUM_V3 is set, the journal block tag is
 defined as ``struct journal_block_tag3_s``, which looks like the
 following. The size is 16 or 32 bytes.
 
@@ -380,24 +380,24 @@ following. The size is 16 or 32 bytes.
      - Name
      - Descriptor
    * - 0x0
-     - \_\_be32
-     - t\_blocknr
+     - __be32
+     - t_blocknr
      - Lower 32-bits of the location of where the corresponding data block
        should end up on disk.
    * - 0x4
-     - \_\_be32
-     - t\_flags
+     - __be32
+     - t_flags
      - Flags that go with the descriptor. See the table jbd2_tag_flags_ for
        more info.
    * - 0x8
-     - \_\_be32
-     - t\_blocknr\_high
+     - __be32
+     - t_blocknr_high
      - Upper 32-bits of the location of where the corresponding data block
-       should end up on disk. This is zero if JBD2\_FEATURE\_INCOMPAT\_64BIT is
+       should end up on disk. This is zero if JBD2_FEATURE_INCOMPAT_64BIT is
        not enabled.
    * - 0xC
-     - \_\_be32
-     - t\_checksum
+     - __be32
+     - t_checksum
      - Checksum of the journal UUID, the sequence number, and the data block.
    * -
      -
@@ -433,7 +433,7 @@ The journal tag flags are any combination of the following:
    * - 0x8
      - This is the last tag in this descriptor block.
 
-If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 is NOT set, the journal block tag
+If JBD2_FEATURE_INCOMPAT_CSUM_V3 is NOT set, the journal block tag
 is defined as ``struct journal_block_tag_s``, which looks like the
 following. The size is 8, 12, 24, or 28 bytes:
 
@@ -446,18 +446,18 @@ following. The size is 8, 12, 24, or 28 bytes:
      - Name
      - Descriptor
    * - 0x0
-     - \_\_be32
-     - t\_blocknr
+     - __be32
+     - t_blocknr
      - Lower 32-bits of the location of where the corresponding data block
        should end up on disk.
    * - 0x4
-     - \_\_be16
-     - t\_checksum
+     - __be16
+     - t_checksum
      - Checksum of the journal UUID, the sequence number, and the data block.
        Note that only the lower 16 bits are stored.
    * - 0x6
-     - \_\_be16
-     - t\_flags
+     - __be16
+     - t_flags
      - Flags that go with the descriptor. See the table jbd2_tag_flags_ for
        more info.
    * -
@@ -466,8 +466,8 @@ following. The size is 8, 12, 24, or 28 bytes:
      - This next field is only present if the super block indicates support for
        64-bit block numbers.
    * - 0x8
-     - \_\_be32
-     - t\_blocknr\_high
+     - __be32
+     - t_blocknr_high
      - Upper 32-bits of the location of where the corresponding data block
        should end up on disk.
    * -
@@ -483,8 +483,8 @@ following. The size is 8, 12, 24, or 28 bytes:
        ``j_uuid`` field in ``struct journal_s``, but only tune2fs touches that
        field.
 
-If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2 or
-JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 are set, the end of the block is a
+If JBD2_FEATURE_INCOMPAT_CSUM_V2 or
+JBD2_FEATURE_INCOMPAT_CSUM_V3 are set, the end of the block is a
 ``struct jbd2_journal_block_tail``, which looks like this:
 
 .. list-table::
@@ -496,8 +496,8 @@ JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 are set, the end of the block is a
      - Name
      - Descriptor
    * - 0x0
-     - \_\_be32
-     - t\_checksum
+     - __be32
+     - t_checksum
      - Checksum of the journal UUID + the descriptor block, with this field set
        to zero.
 
@@ -538,25 +538,25 @@ length, but use a full block:
      - Name
      - Description
    * - 0x0
-     - journal\_header\_t
-     - r\_header
+     - journal_header_t
+     - r_header
      - Common block header.
    * - 0xC
-     - \_\_be32
-     - r\_count
+     - __be32
+     - r_count
      - Number of bytes used in this block.
    * - 0x10
-     - \_\_be32 or \_\_be64
+     - __be32 or __be64
      - blocks[0]
      - Blocks to revoke.
 
-After r\_count is a linear array of block numbers that are effectively
+After r_count is a linear array of block numbers that are effectively
 revoked by this transaction. The size of each block number is 8 bytes if
 the superblock advertises 64-bit block number support, or 4 bytes
 otherwise.
 
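A walker over that record array might look like this sketch, assuming that
``r_count`` counts bytes from the start of the block, 16-byte header included;
``revoke()`` is a hypothetical handler::

    static void walk_revoke_records(const void *block, bool has_64bit)
    {
            uint32_t r_count = be32_to_cpup(block + 0xC);
            size_t off = 0x10;               /* records start after header */
            size_t rec = has_64bit ? 8 : 4;  /* per-record size */

            while (off + rec <= r_count) {
                    uint64_t blocknr = has_64bit ?
                            be64_to_cpup(block + off) :
                            be32_to_cpup(block + off);

                    revoke(blocknr);         /* hypothetical handler */
                    off += rec;
            }
    }
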
-If JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2 or
-JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 are set, the end of the revocation
+If JBD2_FEATURE_INCOMPAT_CSUM_V2 or
+JBD2_FEATURE_INCOMPAT_CSUM_V3 are set, the end of the revocation
 block is a ``struct jbd2_journal_revoke_tail``, which has this format:
 
 .. list-table::
@@ -568,8 +568,8 @@ block is a ``struct jbd2_journal_revoke_tail``, which has this format:
      - Name
      - Description
    * - 0x0
-     - \_\_be32
-     - r\_checksum
+     - __be32
+     - r_checksum
      - Checksum of the journal UUID + revocation block
 
 Commit Block
@@ -592,38 +592,38 @@ bytes long (but uses a full block):
      - Name
      - Descriptor
    * - 0x0
-     - journal\_header\_s
+     - journal_header_s
      - (open coded)
      - Common block header.
    * - 0xC
      - unsigned char
-     - h\_chksum\_type
+     - h_chksum_type
      - The type of checksum to use to verify the integrity of the data blocks
        in the transaction. See jbd2_checksum_type_ for more info.
    * - 0xD
      - unsigned char
-     - h\_chksum\_size
+     - h_chksum_size
      - The number of bytes used by the checksum. Most likely 4.
    * - 0xE
      - unsigned char
-     - h\_padding[2]
+     - h_padding[2]
      -
    * - 0x10
-     - \_\_be32
-     - h\_chksum[JBD2\_CHECKSUM\_BYTES]
+     - __be32
+     - h_chksum[JBD2_CHECKSUM_BYTES]
      - 32 bytes of space to store checksums. If
-       JBD2\_FEATURE\_INCOMPAT\_CSUM\_V2 or JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3
+       JBD2_FEATURE_INCOMPAT_CSUM_V2 or JBD2_FEATURE_INCOMPAT_CSUM_V3
        are set, the first ``__be32`` is the checksum of the journal UUID and
        the entire commit block, with this field zeroed. If
-       JBD2\_FEATURE\_COMPAT\_CHECKSUM is set, the first ``__be32`` is the
+       JBD2_FEATURE_COMPAT_CHECKSUM is set, the first ``__be32`` is the
        crc32 of all the blocks already written to the transaction.
    * - 0x30
-     - \_\_be64
-     - h\_commit\_sec
+     - __be64
+     - h_commit_sec
      - The time that the transaction was committed, in seconds since the epoch.
    * - 0x38
-     - \_\_be32
-     - h\_commit\_nsec
+     - __be32
+     - h_commit_nsec
      - Nanoseconds component of the above timestamp.
 
 Fast commits
index 2566098..174dd65 100644 (file)
@@ -7,8 +7,8 @@ Multiple mount protection (MMP) is a feature that protects the
 filesystem against multiple hosts trying to use the filesystem
 simultaneously. When a filesystem is opened (for mounting, or fsck,
 etc.), the MMP code running on the node (call it node A) checks a
-sequence number. If the sequence number is EXT4\_MMP\_SEQ\_CLEAN, the
-open continues. If the sequence number is EXT4\_MMP\_SEQ\_FSCK, then
+sequence number. If the sequence number is EXT4_MMP_SEQ_CLEAN, the
+open continues. If the sequence number is EXT4_MMP_SEQ_FSCK, then
 fsck is (hopefully) running, and open fails immediately. Otherwise, the
 open code will wait for twice the specified MMP check interval and check
 the sequence number again. If the sequence number has changed, then the
@@ -40,38 +40,38 @@ The MMP structure (``struct mmp_struct``) is as follows:
      - Name
      - Description
    * - 0x0
-     - \_\_le32
-     - mmp\_magic
+     - __le32
+     - mmp_magic
      - Magic number for MMP, 0x004D4D50 (“MMP”).
    * - 0x4
-     - \_\_le32
-     - mmp\_seq
+     - __le32
+     - mmp_seq
      - Sequence number, updated periodically.
    * - 0x8
-     - \_\_le64
-     - mmp\_time
+     - __le64
+     - mmp_time
      - Time that the MMP block was last updated.
    * - 0x10
      - char[64]
-     - mmp\_nodename
+     - mmp_nodename
      - Hostname of the node that opened the filesystem.
    * - 0x50
      - char[32]
-     - mmp\_bdevname
+     - mmp_bdevname
      - Block device name of the filesystem.
    * - 0x70
-     - \_\_le16
-     - mmp\_check\_interval
+     - __le16
+     - mmp_check_interval
      - The MMP re-check interval, in seconds.
    * - 0x72
-     - \_\_le16
-     - mmp\_pad1
+     - __le16
+     - mmp_pad1
      - Zero.
    * - 0x74
-     - \_\_le32[226]
-     - mmp\_pad2
+     - __le32[226]
+     - mmp_pad2
      - Zero.
    * - 0x3FC
-     - \_\_le32
-     - mmp\_checksum
+     - __le32
+     - mmp_checksum
      - Checksum of the MMP block.
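
The open-time check described at the start of this section could be outlined
as follows; ``read_mmp()``, ``sleep_seconds()`` and the error returns are
placeholders rather than the kernel's actual helpers, and the tail of the
protocol (what happens after no activity is observed) is omitted::

    static int mmp_open_check(void)
    {
            struct mmp_struct mmp = read_mmp();

            if (mmp.mmp_seq == EXT4_MMP_SEQ_CLEAN)
                    return 0;          /* clean, the open continues */
            if (mmp.mmp_seq == EXT4_MMP_SEQ_FSCK)
                    return -EBUSY;     /* fsck is (hopefully) running */

            /* Wait twice the advertised interval, then look again. */
            sleep_seconds(2 * mmp.mmp_check_interval);
            if (read_mmp().mmp_seq != mmp.mmp_seq)
                    return -EBUSY;     /* another host is actively using it */

            return 0;   /* no activity seen; remaining steps omitted here */
    }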
index 123ebfd..0fad6ed 100644 (file)
@@ -7,7 +7,7 @@ An ext4 file system is split into a series of block groups. To reduce
 performance difficulties due to fragmentation, the block allocator tries
 very hard to keep each file's blocks within the same group, thereby
 reducing seek times. The size of a block group is specified in
-``sb.s_blocks_per_group`` blocks, though it can also calculated as 8 \*
+``sb.s_blocks_per_group`` blocks, though it can also be calculated as 8 *
 ``block_size_in_bytes``. With the default block size of 4KiB, each group
 will contain 32,768 blocks, for a length of 128MiB. The number of block
 groups is the size of the device divided by the size of a block group.
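
The arithmetic is easy to verify; the factor of 8 falls out of the one-block
block bitmap (one bit per block, eight bits per byte)::

    #include <stdio.h>

    int main(void)
    {
            unsigned long block_size = 4096;                 /* default 4 KiB */
            unsigned long blocks_per_group = 8 * block_size; /* one-block bitmap */
            unsigned long group_bytes = blocks_per_group * block_size;

            /* Prints: 32768 blocks per group = 128 MiB */
            printf("%lu blocks per group = %lu MiB\n",
                   blocks_per_group, group_bytes >> 20);
            return 0;
    }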
index 94f304e..fc06369 100644 (file)
@@ -34,7 +34,7 @@ ext4 reserves some inode for special features, as follows:
    * - 10
      - Replica inode, used for some non-upstream feature?
    * - 11
-     - Traditional first non-reserved inode. Usually this is the lost+found directory. See s\_first\_ino in the superblock.
+     - Traditional first non-reserved inode. Usually this is the lost+found directory. See s_first_ino in the superblock.
 
 Note that there are also some inodes allocated from non-reserved inode numbers
 for other filesystem features which are not referenced from standard directory
@@ -47,9 +47,9 @@ hierarchy. These are generally referenced from the superblock. They are:
    * - Superblock field
      - Description
 
-   * - s\_lpf\_ino
+   * - s_lpf_ino
      - Inode number of lost+found directory.
-   * - s\_prj\_quota\_inum
+   * - s_prj_quota_inum
      - Inode number of quota file tracking project quotas
-   * - s\_orphan\_file\_inum
+   * - s_orphan_file_inum
      - Inode number of file tracking orphan inodes.
index f6a548e..2688885 100644 (file)
@@ -7,7 +7,7 @@ The superblock records various information about the enclosing
 filesystem, such as block counts, inode counts, supported features,
 maintenance information, and more.
 
-If the sparse\_super feature flag is set, redundant copies of the
+If the sparse_super feature flag is set, redundant copies of the
 superblock and group descriptors are kept only in the groups whose group
 number is either 0 or a power of 3, 5, or 7. If the flag is not set,
 redundant copies are kept in all groups.
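
As a sketch, the backup-placement rule can be written as a predicate (note
that 1 counts as a zeroth power, so group 1 also carries backups)::

    static int is_power_of(unsigned long n, unsigned long base)
    {
            if (n == 0)
                    return 0;
            while (n % base == 0)
                    n /= base;
            return n == 1;
    }

    /* Does this group hold superblock/GDT backups under sparse_super? */
    static int group_has_backup(unsigned long group)
    {
            return group == 0 || is_power_of(group, 3) ||
                   is_power_of(group, 5) || is_power_of(group, 7);
    }

For example, groups 0, 1, 3, 5, 7, 9, 25, 27 and 49 all satisfy the predicate.
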
@@ -27,107 +27,107 @@ The ext4 superblock is laid out as follows in
      - Name
      - Description
    * - 0x0
-     - \_\_le32
-     - s\_inodes\_count
+     - __le32
+     - s_inodes_count
      - Total inode count.
    * - 0x4
-     - \_\_le32
-     - s\_blocks\_count\_lo
+     - __le32
+     - s_blocks_count_lo
      - Total block count.
    * - 0x8
-     - \_\_le32
-     - s\_r\_blocks\_count\_lo
+     - __le32
+     - s_r_blocks_count_lo
      - This number of blocks can only be allocated by the super-user.
    * - 0xC
-     - \_\_le32
-     - s\_free\_blocks\_count\_lo
+     - __le32
+     - s_free_blocks_count_lo
      - Free block count.
    * - 0x10
-     - \_\_le32
-     - s\_free\_inodes\_count
+     - __le32
+     - s_free_inodes_count
      - Free inode count.
    * - 0x14
-     - \_\_le32
-     - s\_first\_data\_block
+     - __le32
+     - s_first_data_block
      - First data block. This must be at least 1 for 1k-block filesystems and
        is typically 0 for all other block sizes.
    * - 0x18
-     - \_\_le32
-     - s\_log\_block\_size
-     - Block size is 2 ^ (10 + s\_log\_block\_size).
+     - __le32
+     - s_log_block_size
+     - Block size is 2 ^ (10 + s_log_block_size).
    * - 0x1C
-     - \_\_le32
-     - s\_log\_cluster\_size
-     - Cluster size is 2 ^ (10 + s\_log\_cluster\_size) blocks if bigalloc is
-       enabled. Otherwise s\_log\_cluster\_size must equal s\_log\_block\_size.
+     - __le32
+     - s_log_cluster_size
+     - Cluster size is 2 ^ (10 + s_log_cluster_size) blocks if bigalloc is
+       enabled. Otherwise s_log_cluster_size must equal s_log_block_size.
    * - 0x20
-     - \_\_le32
-     - s\_blocks\_per\_group
+     - __le32
+     - s_blocks_per_group
      - Blocks per group.
    * - 0x24
-     - \_\_le32
-     - s\_clusters\_per\_group
+     - __le32
+     - s_clusters_per_group
      - Clusters per group, if bigalloc is enabled. Otherwise
-       s\_clusters\_per\_group must equal s\_blocks\_per\_group.
+       s_clusters_per_group must equal s_blocks_per_group.
    * - 0x28
-     - \_\_le32
-     - s\_inodes\_per\_group
+     - __le32
+     - s_inodes_per_group
      - Inodes per group.
    * - 0x2C
-     - \_\_le32
-     - s\_mtime
+     - __le32
+     - s_mtime
      - Mount time, in seconds since the epoch.
    * - 0x30
-     - \_\_le32
-     - s\_wtime
+     - __le32
+     - s_wtime
      - Write time, in seconds since the epoch.
    * - 0x34
-     - \_\_le16
-     - s\_mnt\_count
+     - __le16
+     - s_mnt_count
      - Number of mounts since the last fsck.
    * - 0x36
-     - \_\_le16
-     - s\_max\_mnt\_count
+     - __le16
+     - s_max_mnt_count
      - Number of mounts beyond which a fsck is needed.
    * - 0x38
-     - \_\_le16
-     - s\_magic
+     - __le16
+     - s_magic
      - Magic signature, 0xEF53
    * - 0x3A
-     - \_\_le16
-     - s\_state
+     - __le16
+     - s_state
      - File system state. See super_state_ for more info.
    * - 0x3C
-     - \_\_le16
-     - s\_errors
+     - __le16
+     - s_errors
      - Behaviour when detecting errors. See super_errors_ for more info.
    * - 0x3E
-     - \_\_le16
-     - s\_minor\_rev\_level
+     - __le16
+     - s_minor_rev_level
      - Minor revision level.
    * - 0x40
-     - \_\_le32
-     - s\_lastcheck
+     - __le32
+     - s_lastcheck
      - Time of last check, in seconds since the epoch.
    * - 0x44
-     - \_\_le32
-     - s\_checkinterval
+     - __le32
+     - s_checkinterval
      - Maximum time between checks, in seconds.
    * - 0x48
-     - \_\_le32
-     - s\_creator\_os
+     - __le32
+     - s_creator_os
      - Creator OS. See the table super_creator_ for more info.
    * - 0x4C
-     - \_\_le32
-     - s\_rev\_level
+     - __le32
+     - s_rev_level
      - Revision level. See the table super_revision_ for more info.
    * - 0x50
-     - \_\_le16
-     - s\_def\_resuid
+     - __le16
+     - s_def_resuid
      - Default uid for reserved blocks.
    * - 0x52
-     - \_\_le16
-     - s\_def\_resgid
+     - __le16
+     - s_def_resgid
      - Default gid for reserved blocks.
    * -
      -
@@ -143,50 +143,50 @@ The ext4 superblock is laid out as follows in
        about a feature in either the compatible or incompatible feature set, it
        must abort and not try to meddle with things it doesn't understand...
    * - 0x54
-     - \_\_le32
-     - s\_first\_ino
+     - __le32
+     - s_first_ino
      - First non-reserved inode.
    * - 0x58
-     - \_\_le16
-     - s\_inode\_size
+     - __le16
+     - s_inode_size
      - Size of inode structure, in bytes.
    * - 0x5A
-     - \_\_le16
-     - s\_block\_group\_nr
+     - __le16
+     - s_block_group_nr
      - Block group # of this superblock.
    * - 0x5C
-     - \_\_le32
-     - s\_feature\_compat
+     - __le32
+     - s_feature_compat
      - Compatible feature set flags. Kernel can still read/write this fs even
        if it doesn't understand a flag; fsck should not do that. See the
        super_compat_ table for more info.
    * - 0x60
-     - \_\_le32
-     - s\_feature\_incompat
+     - __le32
+     - s_feature_incompat
      - Incompatible feature set. If the kernel or fsck doesn't understand one
        of these bits, it should stop. See the super_incompat_ table for more
        info.
    * - 0x64
-     - \_\_le32
-     - s\_feature\_ro\_compat
+     - __le32
+     - s_feature_ro_compat
      - Readonly-compatible feature set. If the kernel doesn't understand one of
        these bits, it can still mount read-only. See the super_rocompat_ table
        for more info.
    * - 0x68
-     - \_\_u8
-     - s\_uuid[16]
+     - __u8
+     - s_uuid[16]
      - 128-bit UUID for volume.
    * - 0x78
      - char
-     - s\_volume\_name[16]
+     - s_volume_name[16]
      - Volume label.
    * - 0x88
      - char
-     - s\_last\_mounted[64]
+     - s_last_mounted[64]
      - Directory where filesystem was last mounted.
    * - 0xC8
-     - \_\_le32
-     - s\_algorithm\_usage\_bitmap
+     - __le32
+     - s_algorithm_usage_bitmap
      - For compression (Not used in e2fsprogs/Linux)
    * -
      -
@@ -194,18 +194,18 @@ The ext4 superblock is laid out as follows in
      - Performance hints.  Directory preallocation should only happen if the
        EXT4_FEATURE_COMPAT_DIR_PREALLOC flag is on.
    * - 0xCC
-     - \_\_u8
-     - s\_prealloc\_blocks
+     - __u8
+     - s_prealloc_blocks
      - #. of blocks to try to preallocate for ... files? (Not used in
        e2fsprogs/Linux)
    * - 0xCD
-     - \_\_u8
-     - s\_prealloc\_dir\_blocks
+     - __u8
+     - s_prealloc_dir_blocks
      - #. of blocks to preallocate for directories. (Not used in
        e2fsprogs/Linux)
    * - 0xCE
-     - \_\_le16
-     - s\_reserved\_gdt\_blocks
+     - __le16
+     - s_reserved_gdt_blocks
      - Number of reserved GDT entries for future filesystem expansion.
    * -
      -
@@ -213,281 +213,281 @@ The ext4 superblock is laid out as follows in
      - Journalling support is valid only if EXT4_FEATURE_COMPAT_HAS_JOURNAL is
        set.
    * - 0xD0
-     - \_\_u8
-     - s\_journal\_uuid[16]
+     - __u8
+     - s_journal_uuid[16]
      - UUID of journal superblock
    * - 0xE0
-     - \_\_le32
-     - s\_journal\_inum
+     - __le32
+     - s_journal_inum
      - inode number of journal file.
    * - 0xE4
-     - \_\_le32
-     - s\_journal\_dev
+     - __le32
+     - s_journal_dev
      - Device number of journal file, if the external journal feature flag is
        set.
    * - 0xE8
-     - \_\_le32
-     - s\_last\_orphan
+     - __le32
+     - s_last_orphan
      - Start of list of orphaned inodes to delete.
    * - 0xEC
-     - \_\_le32
-     - s\_hash\_seed[4]
+     - __le32
+     - s_hash_seed[4]
      - HTREE hash seed.
    * - 0xFC
-     - \_\_u8
-     - s\_def\_hash\_version
+     - __u8
+     - s_def_hash_version
      - Default hash algorithm to use for directory hashes. See super_def_hash_
        for more info.
    * - 0xFD
-     - \_\_u8
-     - s\_jnl\_backup\_type
-     - If this value is 0 or EXT3\_JNL\_BACKUP\_BLOCKS (1), then the
+     - __u8
+     - s_jnl_backup_type
+     - If this value is 0 or EXT3_JNL_BACKUP_BLOCKS (1), then the
        ``s_jnl_blocks`` field contains a duplicate copy of the inode's
        ``i_block[]`` array and ``i_size``.
    * - 0xFE
-     - \_\_le16
-     - s\_desc\_size
+     - __le16
+     - s_desc_size
      - Size of group descriptors, in bytes, if the 64bit incompat feature flag
        is set.
    * - 0x100
-     - \_\_le32
-     - s\_default\_mount\_opts
+     - __le32
+     - s_default_mount_opts
      - Default mount options. See the super_mountopts_ table for more info.
    * - 0x104
-     - \_\_le32
-     - s\_first\_meta\_bg
-     - First metablock block group, if the meta\_bg feature is enabled.
+     - __le32
+     - s_first_meta_bg
+     - First metablock block group, if the meta_bg feature is enabled.
    * - 0x108
-     - \_\_le32
-     - s\_mkfs\_time
+     - __le32
+     - s_mkfs_time
      - When the filesystem was created, in seconds since the epoch.
    * - 0x10C
-     - \_\_le32
-     - s\_jnl\_blocks[17]
+     - __le32
+     - s_jnl_blocks[17]
      - Backup copy of the journal inode's ``i_block[]`` array in the first 15
-       elements and i\_size\_high and i\_size in the 16th and 17th elements,
+       elements and i_size_high and i_size in the 16th and 17th elements,
        respectively.
    * -
      -
      -
      - 64bit support is valid only if EXT4_FEATURE_COMPAT_64BIT is set.
    * - 0x150
-     - \_\_le32
-     - s\_blocks\_count\_hi
+     - __le32
+     - s_blocks_count_hi
      - High 32-bits of the block count.
    * - 0x154
-     - \_\_le32
-     - s\_r\_blocks\_count\_hi
+     - __le32
+     - s_r_blocks_count_hi
      - High 32-bits of the reserved block count.
    * - 0x158
-     - \_\_le32
-     - s\_free\_blocks\_count\_hi
+     - __le32
+     - s_free_blocks_count_hi
      - High 32-bits of the free block count.
    * - 0x15C
-     - \_\_le16
-     - s\_min\_extra\_isize
+     - __le16
+     - s_min_extra_isize
      - All inodes have at least # bytes.
    * - 0x15E
-     - \_\_le16
-     - s\_want\_extra\_isize
+     - __le16
+     - s_want_extra_isize
      - New inodes should reserve # bytes.
    * - 0x160
-     - \_\_le32
-     - s\_flags
+     - __le32
+     - s_flags
      - Miscellaneous flags. See the super_flags_ table for more info.
    * - 0x164
-     - \_\_le16
-     - s\_raid\_stride
+     - __le16
+     - s_raid_stride
      - RAID stride. This is the number of logical blocks read from or written
        to the disk before moving to the next disk. This affects the placement
        of filesystem metadata, which will hopefully make RAID storage faster.
    * - 0x166
-     - \_\_le16
-     - s\_mmp\_interval
+     - __le16
+     - s_mmp_interval
      - #. seconds to wait in multi-mount prevention (MMP) checking. In theory,
        MMP is a mechanism to record in the superblock which host and device
        have mounted the filesystem, in order to prevent multiple mounts. This
        feature does not seem to be implemented...
    * - 0x168
-     - \_\_le64
-     - s\_mmp\_block
+     - __le64
+     - s_mmp_block
      - Block # for multi-mount protection data.
    * - 0x170
-     - \_\_le32
-     - s\_raid\_stripe\_width
+     - __le32
+     - s_raid_stripe_width
      - RAID stripe width. This is the number of logical blocks read from or
        written to the disk before coming back to the current disk. This is used
        by the block allocator to try to reduce the number of read-modify-write
        operations in a RAID5/6.
    * - 0x174
-     - \_\_u8
-     - s\_log\_groups\_per\_flex
+     - __u8
+     - s_log_groups_per_flex
      - Size of a flexible block group is 2 ^ ``s_log_groups_per_flex``.
    * - 0x175
-     - \_\_u8
-     - s\_checksum\_type
+     - __u8
+     - s_checksum_type
      - Metadata checksum algorithm type. The only valid value is 1 (crc32c).
    * - 0x176
-     - \_\_le16
-     - s\_reserved\_pad
+     - __le16
+     - s_reserved_pad
      -
    * - 0x178
-     - \_\_le64
-     - s\_kbytes\_written
+     - __le64
+     - s_kbytes_written
      - Number of KiB written to this filesystem over its lifetime.
    * - 0x180
-     - \_\_le32
-     - s\_snapshot\_inum
+     - __le32
+     - s_snapshot_inum
      - inode number of active snapshot. (Not used in e2fsprogs/Linux.)
    * - 0x184
-     - \_\_le32
-     - s\_snapshot\_id
+     - __le32
+     - s_snapshot_id
      - Sequential ID of active snapshot. (Not used in e2fsprogs/Linux.)
    * - 0x188
-     - \_\_le64
-     - s\_snapshot\_r\_blocks\_count
+     - __le64
+     - s_snapshot_r_blocks_count
      - Number of blocks reserved for active snapshot's future use. (Not used in
        e2fsprogs/Linux.)
    * - 0x190
-     - \_\_le32
-     - s\_snapshot\_list
+     - __le32
+     - s_snapshot_list
      - inode number of the head of the on-disk snapshot list. (Not used in
        e2fsprogs/Linux.)
    * - 0x194
-     - \_\_le32
-     - s\_error\_count
+     - __le32
+     - s_error_count
      - Number of errors seen.
    * - 0x198
-     - \_\_le32
-     - s\_first\_error\_time
+     - __le32
+     - s_first_error_time
      - First time an error happened, in seconds since the epoch.
    * - 0x19C
-     - \_\_le32
-     - s\_first\_error\_ino
+     - __le32
+     - s_first_error_ino
      - inode involved in first error.
    * - 0x1A0
-     - \_\_le64
-     - s\_first\_error\_block
+     - __le64
+     - s_first_error_block
     - Number of the block involved in the first error.
    * - 0x1A8
-     - \_\_u8
-     - s\_first\_error\_func[32]
+     - __u8
+     - s_first_error_func[32]
      - Name of function where the error happened.
    * - 0x1C8
-     - \_\_le32
-     - s\_first\_error\_line
+     - __le32
+     - s_first_error_line
      - Line number where error happened.
    * - 0x1CC
-     - \_\_le32
-     - s\_last\_error\_time
+     - __le32
+     - s_last_error_time
      - Time of most recent error, in seconds since the epoch.
    * - 0x1D0
-     - \_\_le32
-     - s\_last\_error\_ino
+     - __le32
+     - s_last_error_ino
      - inode involved in most recent error.
    * - 0x1D4
-     - \_\_le32
-     - s\_last\_error\_line
+     - __le32
+     - s_last_error_line
      - Line number where most recent error happened.
    * - 0x1D8
-     - \_\_le64
-     - s\_last\_error\_block
+     - __le64
+     - s_last_error_block
      - Number of block involved in most recent error.
    * - 0x1E0
-     - \_\_u8
-     - s\_last\_error\_func[32]
+     - __u8
+     - s_last_error_func[32]
      - Name of function where the most recent error happened.
    * - 0x200
-     - \_\_u8
-     - s\_mount\_opts[64]
+     - __u8
+     - s_mount_opts[64]
      - ASCIIZ string of mount options.
    * - 0x240
-     - \_\_le32
-     - s\_usr\_quota\_inum
+     - __le32
+     - s_usr_quota_inum
      - Inode number of user `quota <quota>`__ file.
    * - 0x244
-     - \_\_le32
-     - s\_grp\_quota\_inum
+     - __le32
+     - s_grp_quota_inum
      - Inode number of group `quota <quota>`__ file.
    * - 0x248
-     - \_\_le32
-     - s\_overhead\_blocks
+     - __le32
+     - s_overhead_blocks
      - Overhead blocks/clusters in fs. (Huh? This field is always zero, which
        means that the kernel calculates it dynamically.)
    * - 0x24C
-     - \_\_le32
-     - s\_backup\_bgs[2]
-     - Block groups containing superblock backups (if sparse\_super2)
+     - __le32
+     - s_backup_bgs[2]
+     - Block groups containing superblock backups (if sparse_super2)
    * - 0x254
-     - \_\_u8
-     - s\_encrypt\_algos[4]
+     - __u8
+     - s_encrypt_algos[4]
      - Encryption algorithms in use. There can be up to four algorithms in use
        at any time; valid algorithm codes are given in the super_encrypt_ table
        below.
    * - 0x258
-     - \_\_u8
-     - s\_encrypt\_pw\_salt[16]
+     - __u8
+     - s_encrypt_pw_salt[16]
      - Salt for the string2key algorithm for encryption.
    * - 0x268
-     - \_\_le32
-     - s\_lpf\_ino
+     - __le32
+     - s_lpf_ino
      - Inode number of lost+found
    * - 0x26C
-     - \_\_le32
-     - s\_prj\_quota\_inum
+     - __le32
+     - s_prj_quota_inum
      - Inode that tracks project quotas.
    * - 0x270
-     - \_\_le32
-     - s\_checksum\_seed
-     - Checksum seed used for metadata\_csum calculations. This value is
-       crc32c(~0, $orig\_fs\_uuid).
+     - __le32
+     - s_checksum_seed
+     - Checksum seed used for metadata_csum calculations. This value is
+       crc32c(~0, $orig_fs_uuid).
    * - 0x274
-     - \_\_u8
-     - s\_wtime_hi
+     - __u8
+     - s_wtime_hi
      - Upper 8 bits of the s_wtime field.
    * - 0x275
-     - \_\_u8
-     - s\_mtime_hi
+     - __u8
+     - s_mtime_hi
      - Upper 8 bits of the s_mtime field.
    * - 0x276
-     - \_\_u8
-     - s\_mkfs_time_hi
+     - __u8
+     - s_mkfs_time_hi
      - Upper 8 bits of the s_mkfs_time field.
    * - 0x277
-     - \_\_u8
-     - s\_lastcheck_hi
+     - __u8
+     - s_lastcheck_hi
     - Upper 8 bits of the s_lastcheck field.
    * - 0x278
-     - \_\_u8
-     - s\_first_error_time_hi
+     - __u8
+     - s_first_error_time_hi
     - Upper 8 bits of the s_first_error_time field.
    * - 0x279
-     - \_\_u8
-     - s\_last_error_time_hi
+     - __u8
+     - s_last_error_time_hi
     - Upper 8 bits of the s_last_error_time field.
    * - 0x27A
-     - \_\_u8
-     - s\_pad[2]
+     - __u8
+     - s_pad[2]
      - Zero padding.
    * - 0x27C
-     - \_\_le16
-     - s\_encoding
+     - __le16
+     - s_encoding
      - Filename charset encoding.
    * - 0x27E
-     - \_\_le16
-     - s\_encoding_flags
+     - __le16
+     - s_encoding_flags
      - Filename charset encoding flags.
    * - 0x280
-     - \_\_le32
-     - s\_orphan\_file\_inum
+     - __le32
+     - s_orphan_file_inum
      - Orphan file inode number.
    * - 0x284
-     - \_\_le32
-     - s\_reserved[94]
+     - __le32
+     - s_reserved[94]
      - Padding to the end of the block.
    * - 0x3FC
-     - \_\_le32
-     - s\_checksum
+     - __le32
+     - s_checksum
      - Superblock checksum.
 
 .. _super_state:
@@ -574,44 +574,44 @@ following:
    * - Value
      - Description
    * - 0x1
-     - Directory preallocation (COMPAT\_DIR\_PREALLOC).
+     - Directory preallocation (COMPAT_DIR_PREALLOC).
    * - 0x2
      - “imagic inodes”. Not clear from the code what this does
-       (COMPAT\_IMAGIC\_INODES).
+       (COMPAT_IMAGIC_INODES).
    * - 0x4
-     - Has a journal (COMPAT\_HAS\_JOURNAL).
+     - Has a journal (COMPAT_HAS_JOURNAL).
    * - 0x8
-     - Supports extended attributes (COMPAT\_EXT\_ATTR).
+     - Supports extended attributes (COMPAT_EXT_ATTR).
    * - 0x10
      - Has reserved GDT blocks for filesystem expansion
-       (COMPAT\_RESIZE\_INODE). Requires RO\_COMPAT\_SPARSE\_SUPER.
+       (COMPAT_RESIZE_INODE). Requires RO_COMPAT_SPARSE_SUPER.
    * - 0x20
-     - Has directory indices (COMPAT\_DIR\_INDEX).
+     - Has directory indices (COMPAT_DIR_INDEX).
    * - 0x40
      - “Lazy BG”. Not in Linux kernel, seems to have been for uninitialized
-       block groups? (COMPAT\_LAZY\_BG)
+       block groups? (COMPAT_LAZY_BG)
    * - 0x80
-     - “Exclude inode”. Not used. (COMPAT\_EXCLUDE\_INODE).
+     - “Exclude inode”. Not used. (COMPAT_EXCLUDE_INODE).
    * - 0x100
      - “Exclude bitmap”. Seems to be used to indicate the presence of
        snapshot-related exclude bitmaps? Not defined in kernel or used in
-       e2fsprogs (COMPAT\_EXCLUDE\_BITMAP).
+       e2fsprogs (COMPAT_EXCLUDE_BITMAP).
    * - 0x200
-     - Sparse Super Block, v2. If this flag is set, the SB field s\_backup\_bgs
+     - Sparse Super Block, v2. If this flag is set, the SB field s_backup_bgs
        points to the two block groups that contain backup superblocks
-       (COMPAT\_SPARSE\_SUPER2).
+       (COMPAT_SPARSE_SUPER2).
    * - 0x400
     - Fast commits supported. Although fast commit blocks are
        backward incompatible, fast commit blocks are not always
        present in the journal. If fast commit blocks are present in
        the journal, JBD2 incompat feature
-       (JBD2\_FEATURE\_INCOMPAT\_FAST\_COMMIT) gets
-       set (COMPAT\_FAST\_COMMIT).
+       (JBD2_FEATURE_INCOMPAT_FAST_COMMIT) gets
+       set (COMPAT_FAST_COMMIT).
    * - 0x1000
      - Orphan file allocated. This is the special file for more efficient
        tracking of unlinked but still open inodes. When there may be any
        entries in the file, we additionally set proper rocompat feature
-       (RO\_COMPAT\_ORPHAN\_PRESENT).
+       (RO_COMPAT_ORPHAN_PRESENT).
 
 .. _super_incompat:
 
@@ -625,45 +625,45 @@ following:
    * - Value
      - Description
    * - 0x1
-     - Compression (INCOMPAT\_COMPRESSION).
+     - Compression (INCOMPAT_COMPRESSION).
    * - 0x2
-     - Directory entries record the file type. See ext4\_dir\_entry\_2 below
-       (INCOMPAT\_FILETYPE).
+     - Directory entries record the file type. See ext4_dir_entry_2 below
+       (INCOMPAT_FILETYPE).
    * - 0x4
-     - Filesystem needs recovery (INCOMPAT\_RECOVER).
+     - Filesystem needs recovery (INCOMPAT_RECOVER).
    * - 0x8
-     - Filesystem has a separate journal device (INCOMPAT\_JOURNAL\_DEV).
+     - Filesystem has a separate journal device (INCOMPAT_JOURNAL_DEV).
    * - 0x10
      - Meta block groups. See the earlier discussion of this feature
-       (INCOMPAT\_META\_BG).
+       (INCOMPAT_META_BG).
    * - 0x40
-     - Files in this filesystem use extents (INCOMPAT\_EXTENTS).
+     - Files in this filesystem use extents (INCOMPAT_EXTENTS).
    * - 0x80
-     - Enable a filesystem size of 2^64 blocks (INCOMPAT\_64BIT).
+     - Enable a filesystem size of 2^64 blocks (INCOMPAT_64BIT).
    * - 0x100
-     - Multiple mount protection (INCOMPAT\_MMP).
+     - Multiple mount protection (INCOMPAT_MMP).
    * - 0x200
      - Flexible block groups. See the earlier discussion of this feature
-       (INCOMPAT\_FLEX\_BG).
+       (INCOMPAT_FLEX_BG).
    * - 0x400
      - Inodes can be used to store large extended attribute values
-       (INCOMPAT\_EA\_INODE).
+       (INCOMPAT_EA_INODE).
    * - 0x1000
-     - Data in directory entry (INCOMPAT\_DIRDATA). (Not implemented?)
+     - Data in directory entry (INCOMPAT_DIRDATA). (Not implemented?)
    * - 0x2000
      - Metadata checksum seed is stored in the superblock. This feature enables
-       the administrator to change the UUID of a metadata\_csum filesystem
+       the administrator to change the UUID of a metadata_csum filesystem
        while the filesystem is mounted; without it, the checksum definition
-       requires all metadata blocks to be rewritten (INCOMPAT\_CSUM\_SEED).
+       requires all metadata blocks to be rewritten (INCOMPAT_CSUM_SEED).
    * - 0x4000
-     - Large directory >2GB or 3-level htree (INCOMPAT\_LARGEDIR). Prior to
+     - Large directory >2GB or 3-level htree (INCOMPAT_LARGEDIR). Prior to
        this feature, directories could not be larger than 4GiB and could not
        have an htree more than 2 levels deep. If this feature is enabled,
        directories can be larger than 4GiB and have a maximum htree depth of 3.
    * - 0x8000
-     - Data in inode (INCOMPAT\_INLINE\_DATA).
+     - Data in inode (INCOMPAT_INLINE_DATA).
    * - 0x10000
-     - Encrypted inodes are present on the filesystem. (INCOMPAT\_ENCRYPT).
+     - Encrypted inodes are present on the filesystem. (INCOMPAT_ENCRYPT).
 
 .. _super_rocompat:
 
@@ -678,54 +678,54 @@ the following:
      - Description
    * - 0x1
      - Sparse superblocks. See the earlier discussion of this feature
-       (RO\_COMPAT\_SPARSE\_SUPER).
+       (RO_COMPAT_SPARSE_SUPER).
    * - 0x2
      - This filesystem has been used to store a file greater than 2GiB
-       (RO\_COMPAT\_LARGE\_FILE).
+       (RO_COMPAT_LARGE_FILE).
    * - 0x4
-     - Not used in kernel or e2fsprogs (RO\_COMPAT\_BTREE\_DIR).
+     - Not used in kernel or e2fsprogs (RO_COMPAT_BTREE_DIR).
    * - 0x8
      - This filesystem has files whose sizes are represented in units of
        logical blocks, not 512-byte sectors. This implies a very large file
-       indeed! (RO\_COMPAT\_HUGE\_FILE)
+       indeed! (RO_COMPAT_HUGE_FILE)
    * - 0x10
      - Group descriptors have checksums. In addition to detecting corruption,
        this is useful for lazy formatting with uninitialized groups
-       (RO\_COMPAT\_GDT\_CSUM).
+       (RO_COMPAT_GDT_CSUM).
    * - 0x20
      - Indicates that the old ext3 32,000 subdirectory limit no longer applies
-       (RO\_COMPAT\_DIR\_NLINK). A directory's i\_links\_count will be set to 1
+       (RO_COMPAT_DIR_NLINK). A directory's i_links_count will be set to 1
        if it is incremented past 64,999.
    * - 0x40
      - Indicates that large inodes exist on this filesystem
-       (RO\_COMPAT\_EXTRA\_ISIZE).
+       (RO_COMPAT_EXTRA_ISIZE).
    * - 0x80
-     - This filesystem has a snapshot (RO\_COMPAT\_HAS\_SNAPSHOT).
+     - This filesystem has a snapshot (RO_COMPAT_HAS_SNAPSHOT).
    * - 0x100
-     - `Quota <Quota>`__ (RO\_COMPAT\_QUOTA).
+     - `Quota <Quota>`__ (RO_COMPAT_QUOTA).
    * - 0x200
      - This filesystem supports “bigalloc”, which means that file extents are
        tracked in units of clusters (of blocks) instead of blocks
-       (RO\_COMPAT\_BIGALLOC).
+       (RO_COMPAT_BIGALLOC).
    * - 0x400
      - This filesystem supports metadata checksumming.
-       (RO\_COMPAT\_METADATA\_CSUM; implies RO\_COMPAT\_GDT\_CSUM, though
-       GDT\_CSUM must not be set)
+       (RO_COMPAT_METADATA_CSUM; implies RO_COMPAT_GDT_CSUM, though
+       GDT_CSUM must not be set)
    * - 0x800
      - Filesystem supports replicas. This feature is neither in the kernel nor
-       e2fsprogs. (RO\_COMPAT\_REPLICA)
+       e2fsprogs. (RO_COMPAT_REPLICA)
    * - 0x1000
      - Read-only filesystem image; the kernel will not mount this image
        read-write and most tools will refuse to write to the image.
-       (RO\_COMPAT\_READONLY)
+       (RO_COMPAT_READONLY)
    * - 0x2000
-     - Filesystem tracks project quotas. (RO\_COMPAT\_PROJECT)
+     - Filesystem tracks project quotas. (RO_COMPAT_PROJECT)
    * - 0x8000
-     - Verity inodes may be present on the filesystem. (RO\_COMPAT\_VERITY)
+     - Verity inodes may be present on the filesystem. (RO_COMPAT_VERITY)
    * - 0x10000
     - Indicates that the orphan file may have valid orphan entries and thus
       we need to clean them up when mounting the filesystem
-       (RO\_COMPAT\_ORPHAN\_PRESENT).
+       (RO_COMPAT_ORPHAN_PRESENT).
 
 .. _super_def_hash:
 
@@ -761,36 +761,36 @@ The ``s_default_mount_opts`` field is any combination of the following:
    * - Value
      - Description
    * - 0x0001
-     - Print debugging info upon (re)mount. (EXT4\_DEFM\_DEBUG)
+     - Print debugging info upon (re)mount. (EXT4_DEFM_DEBUG)
    * - 0x0002
      - New files take the gid of the containing directory (instead of the fsgid
-       of the current process). (EXT4\_DEFM\_BSDGROUPS)
+       of the current process). (EXT4_DEFM_BSDGROUPS)
    * - 0x0004
-     - Support userspace-provided extended attributes. (EXT4\_DEFM\_XATTR\_USER)
+     - Support userspace-provided extended attributes. (EXT4_DEFM_XATTR_USER)
    * - 0x0008
-     - Support POSIX access control lists (ACLs). (EXT4\_DEFM\_ACL)
+     - Support POSIX access control lists (ACLs). (EXT4_DEFM_ACL)
    * - 0x0010
-     - Do not support 32-bit UIDs. (EXT4\_DEFM\_UID16)
+     - Do not support 32-bit UIDs. (EXT4_DEFM_UID16)
    * - 0x0020
     - All data and metadata are committed to the journal.
-       (EXT4\_DEFM\_JMODE\_DATA)
+       (EXT4_DEFM_JMODE_DATA)
    * - 0x0040
      - All data are flushed to the disk before metadata are committed to the
-       journal. (EXT4\_DEFM\_JMODE\_ORDERED)
+       journal. (EXT4_DEFM_JMODE_ORDERED)
    * - 0x0060
      - Data ordering is not preserved; data may be written after the metadata
-       has been written. (EXT4\_DEFM\_JMODE\_WBACK)
+       has been written. (EXT4_DEFM_JMODE_WBACK)
    * - 0x0100
-     - Disable write flushes. (EXT4\_DEFM\_NOBARRIER)
+     - Disable write flushes. (EXT4_DEFM_NOBARRIER)
    * - 0x0200
      - Track which blocks in a filesystem are metadata and therefore should not
        be used as data blocks. This option will be enabled by default on 3.18,
-       hopefully. (EXT4\_DEFM\_BLOCK\_VALIDITY)
+       hopefully. (EXT4_DEFM_BLOCK_VALIDITY)
    * - 0x0400
      - Enable DISCARD support, where the storage device is told about blocks
-       becoming unused. (EXT4\_DEFM\_DISCARD)
+       becoming unused. (EXT4_DEFM_DISCARD)
    * - 0x0800
-     - Disable delayed allocation. (EXT4\_DEFM\_NODELALLOC)
+     - Disable delayed allocation. (EXT4_DEFM_NODELALLOC)
 
 .. _super_flags:
 
@@ -820,12 +820,12 @@ The ``s_encrypt_algos`` list can contain any of the following:
    * - Value
      - Description
    * - 0
-     - Invalid algorithm (ENCRYPTION\_MODE\_INVALID).
+     - Invalid algorithm (ENCRYPTION_MODE_INVALID).
    * - 1
-     - 256-bit AES in XTS mode (ENCRYPTION\_MODE\_AES\_256\_XTS).
+     - 256-bit AES in XTS mode (ENCRYPTION_MODE_AES_256_XTS).
    * - 2
-     - 256-bit AES in GCM mode (ENCRYPTION\_MODE\_AES\_256\_GCM).
+     - 256-bit AES in GCM mode (ENCRYPTION_MODE_AES_256_GCM).
    * - 3
-     - 256-bit AES in CBC mode (ENCRYPTION\_MODE\_AES\_256\_CBC).
+     - 256-bit AES in CBC mode (ENCRYPTION_MODE_AES_256_CBC).
 
 Total size of the superblock is 1024 bytes.
index 4d19b19..73a4176 100644 (file)
@@ -301,7 +301,7 @@ through which it can issue requests and negotiate::
                void (*issue_read)(struct netfs_io_subrequest *subreq);
                bool (*is_still_valid)(struct netfs_io_request *rreq);
                int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
-                                        struct folio *folio, void **_fsdata);
+                                        struct folio **foliop, void **_fsdata);
                void (*done)(struct netfs_io_request *rreq);
        };
 
@@ -381,8 +381,10 @@ The operations are as follows:
    allocated/grabbed the folio to be modified to allow the filesystem to flush
    conflicting state before allowing it to be modified.
 
-   It should return 0 if everything is now fine, -EAGAIN if the folio should be
-   regrabbed and any other error code to abort the operation.
+   It may unlock and discard the folio it was given and set the caller's folio
+   pointer to NULL.  It should return 0 both when everything is now fine
+   (``*foliop`` left set) and when the op should be retried (``*foliop``
+   cleared); any other error code aborts the operation.  A sketch of such an
+   implementation follows this list.
 
  * ``done``
 
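To make ``check_write_begin``'s retry contract concrete, a filesystem's
implementation might look like the following sketch; ``my_has_conflict()`` is
an invented placeholder for whatever conflicting state the filesystem tracks::

    static int my_check_write_begin(struct file *file, loff_t pos,
                                    unsigned len, struct folio **foliop,
                                    void **_fsdata)
    {
            if (my_has_conflict(file, pos, len)) {
                    /* Drop the folio; a cleared *foliop means "retry". */
                    folio_unlock(*foliop);
                    folio_put(*foliop);
                    *foliop = NULL;
            }
            return 0;
    }
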
index b854bb4..6b2bac8 100644 (file)
@@ -129,18 +129,24 @@ yet. Bug reports are always welcome at the issue tracker below!
    * - arm64
      - Supported
      - ``LLVM=1``
+   * - hexagon
+     - Maintained
+     - ``LLVM=1``
    * - mips
      - Maintained
-     - ``CC=clang``
+     - ``LLVM=1``
    * - powerpc
      - Maintained
      - ``CC=clang``
    * - riscv
      - Maintained
-     - ``CC=clang``
+     - ``LLVM=1``
    * - s390
      - Maintained
      - ``CC=clang``
+   * - um (User Mode)
+     - Maintained
+     - ``LLVM=1``
    * - x86
      - Supported
      - ``LLVM=1``
index dbe9b40..7347638 100644 (file)
@@ -210,11 +210,11 @@ module->symtab.
 =====================================
 Normally, a stripped down copy of a module's symbol table (containing only
 "core" symbols) is made available through module->symtab (See layout_symtab()
-in kernel/module.c). For livepatch modules, the symbol table copied into memory
-on module load must be exactly the same as the symbol table produced when the
-patch module was compiled. This is because the relocations in each livepatch
-relocation section refer to their respective symbols with their symbol indices,
-and the original symbol indices (and thus the symtab ordering) must be
+in kernel/module/kallsyms.c). For livepatch modules, the symbol table copied
+into memory on module load must be exactly the same as the symbol table produced
+when the patch module was compiled. This is because the relocations in each
+livepatch relocation section refer to their respective symbols with their symbol
+indices, and the original symbol indices (and thus the symtab ordering) must be
 preserved in order for apply_relocate_add() to find the right symbol.
 
 For example, take this particular rela from a livepatch module::
index 2bf40ad..216b3f3 100644 (file)
@@ -45,10 +45,12 @@ Name              Alias           Usage               Preserved
 ``$r23``-``$r31`` ``$s0``-``$s8`` Static registers    Yes
 ================= =============== =================== ============
 
-Note: The register ``$r21`` is reserved in the ELF psABI, but used by the Linux
-kernel for storing the percpu base address. It normally has no ABI name, but is
-called ``$u0`` in the kernel. You may also see ``$v0`` or ``$v1`` in some old code,
-however they are deprecated aliases of ``$a0`` and ``$a1`` respectively.
+.. Note::
+    The register ``$r21`` is reserved in the ELF psABI, but used by the Linux
+    kernel for storing the percpu base address. It normally has no ABI name,
+    but is called ``$u0`` in the kernel. You may also see ``$v0`` or ``$v1``
+    in some old code, however they are deprecated aliases of ``$a0`` and
+    ``$a1`` respectively.
+    respectively.
 
 FPRs
 ----
@@ -69,8 +71,9 @@ Name              Alias              Usage               Preserved
 ``$f24``-``$f31`` ``$fs0``-``$fs7``  Static registers    Yes
 ================= ================== =================== ============
 
-Note: You may see ``$fv0`` or ``$fv1`` in some old code, however they are deprecated
-aliases of ``$fa0`` and ``$fa1`` respectively.
+.. Note::
+    You may see ``$fv0`` or ``$fv1`` in some old code, however they are
+    deprecated aliases of ``$fa0`` and ``$fa1`` respectively.
 
 VRs
 ----
index 8d88f7a..7988f41 100644 (file)
@@ -145,12 +145,16 @@ Documentation of Loongson's LS7A chipset:
 
   https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-7A1000-usermanual-2.00-EN.pdf (in English)
 
-Note: CPUINTC is CSR.ECFG/CSR.ESTAT and its interrupt controller described
-in Section 7.4 of "LoongArch Reference Manual, Vol 1"; LIOINTC is "Legacy I/O
-Interrupts" described in Section 11.1 of "Loongson 3A5000 Processor Reference
-Manual"; EIOINTC is "Extended I/O Interrupts" described in Section 11.2 of
-"Loongson 3A5000 Processor Reference Manual"; HTVECINTC is "HyperTransport
-Interrupts" described in Section 14.3 of "Loongson 3A5000 Processor Reference
-Manual"; PCH-PIC/PCH-MSI is "Interrupt Controller" described in Section 5 of
-"Loongson 7A1000 Bridge User Manual"; PCH-LPC is "LPC Interrupts" described in
-Section 24.3 of "Loongson 7A1000 Bridge User Manual".
+.. Note::
+    - CPUINTC is the CSR.ECFG/CSR.ESTAT registers and their interrupt control
+      logic described in Section 7.4 of "LoongArch Reference Manual, Vol 1";
+    - LIOINTC is "Legacy I/O Interrupts" described in Section 11.1 of
+      "Loongson 3A5000 Processor Reference Manual";
+    - EIOINTC is "Extended I/O Interrupts" described in Section 11.2 of
+      "Loongson 3A5000 Processor Reference Manual";
+    - HTVECINTC is "HyperTransport Interrupts" described in Section 14.3 of
+      "Loongson 3A5000 Processor Reference Manual";
+    - PCH-PIC/PCH-MSI is "Interrupt Controller" described in Section 5 of
+      "Loongson 7A1000 Bridge User Manual";
+    - PCH-LPC is "LPC Interrupts" described in Section 24.3 of
+      "Loongson 7A1000 Bridge User Manual".
index b12df91..832b5d3 100644 (file)
@@ -1894,6 +1894,7 @@ There are some more advanced barrier functions:
 
  (*) dma_wmb();
  (*) dma_rmb();
+ (*) dma_mb();
 
      These are for use with consistent memory to guarantee the ordering
      of writes or reads of shared memory accessible to both the CPU and a
@@ -1925,11 +1926,11 @@ There are some more advanced barrier functions:
      The dma_rmb() allows us to guarantee the device has released ownership
      before we read the data from the descriptor, and the dma_wmb() allows
      us to guarantee the data is written to the descriptor before the device
-     can see it now has ownership.  Note that, when using writel(), a prior
-     wmb() is not needed to guarantee that the cache coherent memory writes
-     have completed before writing to the MMIO region.  The cheaper
-     writel_relaxed() does not provide this guarantee and must not be used
-     here.
+     can see it now has ownership.  The dma_mb() implies both a dma_rmb() and
+     a dma_wmb().  Note that, when using writel(), a prior wmb() is not needed
+     to guarantee that the cache coherent memory writes have completed before
+     writing to the MMIO region.  The cheaper writel_relaxed() does not provide
+     this guarantee and must not be used here.
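+
+     As a hedged illustration (the descriptor layout and the DEVICE_DONE /
+     NEW_REQUEST values are hypothetical, not from any in-tree driver),
+     dma_mb() could be used where a read of device-written coherent memory
+     must be ordered before a subsequent write to it:
+
+        if (READ_ONCE(desc->status) == DEVICE_DONE) {
+                /* order the read of desc->status before the write of
+                 * desc->request, for both the CPU and the device
+                 */
+                dma_mb();
+                WRITE_ONCE(desc->request, NEW_REQUEST);
+        }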
 
      See the subsection "Kernel I/O barrier effects" for more information on
      relaxed I/O accessors and the Documentation/core-api/dma-api.rst file for
index ed7fa76..d742ba6 100644 (file)
@@ -503,26 +503,108 @@ per-port PHY specific details: interface connection, MDIO bus location, etc.
 Driver development
 ==================
 
-DSA switch drivers need to implement a dsa_switch_ops structure which will
+DSA switch drivers need to implement a ``dsa_switch_ops`` structure which will
 contain the various members described below.
 
-``register_switch_driver()`` registers this dsa_switch_ops in its internal list
-of drivers to probe for. ``unregister_switch_driver()`` does the exact opposite.
+Probing, registration and device lifetime
+-----------------------------------------
 
-Unless requested differently by setting the priv_size member accordingly, DSA
-does not allocate any driver private context space.
+DSA switches are regular ``device`` structures on buses (be they platform, SPI,
+I2C, MDIO or otherwise). The DSA framework is not involved in their probing
+with the device core.
+
+Switch registration from the perspective of a driver means passing a valid
+``struct dsa_switch`` pointer to ``dsa_register_switch()``, usually from the
+switch driver's probing function. The following members must be valid in the
+provided structure:
+
+- ``ds->dev``: will be used to parse the switch's OF node or platform data.
+
+- ``ds->num_ports``: will be used to create the port list for this switch, and
+  to validate the port indices provided in the OF node.
+
+- ``ds->ops``: a pointer to the ``dsa_switch_ops`` structure holding the DSA
+  method implementations.
+
+- ``ds->priv``: backpointer to a driver-private data structure which can be
+  retrieved in all further DSA method callbacks.
+
+In addition, the following flags in the ``dsa_switch`` structure may optionally
+be configured to obtain driver-specific behavior from the DSA core. Their
+behavior when set is documented through comments in ``include/net/dsa.h``.
+
+- ``ds->vlan_filtering_is_global``
+
+- ``ds->needs_standalone_vlan_filtering``
+
+- ``ds->configure_vlan_while_not_filtering``
+
+- ``ds->untag_bridge_pvid``
+
+- ``ds->assisted_learning_on_cpu_port``
+
+- ``ds->mtu_enforcement_ingress``
+
+- ``ds->fdb_isolation``
+
+Internally, DSA keeps an array of switch trees (group of switches) global to
+the kernel, and attaches a ``dsa_switch`` structure to a tree on registration.
+The tree ID to which the switch is attached is determined by the first u32
+number of the ``dsa,member`` property of the switch's OF node (0 if missing).
+The switch ID within the tree is determined by the second u32 number of the
+same OF property (0 if missing). Registering multiple switches with the same
+switch ID and tree ID is illegal and will cause an error. Using platform data,
+a single switch and a single switch tree are permitted.
+
+In case of a tree with multiple switches, probing takes place asymmetrically.
+The first N-1 callers of ``dsa_register_switch()`` only add their ports to the
+port list of the tree (``dst->ports``), each port having a backpointer to its
+associated switch (``dp->ds``). Then, these switches exit their
+``dsa_register_switch()`` call early, because ``dsa_tree_setup_routing_table()``
+has determined that the tree is not yet complete (not all ports referenced by
+DSA links are present in the tree's port list). The tree becomes complete when
+the last switch calls ``dsa_register_switch()``, and this triggers the effective
+continuation of initialization (including the call to ``ds->ops->setup()``) for
+all switches within that tree, all as part of the calling context of the last
+switch's probe function.
+
+The opposite of registration takes place when calling ``dsa_unregister_switch()``,
+which removes a switch's ports from the port list of the tree. The entire tree
+is torn down when the first switch unregisters.
+
+It is mandatory for DSA switch drivers to implement the ``shutdown()`` callback
+of their respective bus, and call ``dsa_switch_shutdown()`` from it (a minimal
+version of the full teardown performed by ``dsa_unregister_switch()``).
+The reason is that DSA keeps a reference on the master net device, and if the
+driver for the master device decides to unbind on shutdown, DSA's reference
+will block that operation from finalizing.
+
+Either ``dsa_switch_shutdown()`` or ``dsa_unregister_switch()`` must be called,
+but not both, and the device driver model permits the bus' ``remove()`` method
+to be called even if ``shutdown()`` was already called. Therefore, drivers are
+expected to implement a mutual exclusion method between ``remove()`` and
+``shutdown()`` by setting their drvdata to NULL after any of these has run, and
+checking whether the drvdata is NULL before proceeding to take any action.
+
+After ``dsa_switch_shutdown()`` or ``dsa_unregister_switch()`` was called, no
+further callbacks via the provided ``dsa_switch_ops`` may take place, and the
+driver may free the data structures associated with the ``dsa_switch``.
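+
+As a hedged illustration, a minimal probe/remove/shutdown implementation for a
+hypothetical MDIO-based switch driver might look as follows; the ``foo_*``
+names, ``FOO_NUM_PORTS`` and the ``foo_switch_ops`` structure are assumptions,
+not an in-tree driver::
+
+    #include <linux/mdio.h>
+    #include <net/dsa.h>
+
+    struct foo_priv {
+        struct mdio_device *mdiodev; /* hypothetical driver-private state */
+    };
+
+    static int foo_mdio_probe(struct mdio_device *mdiodev)
+    {
+        struct foo_priv *priv;
+        struct dsa_switch *ds;
+
+        priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+        ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL);
+        if (!priv || !ds)
+            return -ENOMEM;
+
+        priv->mdiodev = mdiodev;
+
+        ds->dev = &mdiodev->dev;       /* used to parse OF node/platform data */
+        ds->num_ports = FOO_NUM_PORTS; /* hypothetical constant */
+        ds->ops = &foo_switch_ops;     /* the DSA method implementations */
+        ds->priv = priv;               /* retrievable in later DSA callbacks */
+
+        dev_set_drvdata(&mdiodev->dev, ds);
+
+        return dsa_register_switch(ds);
+    }
+
+    static void foo_mdio_remove(struct mdio_device *mdiodev)
+    {
+        struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+        if (!ds) /* shutdown() already ran */
+            return;
+
+        dsa_unregister_switch(ds);
+        dev_set_drvdata(&mdiodev->dev, NULL);
+    }
+
+    static void foo_mdio_shutdown(struct mdio_device *mdiodev)
+    {
+        struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+        if (!ds) /* remove() already ran */
+            return;
+
+        dsa_switch_shutdown(ds);
+        dev_set_drvdata(&mdiodev->dev, NULL);
+    }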
 
 Switch configuration
 --------------------
 
-- ``tag_protocol``: this is to indicate what kind of tagging protocol is supported,
-  should be a valid value from the ``dsa_tag_protocol`` enum
+- ``get_tag_protocol``: this indicates what kind of tagging protocol is
+  supported and should return a valid value from the ``dsa_tag_protocol`` enum
+  (see the sketch after this list).
+  The returned information does not have to be static; the driver is passed the
+  CPU port number, as well as the tagging protocol of a possibly stacked
+  upstream switch, in case there are hardware limitations in terms of supported
+  tag formats.
 
-- ``probe``: probe routine which will be invoked by the DSA platform device upon
-  registration to test for the presence/absence of a switch device. For MDIO
-  devices, it is recommended to issue a read towards internal registers using
-  the switch pseudo-PHY and return whether this is a supported device. For other
-  buses, return a non-NULL string
+- ``change_tag_protocol``: when the default tagging protocol has compatibility
+  problems with the master or other issues, the driver may support changing it
+  at runtime, either through a device tree property or through sysfs. In that
+  case, further calls to ``get_tag_protocol`` should report the protocol in
+  current use.
 
 - ``setup``: setup function for the switch, this function is responsible for setting
   up the ``dsa_switch_ops`` private structure with all it needs: register maps,
@@ -535,7 +617,17 @@ Switch configuration
   fully configured and ready to serve any kind of request. It is recommended
   to issue a software reset of the switch during this setup function in order to
   avoid relying on what a previous software agent such as a bootloader/firmware
-  may have previously configured.
+  may have previously configured. The method responsible for undoing any
+  applicable allocations or operations done here is ``teardown``.
+
+- ``port_setup`` and ``port_teardown``: methods for initialization and
+  destruction of per-port data structures. It is mandatory for some operations
+  such as registering and unregistering devlink port regions to be done from
+  these methods, otherwise they are optional. A port will be torn down only if
+  it has been previously set up. It is possible for a port to be set up during
+  probing only to be torn down immediately afterwards, for example in case its
+  PHY cannot be found. In this case, probing of the DSA switch continues
+  without that particular port.
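+
+A hedged sketch of how these methods could be wired up in a driver's
+``dsa_switch_ops``; the ``foo_*`` bodies are placeholders, while the method
+signatures follow ``include/net/dsa.h``::
+
+    #include <net/dsa.h>
+
+    static enum dsa_tag_protocol
+    foo_get_tag_protocol(struct dsa_switch *ds, int port,
+                         enum dsa_tag_protocol mprot)
+    {
+        /* may depend on the CPU port number and on the protocol
+         * "mprot" of a possibly stacked upstream switch
+         */
+        return DSA_TAG_PROTO_NONE; /* placeholder value */
+    }
+
+    static int foo_setup(struct dsa_switch *ds)
+    {
+        /* issue a software reset, set up register maps, etc. */
+        return 0;
+    }
+
+    static void foo_teardown(struct dsa_switch *ds)
+    {
+        /* undo any allocations or operations done in foo_setup() */
+    }
+
+    static int foo_port_setup(struct dsa_switch *ds, int port)
+    {
+        /* per-port data structures, devlink port regions, ... */
+        return 0;
+    }
+
+    static void foo_port_teardown(struct dsa_switch *ds, int port)
+    {
+        /* called only if foo_port_setup() succeeded for this port */
+    }
+
+    static const struct dsa_switch_ops foo_switch_ops = {
+        .get_tag_protocol = foo_get_tag_protocol,
+        .setup            = foo_setup,
+        .teardown         = foo_teardown,
+        .port_setup       = foo_port_setup,
+        .port_teardown    = foo_port_teardown,
+    };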
 
 PHY devices and link management
 -------------------------------
@@ -635,26 +727,198 @@ Power management
   ``BR_STATE_DISABLED`` and propagating changes to the hardware if this port is
   disabled while being a bridge member
 
+Address databases
+-----------------
+
+Switching hardware is expected to have a table for FDB entries; however, not
+all of them are active at the same time. An address database is the subset (partition)
+of FDB entries that is active (can be matched by address learning on RX, or FDB
+lookup on TX) depending on the state of the port. An address database may
+occasionally be called "FID" (Filtering ID) in this document, although the
+underlying implementation may choose whatever is available to the hardware.
+
+For example, all ports that belong to a VLAN-unaware bridge (which is
+*currently* VLAN-unaware) are expected to learn source addresses in the
+database associated by the driver with that bridge (and not with other
+VLAN-unaware bridges). During forwarding and FDB lookup, a packet received on a
+VLAN-unaware bridge port should be able to find a VLAN-unaware FDB entry having
+the same MAC DA as the packet, which is present on another port member of the
+same bridge. At the same time, the FDB lookup process must be able to not find
+an FDB entry having the same MAC DA as the packet, if that entry points towards
+a port which is a member of a different VLAN-unaware bridge (and is therefore
+associated with a different address database).
+
+Similarly, each VLAN of each offloaded VLAN-aware bridge should have an
+associated address database, which is shared by all ports which are members of
+that VLAN, but not shared by ports belonging to different bridges that are
+members of the same VID.
+
+In this context, a VLAN-unaware database means that all packets are expected to
+match on it irrespective of VLAN ID (only MAC address lookup), whereas a
+VLAN-aware database means that packets are supposed to match based on the VLAN
+ID from the classified 802.1Q header (or the pvid if untagged).
+
+At the bridge layer, VLAN-unaware FDB entries have the special VID value of 0,
+whereas VLAN-aware FDB entries have non-zero VID values. Note that a
+VLAN-unaware bridge may have VLAN-aware (non-zero VID) FDB entries, and a
+VLAN-aware bridge may have VLAN-unaware FDB entries. As in hardware, the
+software bridge keeps separate address databases, and offloads to hardware the
+FDB entries belonging to these databases, through switchdev, asynchronously
+relative to the moment when the databases become active or inactive.
+
+When a user port operates in standalone mode, its driver should configure it to
+use a separate database called a port private database. This is different from
+the databases described above, and should impede operation as a standalone port
+(packet in, packet out to the CPU port) as little as possible. For example,
+on ingress, it should not attempt to learn the MAC SA of ingress traffic, since
+learning is a bridging layer service and this is a standalone port, therefore
+it would consume useless space. With no address learning, the port private
+database should be empty in a naive implementation, and in this case, all
+received packets should be trivially flooded to the CPU port.
+
+DSA (cascade) and CPU ports are also called "shared" ports because they service
+multiple address databases, and the database with which a packet should be
+associated is usually embedded in the DSA tag. This means that the CPU port may
+simultaneously transport packets coming from a standalone port (which were
+classified by hardware in one address database), and from a bridge port (which
+were classified to a different address database).
+
+Switch drivers which satisfy certain criteria are able to optimize the naive
+configuration by removing the CPU port from the flooding domain of the switch,
+and programming the hardware only with FDB entries pointing towards the CPU
+port for the MAC addresses that software is known to be interested in.
+Packets which do not match a known FDB entry will not be delivered to the CPU,
+which saves the CPU cycles required for creating an skb just to drop it.
+
+DSA is able to perform host address filtering for the following kinds of
+addresses:
+
+- Primary unicast MAC addresses of ports (``dev->dev_addr``). These are
+  associated with the port private database of the respective user port,
+  and the driver is notified to install them through ``port_fdb_add`` towards
+  the CPU port.
+
+- Secondary unicast and multicast MAC addresses of ports (addresses added
+  through ``dev_uc_add()`` and ``dev_mc_add()``). These are also associated
+  with the port private database of the respective user port.
+
+- Local/permanent bridge FDB entries (``BR_FDB_LOCAL``). These are the MAC
+  addresses of the bridge ports, for which packets must be terminated locally
+  and not forwarded. They are associated with the address database for that
+  bridge.
+
+- Static bridge FDB entries installed towards foreign (non-DSA) interfaces
+  present in the same bridge as some DSA switch ports. These are also
+  associated with the address database for that bridge.
+
+- Dynamically learned FDB entries on foreign interfaces present in the same
+  bridge as some DSA switch ports, only if ``ds->assisted_learning_on_cpu_port``
+  is set to true by the driver. These are associated with the address database
+  for that bridge.
+
+For various operations detailed below, DSA provides a ``dsa_db`` structure
+which can be of the following types:
+
+- ``DSA_DB_PORT``: the FDB (or MDB) entry to be installed or deleted belongs to
+  the port private database of user port ``db->dp``.
+- ``DSA_DB_BRIDGE``: the entry belongs to one of the address databases of bridge
+  ``db->bridge``. Separation between the VLAN-unaware database and the per-VID
+  databases of this bridge is expected to be done by the driver.
+- ``DSA_DB_LAG``: the entry belongs to the address database of LAG ``db->lag``.
+  Note: ``DSA_DB_LAG`` is currently unused and may be removed in the future.
+
+The drivers which act upon the ``dsa_db`` argument in ``port_fdb_add``,
+``port_mdb_add`` etc. should declare ``ds->fdb_isolation`` as true.
+
+DSA associates each offloaded bridge and each offloaded LAG with a one-based ID
+(``struct dsa_bridge :: num``, ``struct dsa_lag :: id``) for the purposes of
+refcounting addresses on shared ports. Drivers may piggyback on DSA's numbering
+scheme (the ID is readable through ``db->bridge.num`` and ``db->lag.id``) or may
+implement their own.
+
+Only the drivers which declare support for FDB isolation are notified of FDB
+entries on the CPU port belonging to ``DSA_DB_PORT`` databases.
+For compatibility/legacy reasons, ``DSA_DB_BRIDGE`` addresses are notified to
+drivers even if they do not support FDB isolation. However, ``db->bridge.num``
+and ``db->lag.id`` are always set to 0 in that case (to denote the lack of
+isolation, for refcounting purposes).
+
+Note that it is not mandatory for a switch driver to implement physically
+separate address databases for each standalone user port. Since FDB entries in
+the port private databases will always point to the CPU port, there is no risk
+for incorrect forwarding decisions. In this case, all standalone ports may
+share the same database, but the reference counting of host-filtered addresses
+(not deleting the FDB entry for a port's MAC address if it's still in use by
+another port) becomes the responsibility of the driver, because DSA is unaware
+that the port databases are in fact shared. This can be achieved by calling
+``dsa_fdb_present_in_other_db()`` and ``dsa_mdb_present_in_other_db()``.
+The down side is that the RX filtering lists of each user port are in fact
+shared, which means that user port A may accept a packet with a MAC DA it
+shouldn't have, only because that MAC address was in the RX filtering list of
+user port B. These packets will still be dropped in software, however.
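+
+As a hedged sketch, a driver declaring ``ds->fdb_isolation`` might dispatch on
+the ``dsa_db`` argument as below; the ``foo_*`` helpers and the FID allocation
+scheme are hypothetical::
+
+    #include <net/dsa.h>
+
+    static int foo_port_fdb_add(struct dsa_switch *ds, int port,
+                                const unsigned char *addr, u16 vid,
+                                struct dsa_db db)
+    {
+        struct foo_priv *priv = ds->priv;
+        u16 fid;
+
+        switch (db.type) {
+        case DSA_DB_PORT:
+            /* port private database of user port db.dp */
+            fid = foo_port_private_fid(priv, db.dp->index);
+            break;
+        case DSA_DB_BRIDGE:
+            /* separation between the VLAN-unaware database and the
+             * per-VID databases of bridge db.bridge is the driver's job
+             */
+            fid = foo_bridge_fid(priv, db.bridge.num, vid);
+            break;
+        default:
+            return -EOPNOTSUPP;
+        }
+
+        return foo_fdb_write(priv, fid, addr, vid, port);
+    }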
+
 Bridge layer
 ------------
 
+Offloading the bridge forwarding plane is optional and handled by the methods
+below. They may be absent, return -EOPNOTSUPP, or ``ds->max_num_bridges`` may
+be non-zero and exceeded. In all of these cases, joining a bridge port is still
+possible, but the packet forwarding will take place in software, and the ports
+under a software bridge must remain configured in the same way as for
+standalone operation, i.e. have all bridging service functions (address
+learning, etc.) disabled, and send all received packets to the CPU port only.
+
+Concretely, a port starts offloading the forwarding plane of a bridge once it
+returns success to the ``port_bridge_join`` method, and stops doing so after
+``port_bridge_leave`` has been called. Offloading the bridge means autonomously
+learning FDB entries in accordance with the software bridge port's state, and
+autonomously forwarding (or flooding) received packets without CPU intervention.
+This is optional even when offloading a bridge port. Tagging protocol drivers
+are expected to call ``dsa_default_offload_fwd_mark(skb)`` for packets which
+have already been autonomously forwarded in the forwarding domain of the
+ingress switch port. DSA, through ``dsa_port_devlink_setup()``, considers all
+switch ports within the same tree ID to be part of the same bridge forwarding
+domain (capable of autonomous forwarding to each other).
+
+Offloading the TX forwarding process of a bridge is a distinct concept from
+simply offloading its forwarding plane, and refers to the ability of certain
+driver and tag protocol combinations to transmit a single skb coming from the
+bridge device's transmit function to potentially multiple egress ports (and
+thereby avoid its cloning in software).
+
+Packets for which the bridge requests this behavior are called data plane
+packets and have ``skb->offload_fwd_mark`` set to true in the tag protocol
+driver's ``xmit`` function. Data plane packets are subject to FDB lookup,
+hardware learning on the CPU port, and do not override the port STP state.
+Additionally, replication of data plane packets (multicast, flooding) is
+handled in hardware and the bridge driver will transmit a single skb for each
+packet that may or may not need replication.
+
+When the TX forwarding offload is enabled, the tag protocol driver is
+responsible for injecting packets into the data plane of the hardware towards
+the correct bridging domain (FID) that the port is a part of. The port may be
+VLAN-unaware, and in this case the FID must be equal to the FID used by the
+driver for its VLAN-unaware address database associated with that bridge.
+Alternatively, the bridge may be VLAN-aware, and in that case, it is guaranteed
+that the packet is also VLAN-tagged with the VLAN ID that the bridge processed
+this packet in. It is the responsibility of the hardware to untag the VID on
+the egress-untagged ports, or keep the tag on the egress-tagged ones.
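+
+As a hedged sketch, a tagging protocol driver's ``xmit`` function might make
+the distinction as follows; the tag layout and the ``foo_tag_*`` helpers are
+hypothetical (tag drivers live in ``net/dsa/`` and include ``dsa_priv.h`` for
+``dsa_slave_to_port()``)::
+
+    static struct sk_buff *foo_tag_xmit(struct sk_buff *skb,
+                                        struct net_device *dev)
+    {
+        struct dsa_port *dp = dsa_slave_to_port(dev);
+        u8 *tag = skb_push(skb, FOO_TAG_LEN); /* hypothetical tag length */
+
+        if (skb->offload_fwd_mark) {
+            /* data plane packet: inject into the bridging domain (FID)
+             * of the port; FDB lookup and replication then happen in
+             * hardware
+             */
+            foo_tag_set_fid(tag, foo_port_bridge_fid(dp));
+        } else {
+            /* control packet: force egress through dp->index,
+             * bypassing the FDB and the port's STP state
+             */
+            foo_tag_set_port(tag, dp->index);
+        }
+
+        return skb;
+    }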
+
 - ``port_bridge_join``: bridge layer function invoked when a given switch port is
   added to a bridge, this function should do what's necessary at the switch
   level to permit the joining port to be added to the relevant logical
   domain for it to ingress/egress traffic with other members of the bridge.
+  By setting the ``tx_fwd_offload`` argument to true, the TX forwarding process
+  of this bridge is also offloaded.
 
 - ``port_bridge_leave``: bridge layer function invoked when a given switch port is
   removed from a bridge, this function should do what's necessary at the
   switch level to deny the leaving port from ingress/egress traffic from the
-  remaining bridge members. When the port leaves the bridge, it should be aged
-  out at the switch hardware for the switch to (re) learn MAC addresses behind
-  this port.
+  remaining bridge members.
 
 - ``port_stp_state_set``: bridge layer function invoked when a given switch port STP
   state is computed by the bridge layer and should be propagated to switch
-  hardware to forward/block/learn traffic. The switch driver is responsible for
-  computing a STP state change based on current and asked parameters and perform
-  the relevant ageing based on the intersection results
+  hardware to forward/block/learn traffic.
 
 - ``port_bridge_flags``: bridge layer function invoked when a port must
   configure its settings for e.g. flooding of unknown traffic or source address
@@ -667,21 +931,11 @@ Bridge layer
   CPU port, and flooding towards the CPU port should also be enabled, due to a
   lack of an explicit address filtering mechanism in the DSA core.
 
-- ``port_bridge_tx_fwd_offload``: bridge layer function invoked after
-  ``port_bridge_join`` when a driver sets ``ds->num_fwd_offloading_bridges`` to
-  a non-zero value. Returning success in this function activates the TX
-  forwarding offload bridge feature for this port, which enables the tagging
-  protocol driver to inject data plane packets towards the bridging domain that
-  the port is a part of. Data plane packets are subject to FDB lookup, hardware
-  learning on the CPU port, and do not override the port STP state.
-  Additionally, replication of data plane packets (multicast, flooding) is
-  handled in hardware and the bridge driver will transmit a single skb for each
-  packet that needs replication. The method is provided as a configuration
-  point for drivers that need to configure the hardware for enabling this
-  feature.
-
-- ``port_bridge_tx_fwd_unoffload``: bridge layer function invoked when a driver
-  leaves a bridge port which had the TX forwarding offload feature enabled.
+- ``port_fast_age``: bridge layer function invoked when flushing the
+  dynamically learned FDB entries on the port is necessary. This is called when
+  transitioning from an STP state where learning should take place to an STP
+  state where it shouldn't, or when leaving a bridge, or when address learning
+  is turned off via ``port_bridge_flags``.
 
 Bridge VLAN filtering
 ---------------------
@@ -697,55 +951,44 @@ Bridge VLAN filtering
   allowed.
 
 - ``port_vlan_add``: bridge layer function invoked when a VLAN is configured
-  (tagged or untagged) for the given switch port. If the operation is not
-  supported by the hardware, this function should return ``-EOPNOTSUPP`` to
-  inform the bridge code to fallback to a software implementation.
+  (tagged or untagged) for the given switch port. The CPU port becomes a member
+  of a VLAN only if a foreign bridge port is also a member of it (and
+  forwarding needs to take place in software), or the VLAN is installed to the
+  VLAN group of the bridge device itself, for termination purposes
+  (``bridge vlan add dev br0 vid 100 self``). VLANs on shared ports are
+  reference counted and removed when there is no user left. Drivers do not need
+  to manually install a VLAN on the CPU port.
 
 - ``port_vlan_del``: bridge layer function invoked when a VLAN is removed from the
   given switch port
 
-- ``port_vlan_dump``: bridge layer function invoked with a switchdev callback
-  function that the driver has to call for each VLAN the given port is a member
-  of. A switchdev object is used to carry the VID and bridge flags.
-
 - ``port_fdb_add``: bridge layer function invoked when the bridge wants to install a
   Forwarding Database entry, the switch hardware should be programmed with the
   specified address in the specified VLAN Id in the forwarding database
-  associated with this VLAN ID. If the operation is not supported, this
-  function should return ``-EOPNOTSUPP`` to inform the bridge code to fallback to
-  a software implementation.
-
-.. note:: VLAN ID 0 corresponds to the port private database, which, in the context
-        of DSA, would be its port-based VLAN, used by the associated bridge device.
+  associated with this VLAN ID.
 
 - ``port_fdb_del``: bridge layer function invoked when the bridge wants to remove a
   Forwarding Database entry, the switch hardware should be programmed to delete
   the specified MAC address from the specified VLAN ID if it was mapped into
   this port forwarding database
 
-- ``port_fdb_dump``: bridge layer function invoked with a switchdev callback
-  function that the driver has to call for each MAC address known to be behind
-  the given port. A switchdev object is used to carry the VID and FDB info.
+- ``port_fdb_dump``: bridge bypass function invoked by ``ndo_fdb_dump`` on the
+  physical DSA port interfaces. Since DSA does not attempt to keep its
+  hardware FDB entries in sync with the software bridge, this method is
+  implemented as a means to view the entries visible on user ports in the
+  hardware database.
+  The entries reported by this function have the ``self`` flag in the output of
+  the ``bridge fdb show`` command.
 
 - ``port_mdb_add``: bridge layer function invoked when the bridge wants to install
-  a multicast database entry. If the operation is not supported, this function
-  should return ``-EOPNOTSUPP`` to inform the bridge code to fallback to a
-  software implementation. The switch hardware should be programmed with the
+  a multicast database entry. The switch hardware should be programmed with the
   specified address in the specified VLAN ID in the forwarding database
   associated with this VLAN ID.
 
-.. note:: VLAN ID 0 corresponds to the port private database, which, in the context
-        of DSA, would be its port-based VLAN, used by the associated bridge device.
-
 - ``port_mdb_del``: bridge layer function invoked when the bridge wants to remove a
   multicast database entry, the switch hardware should be programmed to delete
   the specified MAC address from the specified VLAN ID if it was mapped into
   this port forwarding database.
 
-- ``port_mdb_dump``: bridge layer function invoked with a switchdev callback
-  function that the driver has to call for each MAC address known to be behind
-  the given port. A switchdev object is used to carry the VID and MDB info.
-
 Link aggregation
 ----------------
 
index 0421656..d7a1bf1 100644 (file)
@@ -1052,11 +1052,7 @@ udp_rmem_min - INTEGER
        Default: 4K
 
 udp_wmem_min - INTEGER
-       Minimal size of send buffer used by UDP sockets in moderation.
-       Each UDP socket is able to use the size for sending data, even if
-       total pages of UDP sockets exceed udp_mem pressure. The unit is byte.
-
-       Default: 4K
+       UDP does not have tx memory accounting and this tunable has no effect.
 
 RAW variables
 =============
@@ -1085,7 +1081,7 @@ cipso_cache_enable - BOOLEAN
 cipso_cache_bucket_size - INTEGER
        The CIPSO label cache consists of a fixed size hash table with each
        hash bucket containing a number of cache entries.  This variable limits
-       the number of entries in each hash bucket; the larger the value the
+       the number of entries in each hash bucket; the larger the value is, the
        more CIPSO label mappings that can be cached.  When the number of
        entries in a given hash bucket reaches this limit adding new entries
        causes the oldest entry in the bucket to be removed to make room.
@@ -1179,7 +1175,7 @@ ip_autobind_reuse - BOOLEAN
        option should only be set by experts.
        Default: 0
 
-ip_dynaddr - BOOLEAN
+ip_dynaddr - INTEGER
        If set non-zero, enables support for dynamic addresses.
        If set to a non-zero value larger than 1, a kernel log
        message will be printed when dynamic address rewriting
@@ -2870,7 +2866,14 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max
        Default: 4K
 
 sctp_wmem  - vector of 3 INTEGERs: min, default, max
-       Currently this tunable has no effect.
+       Only the first value ("min") is used, "default" and "max" are
+       ignored.
+
+       min: Minimum size of send buffer that can be used by SCTP sockets.
+       It is guaranteed to each SCTP socket (but not association) even
+       under moderate memory pressure.
+
+       Default: 4K
 
 addr_scope_policy - INTEGER
        Control IPv4 address scoping - draft-stewart-tsvwg-sctp-ipv4-00
@@ -2925,6 +2928,43 @@ plpmtud_probe_interval - INTEGER
 
        Default: 0
 
+reconf_enable - BOOLEAN
+        Enable or disable the Stream Reconfiguration extension specified in
+        RFC6525. This extension provides the ability to "reset" a stream, and
+        it includes the parameters "Outgoing/Incoming SSN Reset", "SSN/TSN
+        Reset" and "Add Outgoing/Incoming Streams".
+
+       - 1: Enable extension.
+       - 0: Disable extension.
+
+       Default: 0
+
+intl_enable - BOOLEAN
+        Enable or disable the User Message Interleaving extension specified in
+        RFC8260. This extension allows the interleaving of user messages sent
+        on different streams. With this feature enabled, I-DATA chunks will
+        replace DATA chunks to carry user messages if this is also supported
+        by the peer. Note that to use this feature, one needs to set this option
+        to 1 and also needs to set socket options SCTP_FRAGMENT_INTERLEAVE to 2
+        and SCTP_INTERLEAVING_SUPPORTED to 1 (see the sketch at the end of
+        this section).
+
+       - 1: Enable extension.
+       - 0: Disable extension.
+
+       Default: 0
+
+ecn_enable - BOOLEAN
+        Control use of Explicit Congestion Notification (ECN) by SCTP.
+        Like in TCP, ECN is used only when both ends of the SCTP connection
+        indicate support for it. This feature is useful in avoiding losses
+        due to congestion by allowing supporting routers to signal congestion
+        before having to drop packets.
+
+       - 1: Enable ECN.
+       - 0: Disable ECN.
+
+       Default: 1
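+
+As a hedged userspace sketch of the socket options mentioned under
+``intl_enable`` (assuming recent lksctp-tools headers; error handling kept
+minimal)::
+
+    #include <netinet/in.h>
+    #include <netinet/sctp.h>
+    #include <sys/socket.h>
+
+    int enable_interleaving(int sd)
+    {
+        int frag = 2; /* SCTP_FRAGMENT_INTERLEAVE level 2 */
+        struct sctp_assoc_value intl = {
+            .assoc_id = 0,
+            .assoc_value = 1, /* SCTP_INTERLEAVING_SUPPORTED on */
+        };
+
+        if (setsockopt(sd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
+                       &frag, sizeof(frag)))
+            return -1;
+        return setsockopt(sd, IPPROTO_SCTP, SCTP_INTERLEAVING_SUPPORTED,
+                          &intl, sizeof(intl));
+    }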
+
 
 ``/proc/sys/net/core/*``
 ========================
index d43da70..704f31d 100644 (file)
@@ -104,7 +104,7 @@ Whenever possible, use the PHY side RGMII delay for these reasons:
 
 * PHY device drivers in PHYLIB being reusable by nature, being able to
   configure correctly a specified delay enables more designs with similar delay
-  requirements to be operate correctly
+  requirements to be operated correctly
 
 For cases where the PHY is not capable of providing this delay, but the
 Ethernet MAC driver is capable of doing so, the correct phy_interface_t value
index c456b52..d140070 100644 (file)
@@ -6,6 +6,15 @@
 netdev FAQ
 ==========
 
+tl;dr
+-----
+
+ - designate your patch to a tree - ``[PATCH net]`` or ``[PATCH net-next]``
+ - for fixes the ``Fixes:`` tag is required, regardless of the tree
+ - don't post large series (> 15 patches), break them up
+ - don't repost your patches within one 24h period
+ - reverse xmas tree
+
 What is netdev?
 ---------------
 It is a mailing list for all network-related Linux stuff.  This
@@ -136,6 +145,20 @@ it to the maintainer to figure out what is the most recent and current
 version that should be applied. If there is any doubt, the maintainer
 will reply and ask what should be done.
 
+How do I divide my work into patches?
+-------------------------------------
+
+Put yourself in the shoes of the reviewer. Each patch is read separately
+and therefore should constitute a comprehensible step towards your stated
+goal.
+
+Avoid sending series longer than 15 patches. Larger series take longer
+to review, as reviewers will defer looking at them until they find a large
+chunk of time. A small series can be reviewed in a short time, so maintainers
+just do it. As a result, a sequence of smaller series gets merged quicker and
+with better review coverage. Re-posting large series also increases the mailing
+list traffic.
+
 I made changes to only a few patches in a patch series should I resend only those changed?
 ------------------------------------------------------------------------------------------
 No, please resend the entire patch series and make sure you do number your
@@ -183,6 +206,19 @@ it is requested that you make it look like this::
    * another line of text
    */
 
+What is "reverse xmas tree"?
+----------------------------
+
+Netdev has a convention for ordering local variables in functions.
+Order the variable declaration lines longest to shortest, e.g.::
+
+  struct scatterlist *sg;
+  struct sk_buff *skb;
+  int err, i;
+
+If there are dependencies between the variables that prevent the ordering,
+move the initialization out of line.
+
 I am working in existing code which uses non-standard formatting. Which formatting should I use?
 ------------------------------------------------------------------------------------------------
 Make your code follow the most recent guidelines, so that eventually all code
index 009b07e..bf84313 100644 (file)
@@ -10,7 +10,7 @@ AC97
 ====
 
 AC97 is a five wire interface commonly found on many PC sound cards. It is
-now also popular in many portable devices. This DAI has a reset line and time
+now also popular in many portable devices. This DAI has a RESET line and time
 multiplexes its data on its SDATA_OUT (playback) and SDATA_IN (capture) lines.
 The bit clock (BCLK) is always driven by the CODEC (usually 12.288MHz) and the
 frame (FRAME) (usually 48kHz) is always driven by the controller. Each AC97
index 42f5d04..0f68988 100644 (file)
@@ -50,9 +50,9 @@ Di conseguenza, nella tabella dei simboli del kernel ci sarà una voce
 rappresentata dalla struttura ``kernel_symbol`` che avrà il campo
 ``namespace`` (spazio dei nomi) impostato. Un simbolo esportato senza uno spazio
 dei nomi avrà questo campo impostato a ``NULL``. Non esiste uno spazio dei nomi
-di base. Il programma ``modpost`` e il codice in kernel/module.c usano lo spazio
-dei nomi, rispettivamente, durante la compilazione e durante il caricamento
-di un modulo.
+di base. Il programma ``modpost`` e il codice in kernel/module/main.c usano lo
+spazio dei nomi, rispettivamente, durante la compilazione e durante il
+caricamento di un modulo.
 
 2.2 Usare il simbolo di preprocessore DEFAULT_SYMBOL_NAMESPACE
 ==============================================================
index e45fe80..962d31d 100644 (file)
@@ -224,7 +224,7 @@ kernel/kmod.c
 模块接口支持
 ------------
 
-更多信息请参考文件kernel/module.c
+更多信息请参阅kernel/module/目录下的文件
 
 硬件接口
 ========
index 6abf7ed..bb16f06 100644 (file)
@@ -52,7 +52,7 @@
 
 相应的 ksymtab 条目结构体 ``kernel_symbol`` 将有相应的成员 ``命名空间`` 集。
 导出时未指明命名空间的符号将指向 ``NULL`` 。如果没有定义命名空间,则默认没有。
-``modpost`` 和kernel/module.c分别在构建时或模块加载时使用名称空间。
+``modpost`` 和kernel/module/main.c分别在构建时或模块加载时使用名称空间。
 
 2.2 使用DEFAULT_SYMBOL_NAMESPACE定义
 ====================================
index e31a1a9..11686ee 100644 (file)
@@ -46,10 +46,11 @@ LA64中每个寄存器为64位宽。 ``$r0`` 的内容总是固定为0,而其
 ``$r23``-``$r31`` ``$s0``-``$s8`` 静态寄存器          是
 ================= =============== =================== ==========
 
-注意:``$r21``寄存器在ELF psABI中保留未使用,但是在Linux内核用于保存每CPU
-变量基地址。该寄存器没有ABI命名,不过在内核中称为``$u0``。在一些遗留代码
-中有时可能见到``$v0``和``$v1``,它们是``$a0``和``$a1``的别名,属于已经废弃
-的用法。
+.. note::
+    注意: ``$r21`` 寄存器在ELF psABI中保留未使用,但是在Linux内核用于保
+    存每CPU变量基地址。该寄存器没有ABI命名,不过在内核中称为 ``$u0`` 。在
+    一些遗留代码中有时可能见到 ``$v0`` 和 ``$v1`` ,它们是 ``$a0`` 和
+    ``$a1`` 的别名,属于已经废弃的用法。
 
 浮点寄存器
 ----------
@@ -68,8 +69,9 @@ LA64中每个寄存器为64位宽。 ``$r0`` 的内容总是固定为0,而其
 ``$f24``-``$f31`` ``$fs0``-``$fs7``  静态寄存器          是
 ================= ================== =================== ==========
 
-注意:在一些遗留代码中有时可能见到 ``$v0`` 和 ``$v1`` ,它们是 ``$a0``
-和 ``$a1`` 的别名,属于已经废弃的用法。
+.. note::
+    注意:在一些遗留代码中有时可能见到 ``$v0`` 和 ``$v1`` ,它们是
+    ``$a0`` 和 ``$a1`` 的别名,属于已经废弃的用法。
 
 
 向量寄存器
index 2a4c3ad..fb5d23b 100644 (file)
@@ -147,9 +147,11 @@ PCH-LPC::
 
   https://github.com/loongson/LoongArch-Documentation/releases/latest/download/Loongson-7A1000-usermanual-2.00-EN.pdf (英文版)
 
-注:CPUINTC即《龙芯架构参考手册卷一》第7.4节所描述的CSR.ECFG/CSR.ESTAT寄存器及其中断
-控制逻辑;LIOINTC即《龙芯3A5000处理器使用手册》第11.1节所描述的“传统I/O中断”;EIOINTC
-即《龙芯3A5000处理器使用手册》第11.2节所描述的“扩展I/O中断”;HTVECINTC即《龙芯3A5000
-处理器使用手册》第14.3节所描述的“HyperTransport中断”;PCH-PIC/PCH-MSI即《龙芯7A1000桥
-片用户手册》第5章所描述的“中断控制器”;PCH-LPC即《龙芯7A1000桥片用户手册》第24.3节所
-描述的“LPC中断”。
+.. note::
+    - CPUINTC:即《龙芯架构参考手册卷一》第7.4节所描述的CSR.ECFG/CSR.ESTAT寄存器及其
+      中断控制逻辑;
+    - LIOINTC:即《龙芯3A5000处理器使用手册》第11.1节所描述的“传统I/O中断”;
+    - EIOINTC:即《龙芯3A5000处理器使用手册》第11.2节所描述的“扩展I/O中断”;
+    - HTVECINTC:即《龙芯3A5000处理器使用手册》第14.3节所描述的“HyperTransport中断”;
+    - PCH-PIC/PCH-MSI:即《龙芯7A1000桥片用户手册》第5章所描述的“中断控制器”;
+    - PCH-LPC:即《龙芯7A1000桥片用户手册》第24.3节所描述的“LPC中断”。
index 11e00a4..98a2839 100644 (file)
@@ -5657,7 +5657,8 @@ by a string of size ``name_size``.
        #define KVM_STATS_UNIT_BYTES            (0x1 << KVM_STATS_UNIT_SHIFT)
        #define KVM_STATS_UNIT_SECONDS          (0x2 << KVM_STATS_UNIT_SHIFT)
        #define KVM_STATS_UNIT_CYCLES           (0x3 << KVM_STATS_UNIT_SHIFT)
-       #define KVM_STATS_UNIT_MAX              KVM_STATS_UNIT_CYCLES
+       #define KVM_STATS_UNIT_BOOLEAN          (0x4 << KVM_STATS_UNIT_SHIFT)
+       #define KVM_STATS_UNIT_MAX              KVM_STATS_UNIT_BOOLEAN
 
        #define KVM_STATS_BASE_SHIFT            8
        #define KVM_STATS_BASE_MASK             (0xF << KVM_STATS_BASE_SHIFT)
@@ -5702,14 +5703,13 @@ Bits 0-3 of ``flags`` encode the type:
     by the ``hist_param`` field. The range of the Nth bucket (1 <= N < ``size``)
     is [``hist_param``*(N-1), ``hist_param``*N), while the range of the last
     bucket is [``hist_param``*(``size``-1), +INF). (+INF means positive infinity
-    value.) The bucket value indicates how many samples fell in the bucket's range.
+    value.)
   * ``KVM_STATS_TYPE_LOG_HIST``
     The statistic is reported as a logarithmic histogram. The number of
     buckets is specified by the ``size`` field. The range of the first bucket is
     [0, 1), while the range of the last bucket is [pow(2, ``size``-2), +INF).
    Otherwise, the Nth bucket (1 < N < ``size``) covers
-    [pow(2, N-2), pow(2, N-1)). The bucket value indicates how many samples fell
-    in the bucket's range.
+    [pow(2, N-2), pow(2, N-1)).
 
 Bits 4-7 of ``flags`` encode the unit:
 
@@ -5724,6 +5724,15 @@ Bits 4-7 of ``flags`` encode the unit:
     It indicates that the statistics data is used to measure time or latency.
   * ``KVM_STATS_UNIT_CYCLES``
     It indicates that the statistics data is used to measure CPU clock cycles.
+  * ``KVM_STATS_UNIT_BOOLEAN``
+    It indicates that the statistic will always be either 0 or 1.  Boolean
+    statistics of "peak" type will never go back from 1 to 0.  Boolean
+    statistics can be linear histograms (with two buckets) but not logarithmic
+    histograms.
+
+Note that, in the case of histograms, the unit applies to the bucket
+ranges, while the bucket value indicates how many samples fell in the
+bucket's range.
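+
+As a hedged userspace sketch relying only on the definitions above from
+``<linux/kvm.h>``, the unit encoded in ``flags`` could be decoded as::
+
+    #include <linux/kvm.h>
+
+    static const char *stat_unit(const struct kvm_stats_desc *desc)
+    {
+        switch (desc->flags & KVM_STATS_UNIT_MASK) {
+        case KVM_STATS_UNIT_NONE:    return "";
+        case KVM_STATS_UNIT_BYTES:   return "bytes";
+        case KVM_STATS_UNIT_SECONDS: return "seconds";
+        case KVM_STATS_UNIT_CYCLES:  return "cycles";
+        case KVM_STATS_UNIT_BOOLEAN: return "boolean";
+        default:                     return "unknown";
+        }
+    }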
 
 Bits 8-11 of ``flags``, together with ``exponent``, encode the scale of the
 unit:
@@ -5746,7 +5755,7 @@ the corresponding statistics data.
 
 The ``bucket_size`` field is used as a parameter for histogram statistics data.
 It is only used by linear histogram statistics data, specifying the size of a
-bucket.
+bucket in the unit expressed by bits 4-11 of ``flags`` together with ``exponent``.
 
 The ``name`` field is the name string of the statistics data. The name string
 starts at the end of ``struct kvm_stats_desc``.  The maximum length including
index 4d43fbc..412b276 100644 (file)
@@ -60,12 +60,13 @@ these functions (see arch/arm{,64}/include/asm/virt.h):
 
 * ::
 
-    x0 = HVC_VHE_RESTART (arm64 only)
+    x0 = HVC_FINALISE_EL2 (arm64 only)
 
-  Attempt to upgrade the kernel's exception level from EL1 to EL2 by enabling
-  the VHE mode. This is conditioned by the CPU supporting VHE, the EL2 MMU
-  being off, and VHE not being disabled by any other means (command line
-  option, for example).
+  Finish configuring EL2 depending on the command-line options,
+  including an attempt to upgrade the kernel's exception level from
+  EL1 to EL2 by enabling the VHE mode. This is conditioned by the CPU
+  supporting VHE, the EL2 MMU being off, and VHE not being disabled by
+  any other means (command line option, for example).
 
 Any other value of r0/x0 triggers a hypervisor-specific handling,
 which is not documented here.
index c742de1..b9d5253 100644 (file)
@@ -120,7 +120,8 @@ Testing
   unpoison-pfn
        Software-unpoison page at PFN echoed into this file. This way
        a page can be reused again.  This only works for Linux
-       injected failures, not for real memory failures.
+       injected failures, not for real memory failures. Once any hardware
+       memory failure happens, this feature is disabled.
 
   Note these injection interfaces are not stable and might change between
   kernel versions
index 1fc9ead..04ec80e 100644 (file)
@@ -426,7 +426,7 @@ F:  drivers/acpi/*thermal*
 ACPI VIOT DRIVER
 M:     Jean-Philippe Brucker <jean-philippe@linaro.org>
 L:     linux-acpi@vger.kernel.org
-L:     iommu@lists.linux-foundation.org
+L:     iommu@lists.linux.dev
 S:     Maintained
 F:     drivers/acpi/viot.c
 F:     include/linux/acpi_viot.h
@@ -959,7 +959,7 @@ F:  drivers/video/fbdev/geode/
 AMD IOMMU (AMD-VI)
 M:     Joerg Roedel <joro@8bytes.org>
 R:     Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
-L:     iommu@lists.linux-foundation.org
+L:     iommu@lists.linux.dev
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
 F:     drivers/iommu/amd/
@@ -1038,6 +1038,7 @@ F:        arch/arm64/boot/dts/amd/
 
 AMD XGBE DRIVER
 M:     Tom Lendacky <thomas.lendacky@amd.com>
+M:     "Shyam Sundar S K" <Shyam-sundar.S-k@amd.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     arch/arm64/boot/dts/amd/amd-seattle-xgbe*.dtsi
@@ -2467,6 +2468,7 @@ ARM/NXP S32G ARCHITECTURE
 M:     Chester Lin <clin@suse.com>
 R:     Andreas Färber <afaerber@suse.de>
 R:     Matthias Brugger <mbrugger@suse.com>
+R:     NXP S32 Linux Team <s32@nxp.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm64/boot/dts/freescale/s32g*.dts*
@@ -2496,10 +2498,8 @@ F:       drivers/power/reset/oxnas-restart.c
 N:     oxnas
 
 ARM/PALM TREO SUPPORT
-M:     Tomas Cech <sleep_walker@suse.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Maintained
-W:     http://hackndev.com
+S:     Orphan
 F:     arch/arm/mach-pxa/palmtreo.*
 
 ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT
@@ -2537,6 +2537,7 @@ W:        http://www.armlinux.org.uk/
 ARM/QUALCOMM SUPPORT
 M:     Andy Gross <agross@kernel.org>
 M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+R:     Konrad Dybcio <konrad.dybcio@somainline.org>
 L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git
@@ -3614,16 +3615,18 @@ S:      Maintained
 F:     Documentation/devicetree/bindings/iio/accel/bosch,bma400.yaml
 F:     drivers/iio/accel/bma400*
 
-BPF (Safe dynamic programs and tools)
+BPF [GENERAL] (Safe Dynamic Programs and Tools)
 M:     Alexei Starovoitov <ast@kernel.org>
 M:     Daniel Borkmann <daniel@iogearbox.net>
 M:     Andrii Nakryiko <andrii@kernel.org>
-R:     Martin KaFai Lau <kafai@fb.com>
-R:     Song Liu <songliubraving@fb.com>
+R:     Martin KaFai Lau <martin.lau@linux.dev>
+R:     Song Liu <song@kernel.org>
 R:     Yonghong Song <yhs@fb.com>
 R:     John Fastabend <john.fastabend@gmail.com>
 R:     KP Singh <kpsingh@kernel.org>
-L:     netdev@vger.kernel.org
+R:     Stanislav Fomichev <sdf@google.com>
+R:     Hao Luo <haoluo@google.com>
+R:     Jiri Olsa <jolsa@kernel.org>
 L:     bpf@vger.kernel.org
 S:     Supported
 W:     https://bpf.io/
@@ -3655,21 +3658,17 @@ F:      scripts/pahole-version.sh
 F:     tools/bpf/
 F:     tools/lib/bpf/
 F:     tools/testing/selftests/bpf/
-N:     bpf
-K:     bpf
 
 BPF JIT for ARM
 M:     Shubham Bansal <illusionist.neo@gmail.com>
-L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
-S:     Maintained
+S:     Odd Fixes
 F:     arch/arm/net/
 
 BPF JIT for ARM64
 M:     Daniel Borkmann <daniel@iogearbox.net>
 M:     Alexei Starovoitov <ast@kernel.org>
 M:     Zi Shen Lim <zlim.lnx@gmail.com>
-L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Supported
 F:     arch/arm64/net/
@@ -3677,29 +3676,26 @@ F:      arch/arm64/net/
 BPF JIT for MIPS (32-BIT AND 64-BIT)
 M:     Johan Almbladh <johan.almbladh@anyfinetworks.com>
 M:     Paul Burton <paulburton@kernel.org>
-L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Maintained
 F:     arch/mips/net/
 
 BPF JIT for NFP NICs
 M:     Jakub Kicinski <kuba@kernel.org>
-L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
-S:     Supported
+S:     Odd Fixes
 F:     drivers/net/ethernet/netronome/nfp/bpf/
 
 BPF JIT for POWERPC (32-BIT AND 64-BIT)
 M:     Naveen N. Rao <naveen.n.rao@linux.ibm.com>
-L:     netdev@vger.kernel.org
+M:     Michael Ellerman <mpe@ellerman.id.au>
 L:     bpf@vger.kernel.org
-S:     Maintained
+S:     Supported
 F:     arch/powerpc/net/
 
 BPF JIT for RISC-V (32-bit)
 M:     Luke Nelson <luke.r.nels@gmail.com>
 M:     Xi Wang <xi.wang@gmail.com>
-L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Maintained
 F:     arch/riscv/net/
@@ -3707,7 +3703,6 @@ X:        arch/riscv/net/bpf_jit_comp64.c
 
 BPF JIT for RISC-V (64-bit)
 M:     Björn Töpel <bjorn@kernel.org>
-L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Maintained
 F:     arch/riscv/net/
@@ -3717,36 +3712,80 @@ BPF JIT for S390
 M:     Ilya Leoshkevich <iii@linux.ibm.com>
 M:     Heiko Carstens <hca@linux.ibm.com>
 M:     Vasily Gorbik <gor@linux.ibm.com>
-L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
-S:     Maintained
+S:     Supported
 F:     arch/s390/net/
 X:     arch/s390/net/pnet.c
 
 BPF JIT for SPARC (32-BIT AND 64-BIT)
 M:     David S. Miller <davem@davemloft.net>
-L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
-S:     Maintained
+S:     Odd Fixes
 F:     arch/sparc/net/
 
 BPF JIT for X86 32-BIT
 M:     Wang YanQing <udknight@gmail.com>
-L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
-S:     Maintained
+S:     Odd Fixes
 F:     arch/x86/net/bpf_jit_comp32.c
 
 BPF JIT for X86 64-BIT
 M:     Alexei Starovoitov <ast@kernel.org>
 M:     Daniel Borkmann <daniel@iogearbox.net>
-L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Supported
 F:     arch/x86/net/
 X:     arch/x86/net/bpf_jit_comp32.c
 
-BPF LSM (Security Audit and Enforcement using BPF)
+BPF [CORE]
+M:     Alexei Starovoitov <ast@kernel.org>
+M:     Daniel Borkmann <daniel@iogearbox.net>
+R:     John Fastabend <john.fastabend@gmail.com>
+L:     bpf@vger.kernel.org
+S:     Maintained
+F:     kernel/bpf/verifier.c
+F:     kernel/bpf/tnum.c
+F:     kernel/bpf/core.c
+F:     kernel/bpf/syscall.c
+F:     kernel/bpf/dispatcher.c
+F:     kernel/bpf/trampoline.c
+F:     include/linux/bpf*
+F:     include/linux/filter.h
+
+BPF [BTF]
+M:     Martin KaFai Lau <martin.lau@linux.dev>
+L:     bpf@vger.kernel.org
+S:     Maintained
+F:     kernel/bpf/btf.c
+F:     include/linux/btf*
+
+BPF [TRACING]
+M:     Song Liu <song@kernel.org>
+R:     Jiri Olsa <jolsa@kernel.org>
+L:     bpf@vger.kernel.org
+S:     Maintained
+F:     kernel/trace/bpf_trace.c
+F:     kernel/bpf/stackmap.c
+
+BPF [NETWORKING] (tc BPF, sock_addr)
+M:     Martin KaFai Lau <martin.lau@linux.dev>
+M:     Daniel Borkmann <daniel@iogearbox.net>
+R:     John Fastabend <john.fastabend@gmail.com>
+L:     bpf@vger.kernel.org
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     net/core/filter.c
+F:     net/sched/act_bpf.c
+F:     net/sched/cls_bpf.c
+
+BPF [NETWORKING] (struct_ops, reuseport)
+M:     Martin KaFai Lau <martin.lau@linux.dev>
+L:     bpf@vger.kernel.org
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     kernel/bpf/bpf_struct*
+
+BPF [SECURITY & LSM] (Security Audit and Enforcement using BPF)
 M:     KP Singh <kpsingh@kernel.org>
 R:     Florent Revest <revest@chromium.org>
 R:     Brendan Jackman <jackmanb@chromium.org>
@@ -3757,13 +3796,64 @@ F:      include/linux/bpf_lsm.h
 F:     kernel/bpf/bpf_lsm.c
 F:     security/bpf/
 
-BPFTOOL
+BPF [STORAGE & CGROUPS]
+M:     Martin KaFai Lau <martin.lau@linux.dev>
+L:     bpf@vger.kernel.org
+S:     Maintained
+F:     kernel/bpf/cgroup.c
+F:     kernel/bpf/*storage.c
+F:     kernel/bpf/bpf_lru*
+
+BPF [RINGBUF]
+M:     Andrii Nakryiko <andrii@kernel.org>
+L:     bpf@vger.kernel.org
+S:     Maintained
+F:     kernel/bpf/ringbuf.c
+
+BPF [ITERATOR]
+M:     Yonghong Song <yhs@fb.com>
+L:     bpf@vger.kernel.org
+S:     Maintained
+F:     kernel/bpf/*iter.c
+
+BPF [L7 FRAMEWORK] (sockmap)
+M:     John Fastabend <john.fastabend@gmail.com>
+M:     Jakub Sitnicki <jakub@cloudflare.com>
+L:     netdev@vger.kernel.org
+L:     bpf@vger.kernel.org
+S:     Maintained
+F:     include/linux/skmsg.h
+F:     net/core/skmsg.c
+F:     net/core/sock_map.c
+F:     net/ipv4/tcp_bpf.c
+F:     net/ipv4/udp_bpf.c
+F:     net/unix/unix_bpf.c
+
+BPF [LIBRARY] (libbpf)
+M:     Andrii Nakryiko <andrii@kernel.org>
+L:     bpf@vger.kernel.org
+S:     Maintained
+F:     tools/lib/bpf/
+
+BPF [TOOLING] (bpftool)
 M:     Quentin Monnet <quentin@isovalent.com>
 L:     bpf@vger.kernel.org
 S:     Maintained
 F:     kernel/bpf/disasm.*
 F:     tools/bpf/bpftool/
 
+BPF [SELFTESTS] (Test Runners & Infrastructure)
+M:     Andrii Nakryiko <andrii@kernel.org>
+R:     Mykola Lysenko <mykolal@fb.com>
+L:     bpf@vger.kernel.org
+S:     Maintained
+F:     tools/testing/selftests/bpf/
+
+BPF [MISC]
+L:     bpf@vger.kernel.org
+S:     Odd Fixes
+K:     (?:\b|_)bpf(?:\b|_)
+
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:     Michael Chan <michael.chan@broadcom.com>
 L:     netdev@vger.kernel.org
@@ -3796,12 +3886,12 @@ N:      bcmbca
 N:     bcm[9]?47622
 
 BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
-M:     Nicolas Saenz Julienne <nsaenz@kernel.org>
+M:     Florian Fainelli <f.fainelli@gmail.com>
 R:     Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nsaenz/linux-rpi.git
+T:     git git://github.com/broadcom/stblinux.git
 F:     Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
 F:     drivers/pci/controller/pcie-brcmstb.c
 F:     drivers/staging/vc04_services
@@ -4959,6 +5049,7 @@ Q:        http://patchwork.kernel.org/project/linux-clk/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
 F:     Documentation/devicetree/bindings/clock/
 F:     drivers/clk/
+F:     include/dt-bindings/clock/
 F:     include/linux/clk-pr*
 F:     include/linux/clk/
 F:     include/linux/of_clk.h
@@ -5009,7 +5100,7 @@ COMPUTE EXPRESS LINK (CXL)
 M:     Alison Schofield <alison.schofield@intel.com>
 M:     Vishal Verma <vishal.l.verma@intel.com>
 M:     Ira Weiny <ira.weiny@intel.com>
-M:     Ben Widawsky <ben.widawsky@intel.com>
+M:     Ben Widawsky <bwidawsk@kernel.org>
 M:     Dan Williams <dan.j.williams@intel.com>
 L:     linux-cxl@vger.kernel.org
 S:     Maintained
@@ -5961,7 +6052,7 @@ DMA MAPPING HELPERS
 M:     Christoph Hellwig <hch@lst.de>
 M:     Marek Szyprowski <m.szyprowski@samsung.com>
 R:     Robin Murphy <robin.murphy@arm.com>
-L:     iommu@lists.linux-foundation.org
+L:     iommu@lists.linux.dev
 S:     Supported
 W:     http://git.infradead.org/users/hch/dma-mapping.git
 T:     git git://git.infradead.org/users/hch/dma-mapping.git
@@ -5973,7 +6064,7 @@ F:        kernel/dma/
 
 DMA MAPPING BENCHMARK
 M:     Xiang Chen <chenxiang66@hisilicon.com>
-L:     iommu@lists.linux-foundation.org
+L:     iommu@lists.linux.dev
 F:     kernel/dma/map_benchmark.c
 F:     tools/testing/selftests/dma/
 
@@ -7394,6 +7485,8 @@ F:        include/video/s1d13xxxfb.h
 EROFS FILE SYSTEM
 M:     Gao Xiang <xiang@kernel.org>
 M:     Chao Yu <chao@kernel.org>
+R:     Yue Hu <huyue2@coolpad.com>
+R:     Jeffle Xu <jefflexu@linux.alibaba.com>
 L:     linux-erofs@lists.ozlabs.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git
@@ -7557,7 +7650,7 @@ F:        drivers/gpu/drm/exynos/exynos_dp*
 
 EXYNOS SYSMMU (IOMMU) driver
 M:     Marek Szyprowski <m.szyprowski@samsung.com>
-L:     iommu@lists.linux-foundation.org
+L:     iommu@lists.linux.dev
 S:     Maintained
 F:     drivers/iommu/exynos-iommu.c
 
@@ -8479,6 +8572,7 @@ F:        Documentation/devicetree/bindings/gpio/
 F:     Documentation/driver-api/gpio/
 F:     drivers/gpio/
 F:     include/asm-generic/gpio.h
+F:     include/dt-bindings/gpio/
 F:     include/linux/gpio.h
 F:     include/linux/gpio/
 F:     include/linux/of_gpio.h
@@ -8944,6 +9038,12 @@ F:       Documentation/admin-guide/perf/hisi-pcie-pmu.rst
 F:     Documentation/admin-guide/perf/hisi-pmu.rst
 F:     drivers/perf/hisilicon
 
+HISILICON HNS3 PMU DRIVER
+M:     Guangbin Huang <huangguangbin2@huawei.com>
+S:     Supported
+F:     Documentation/admin-guide/perf/hns3-pmu.rst
+F:     drivers/perf/hisilicon/hns3_pmu.c
+
 HISILICON QM AND ZIP Controller DRIVER
 M:     Zhou Wang <wangzhou1@hisilicon.com>
 L:     linux-crypto@vger.kernel.org
@@ -9132,6 +9232,7 @@ F:        drivers/media/platform/st/sti/hva
 
 HWPOISON MEMORY FAILURE HANDLING
 M:     Naoya Horiguchi <naoya.horiguchi@nec.com>
+R:     Miaohe Lin <linmiaohe@huawei.com>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/hwpoison-inject.c
@@ -9276,6 +9377,7 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
 F:     Documentation/devicetree/bindings/i2c/i2c.txt
 F:     Documentation/i2c/
 F:     drivers/i2c/*
+F:     include/dt-bindings/i2c/i2c.h
 F:     include/linux/i2c-dev.h
 F:     include/linux/i2c-smbus.h
 F:     include/linux/i2c.h
@@ -9291,6 +9393,7 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
 F:     Documentation/devicetree/bindings/i2c/
 F:     drivers/i2c/algos/
 F:     drivers/i2c/busses/
+F:     include/dt-bindings/i2c/
 
 I2C-TAOS-EVM DRIVER
 M:     Jean Delvare <jdelvare@suse.com>
@@ -9523,6 +9626,7 @@ F:        drivers/input/misc/ideapad_slidebar.c
 
 IDMAPPED MOUNTS
 M:     Christian Brauner <brauner@kernel.org>
+M:     Seth Forshee <sforshee@kernel.org>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
@@ -9811,7 +9915,10 @@ INTEL ASoC DRIVERS
 M:     Cezary Rojewski <cezary.rojewski@intel.com>
 M:     Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
 M:     Liam Girdwood <liam.r.girdwood@linux.intel.com>
-M:     Jie Yang <yang.jie@linux.intel.com>
+M:     Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+M:     Bard Liao <yung-chuan.liao@linux.intel.com>
+M:     Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+M:     Kai Vehmanen <kai.vehmanen@linux.intel.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
 F:     sound/soc/intel/
@@ -9974,7 +10081,7 @@ F:       drivers/hid/intel-ish-hid/
 INTEL IOMMU (VT-d)
 M:     David Woodhouse <dwmw2@infradead.org>
 M:     Lu Baolu <baolu.lu@linux.intel.com>
-L:     iommu@lists.linux-foundation.org
+L:     iommu@lists.linux.dev
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
 F:     drivers/iommu/intel/
@@ -10353,7 +10460,7 @@ F:      include/linux/iomap.h
 IOMMU DRIVERS
 M:     Joerg Roedel <joro@8bytes.org>
 M:     Will Deacon <will@kernel.org>
-L:     iommu@lists.linux-foundation.org
+L:     iommu@lists.linux.dev
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
 F:     Documentation/devicetree/bindings/iommu/
@@ -10830,6 +10937,7 @@ M:      Marc Zyngier <maz@kernel.org>
 R:     James Morse <james.morse@arm.com>
 R:     Alexandru Elisei <alexandru.elisei@arm.com>
 R:     Suzuki K Poulose <suzuki.poulose@arm.com>
+R:     Oliver Upton <oliver.upton@linux.dev>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kvmarm@lists.cs.columbia.edu (moderated for non-subscribers)
 S:     Maintained
@@ -10872,7 +10980,6 @@ F:      arch/riscv/include/asm/kvm*
 F:     arch/riscv/include/uapi/asm/kvm*
 F:     arch/riscv/kvm/
 F:     tools/testing/selftests/kvm/*/riscv/
-F:     tools/testing/selftests/kvm/riscv/
 
 KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
 M:     Christian Borntraeger <borntraeger@linux.ibm.com>
@@ -10897,28 +11004,51 @@ F:    tools/testing/selftests/kvm/*/s390x/
 F:     tools/testing/selftests/kvm/s390x/
 
 KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
+M:     Sean Christopherson <seanjc@google.com>
 M:     Paolo Bonzini <pbonzini@redhat.com>
-R:     Sean Christopherson <seanjc@google.com>
-R:     Vitaly Kuznetsov <vkuznets@redhat.com>
-R:     Wanpeng Li <wanpengli@tencent.com>
-R:     Jim Mattson <jmattson@google.com>
-R:     Joerg Roedel <joro@8bytes.org>
 L:     kvm@vger.kernel.org
 S:     Supported
-W:     http://www.linux-kvm.org
 T:     git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
 F:     arch/x86/include/asm/kvm*
-F:     arch/x86/include/asm/pvclock-abi.h
 F:     arch/x86/include/asm/svm.h
 F:     arch/x86/include/asm/vmx*.h
 F:     arch/x86/include/uapi/asm/kvm*
 F:     arch/x86/include/uapi/asm/svm.h
 F:     arch/x86/include/uapi/asm/vmx.h
-F:     arch/x86/kernel/kvm.c
-F:     arch/x86/kernel/kvmclock.c
 F:     arch/x86/kvm/
 F:     arch/x86/kvm/*/
 
+KVM PARAVIRT (KVM/paravirt)
+M:     Paolo Bonzini <pbonzini@redhat.com>
+R:     Wanpeng Li <wanpengli@tencent.com>
+R:     Vitaly Kuznetsov <vkuznets@redhat.com>
+L:     kvm@vger.kernel.org
+S:     Supported
+T:     git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
+F:     arch/x86/kernel/kvm.c
+F:     arch/x86/kernel/kvmclock.c
+F:     arch/x86/include/asm/pvclock-abi.h
+F:     include/linux/kvm_para.h
+F:     include/uapi/linux/kvm_para.h
+F:     include/uapi/asm-generic/kvm_para.h
+F:     include/asm-generic/kvm_para.h
+F:     arch/um/include/asm/kvm_para.h
+F:     arch/x86/include/asm/kvm_para.h
+F:     arch/x86/include/uapi/asm/kvm_para.h
+
+KVM X86 HYPER-V (KVM/hyper-v)
+M:     Vitaly Kuznetsov <vkuznets@redhat.com>
+M:     Sean Christopherson <seanjc@google.com>
+M:     Paolo Bonzini <pbonzini@redhat.com>
+L:     kvm@vger.kernel.org
+S:     Supported
+T:     git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
+F:     arch/x86/kvm/hyperv.*
+F:     arch/x86/kvm/kvm_onhyperv.*
+F:     arch/x86/kvm/svm/hyperv.*
+F:     arch/x86/kvm/svm/svm_onhyperv.*
+F:     arch/x86/kvm/vmx/evmcs.*
+
 KERNFS
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:     Tejun Heo <tj@kernel.org>
@@ -11097,20 +11227,6 @@ S:     Maintained
 F:     include/net/l3mdev.h
 F:     net/l3mdev
 
-L7 BPF FRAMEWORK
-M:     John Fastabend <john.fastabend@gmail.com>
-M:     Daniel Borkmann <daniel@iogearbox.net>
-M:     Jakub Sitnicki <jakub@cloudflare.com>
-L:     netdev@vger.kernel.org
-L:     bpf@vger.kernel.org
-S:     Maintained
-F:     include/linux/skmsg.h
-F:     net/core/skmsg.c
-F:     net/core/sock_map.c
-F:     net/ipv4/tcp_bpf.c
-F:     net/ipv4/udp_bpf.c
-F:     net/unix/unix_bpf.c
-
 LANDLOCK SECURITY MODULE
 M:     Mickaël Salaün <mic@digikod.net>
 L:     linux-security-module@vger.kernel.org
@@ -11590,6 +11706,7 @@ F:      drivers/gpu/drm/bridge/lontium-lt8912b.c
 LOONGARCH
 M:     Huacai Chen <chenhuacai@kernel.org>
 R:     WANG Xuerui <kernel@xen0n.name>
+L:     loongarch@lists.linux.dev
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson.git
 F:     arch/loongarch/
@@ -12502,7 +12619,7 @@ F:      drivers/i2c/busses/i2c-mt65xx.c
 
 MEDIATEK IOMMU DRIVER
 M:     Yong Wu <yong.wu@mediatek.com>
-L:     iommu@lists.linux-foundation.org
+L:     iommu@lists.linux.dev
 L:     linux-mediatek@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 F:     Documentation/devicetree/bindings/iommu/mediatek*
@@ -12845,9 +12962,8 @@ M:      Andrew Morton <akpm@linux-foundation.org>
 L:     linux-mm@kvack.org
 S:     Maintained
 W:     http://www.linux-mm.org
-T:     quilt https://ozlabs.org/~akpm/mmotm/
-T:     quilt https://ozlabs.org/~akpm/mmots/
-T:     git git://github.com/hnaz/linux-mm.git
+T:     git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+T:     quilt git://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new
 F:     include/linux/gfp.h
 F:     include/linux/memory_hotplug.h
 F:     include/linux/mm.h
@@ -12857,6 +12973,18 @@ F:     include/linux/vmalloc.h
 F:     mm/
 F:     tools/testing/selftests/vm/
 
+MEMORY HOT(UN)PLUG
+M:     David Hildenbrand <david@redhat.com>
+M:     Oscar Salvador <osalvador@suse.de>
+L:     linux-mm@kvack.org
+S:     Maintained
+F:     Documentation/admin-guide/mm/memory-hotplug.rst
+F:     Documentation/core-api/memory-hotplug.rst
+F:     drivers/base/memory.c
+F:     include/linux/memory_hotplug.h
+F:     mm/memory_hotplug.c
+F:     tools/testing/selftests/memory-hotplug/
+
 MEMORY TECHNOLOGY DEVICES (MTD)
 M:     Miquel Raynal <miquel.raynal@bootlin.com>
 M:     Richard Weinberger <richard@nod.at>
@@ -13801,6 +13929,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 F:     Documentation/devicetree/bindings/net/
 F:     drivers/connector/
 F:     drivers/net/
+F:     include/dt-bindings/net/
 F:     include/linux/etherdevice.h
 F:     include/linux/fcdevice.h
 F:     include/linux/fddidevice.h
@@ -13952,7 +14081,6 @@ F:      net/ipv6/tcp*.c
 NETWORKING [TLS]
 M:     Boris Pismenny <borisp@nvidia.com>
 M:     John Fastabend <john.fastabend@gmail.com>
-M:     Daniel Borkmann <daniel@iogearbox.net>
 M:     Jakub Kicinski <kuba@kernel.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -14243,7 +14371,8 @@ S:      Maintained
 F:     drivers/net/phy/nxp-c45-tja11xx.c
 
 NXP FSPI DRIVER
-M:     Ashish Kumar <ashish.kumar@nxp.com>
+M:     Han Xu <han.xu@nxp.com>
+M:     Haibo Chen <haibo.chen@nxp.com>
 R:     Yogesh Gaur <yogeshgaur.83@gmail.com>
 L:     linux-spi@vger.kernel.org
 S:     Maintained
@@ -14261,7 +14390,7 @@ F:      drivers/iio/gyro/fxas21002c_i2c.c
 F:     drivers/iio/gyro/fxas21002c_spi.c
 
 NXP i.MX CLOCK DRIVERS
-M:     Abel Vesa <abel.vesa@nxp.com>
+M:     Abel Vesa <abelvesa@kernel.org>
 L:     linux-clk@vger.kernel.org
 L:     linux-imx@nxp.com
 S:     Maintained
@@ -14349,9 +14478,8 @@ F:      Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml
 F:     sound/soc/codecs/tfa989x.c
 
 NXP-NCI NFC DRIVER
-R:     Charles Gorand <charles.gorand@effinnov.com>
 L:     linux-nfc@lists.01.org (subscribers-only)
-S:     Supported
+S:     Orphan
 F:     Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml
 F:     drivers/nfc/nxp-nci
 
@@ -14869,6 +14997,7 @@ F:      include/dt-bindings/
 
 OPENCOMPUTE PTP CLOCK DRIVER
 M:     Jonathan Lemon <jonathan.lemon@gmail.com>
+M:     Vadim Fedorenko <vadfed@fb.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/ptp/ptp_ocp.c
@@ -15729,7 +15858,7 @@ PIN CONTROLLER - FREESCALE
 M:     Dong Aisheng <aisheng.dong@nxp.com>
 M:     Fabio Estevam <festevam@gmail.com>
 M:     Shawn Guo <shawnguo@kernel.org>
-M:     Stefan Agner <stefan@agner.ch>
+M:     Jacky Bai <ping.bai@nxp.com>
 R:     Pengutronix Kernel Team <kernel@pengutronix.de>
 L:     linux-gpio@vger.kernel.org
 S:     Maintained
@@ -15739,7 +15868,7 @@ F:      drivers/pinctrl/freescale/
 PIN CONTROLLER - INTEL
 M:     Mika Westerberg <mika.westerberg@linux.intel.com>
 M:     Andy Shevchenko <andy@kernel.org>
-S:     Maintained
+S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
 F:     drivers/pinctrl/intel/
 
@@ -16261,7 +16390,7 @@ F:      drivers/crypto/qat/
 
 QCOM AUDIO (ASoC) DRIVERS
 M:     Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
-M:     Banajit Goswami <bgoswami@codeaurora.org>
+M:     Banajit Goswami <bgoswami@quicinc.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
 F:     sound/soc/codecs/lpass-va-macro.c
@@ -16488,7 +16617,7 @@ F:      Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
 F:     drivers/cpufreq/qcom-cpufreq-nvmem.c
 
 QUALCOMM CRYPTO DRIVERS
-M:     Thara Gopinath <thara.gopinath@linaro.org>
+M:     Thara Gopinath <thara.gopinath@gmail.com>
 L:     linux-crypto@vger.kernel.org
 L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
@@ -16542,7 +16671,7 @@ F:      drivers/i2c/busses/i2c-qcom-cci.c
 
 QUALCOMM IOMMU
 M:     Rob Clark <robdclark@gmail.com>
-L:     iommu@lists.linux-foundation.org
+L:     iommu@lists.linux.dev
 L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
 F:     drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -16598,7 +16727,7 @@ F:      include/linux/if_rmnet.h
 
 QUALCOMM TSENS THERMAL DRIVER
 M:     Amit Kucheria <amitk@kernel.org>
-M:     Thara Gopinath <thara.gopinath@linaro.org>
+M:     Thara Gopinath <thara.gopinath@gmail.com>
 L:     linux-pm@vger.kernel.org
 L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
@@ -17153,12 +17282,15 @@ N:    riscv
 K:     riscv
 
 RISC-V/MICROCHIP POLARFIRE SOC SUPPORT
-M:     Lewis Hanly <lewis.hanly@microchip.com>
 M:     Conor Dooley <conor.dooley@microchip.com>
+M:     Daire McNamara <daire.mcnamara@microchip.com>
 L:     linux-riscv@lists.infradead.org
 S:     Supported
 F:     arch/riscv/boot/dts/microchip/
+F:     drivers/char/hw_random/mpfs-rng.c
+F:     drivers/clk/microchip/clk-mpfs.c
 F:     drivers/mailbox/mailbox-mpfs.c
+F:     drivers/pci/controller/pcie-microchip-host.c
 F:     drivers/soc/microchip/
 F:     include/soc/microchip/mpfs.h
 
@@ -18055,6 +18187,7 @@ F:      drivers/misc/sgi-xp/
 
 SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
 M:     Karsten Graul <kgraul@linux.ibm.com>
+M:     Wenjia Zhang <wenjia@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -18687,8 +18820,10 @@ F:     sound/soc/
 SOUND - SOUND OPEN FIRMWARE (SOF) DRIVERS
 M:     Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
 M:     Liam Girdwood <lgirdwood@gmail.com>
+M:     Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+M:     Bard Liao <yung-chuan.liao@linux.intel.com>
 M:     Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
-M:     Kai Vehmanen <kai.vehmanen@linux.intel.com>
+R:     Kai Vehmanen <kai.vehmanen@linux.intel.com>
 M:     Daniel Baluta <daniel.baluta@nxp.com>
 L:     sound-open-firmware@alsa-project.org (moderated for non-subscribers)
 S:     Supported
@@ -19167,7 +19302,7 @@ F:      arch/x86/boot/video*
 
 SWIOTLB SUBSYSTEM
 M:     Christoph Hellwig <hch@infradead.org>
-L:     iommu@lists.linux-foundation.org
+L:     iommu@lists.linux.dev
 S:     Supported
 W:     http://git.infradead.org/users/hch/dma-mapping.git
 T:     git git://git.infradead.org/users/hch/dma-mapping.git
@@ -19305,7 +19440,7 @@ R:      Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 R:     Mika Westerberg <mika.westerberg@linux.intel.com>
 R:     Jan Dabros <jsd@semihalf.com>
 L:     linux-i2c@vger.kernel.org
-S:     Maintained
+S:     Supported
 F:     drivers/i2c/busses/i2c-designware-*
 
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
@@ -20712,6 +20847,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
 F:     Documentation/devicetree/bindings/usb/
 F:     Documentation/usb/
 F:     drivers/usb/
+F:     include/dt-bindings/usb/
 F:     include/linux/usb.h
 F:     include/linux/usb/
 
@@ -21841,7 +21977,7 @@ XEN SWIOTLB SUBSYSTEM
 M:     Juergen Gross <jgross@suse.com>
 M:     Stefano Stabellini <sstabellini@kernel.org>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
-L:     iommu@lists.linux-foundation.org
+L:     iommu@lists.linux.dev
 S:     Supported
 F:     arch/x86/xen/*swiotlb*
 F:     drivers/xen/*swiotlb*
index 1a6678d..df92892 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION =
 NAME = Superb Owl
 
 # *DOCUMENTATION*
@@ -1141,7 +1141,7 @@ KBUILD_MODULES := 1
 
 autoksyms_recursive: descend modules.order
        $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
-         "$(MAKE) -f $(srctree)/Makefile vmlinux"
+         "$(MAKE) -f $(srctree)/Makefile autoksyms_recursive"
 endif
 
 autoksyms_h := $(if $(CONFIG_TRIM_UNUSED_KSYMS), include/generated/autoksyms.h)
index fcf9a41..5ea3e38 100644 (file)
@@ -223,6 +223,9 @@ config HAVE_FUNCTION_DESCRIPTORS
 config TRACE_IRQFLAGS_SUPPORT
        bool
 
+config TRACE_IRQFLAGS_NMI_SUPPORT
+       bool
+
 #
 # An arch should select this if it provides all these things:
 #
@@ -438,6 +441,13 @@ config MMU_GATHER_PAGE_SIZE
 
 config MMU_GATHER_NO_RANGE
        bool
+       select MMU_GATHER_MERGE_VMAS
+
+config MMU_GATHER_NO_FLUSH_CACHE
+       bool
+
+config MMU_GATHER_MERGE_VMAS
+       bool
 
 config MMU_GATHER_NO_GATHER
        bool
index 1848998..5112f49 100644 (file)
@@ -1586,7 +1586,6 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
        aspeed-bmc-lenovo-hr630.dtb \
        aspeed-bmc-lenovo-hr855xg2.dtb \
        aspeed-bmc-microsoft-olympus.dtb \
-       aspeed-bmc-nuvia-dc-scm.dtb \
        aspeed-bmc-opp-lanyang.dtb \
        aspeed-bmc-opp-mihawk.dtb \
        aspeed-bmc-opp-mowgli.dtb \
@@ -1599,6 +1598,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
        aspeed-bmc-opp-witherspoon.dtb \
        aspeed-bmc-opp-zaius.dtb \
        aspeed-bmc-portwell-neptune.dtb \
+       aspeed-bmc-qcom-dc-scm-v1.dtb \
        aspeed-bmc-quanta-q71l.dtb \
        aspeed-bmc-quanta-s6q.dtb \
        aspeed-bmc-supermicro-x11spi.dtb \
@@ -6,8 +6,8 @@
 #include "aspeed-g6.dtsi"
 
 / {
-       model = "Nuvia DC-SCM BMC";
-       compatible = "nuvia,dc-scm-bmc", "aspeed,ast2600";
+       model = "Qualcomm DC-SCM V1 BMC";
+       compatible = "qcom,dc-scm-v1-bmc", "aspeed,ast2600";
 
        aliases {
                serial4 = &uart5;
index 7719ea3..81ccb06 100644 (file)
                status = "okay";
 
                eeprom@53 {
-                       compatible = "atmel,24c32";
+                       compatible = "atmel,24c02";
                        reg = <0x53>;
                        pagesize = <16>;
-                       size = <128>;
                        status = "okay";
                };
        };
index 806eb1d..164201a 100644 (file)
        status = "okay";
 
        eeprom@50 {
-               compatible = "atmel,24c32";
+               compatible = "atmel,24c02";
                reg = <0x50>;
                pagesize = <16>;
                status = "okay";
        };
 
        eeprom@52 {
-               compatible = "atmel,24c32";
+               compatible = "atmel,24c02";
                reg = <0x52>;
                pagesize = <16>;
                status = "disabled";
        };
 
        eeprom@53 {
-               compatible = "atmel,24c32";
+               compatible = "atmel,24c02";
                reg = <0x53>;
                pagesize = <16>;
                status = "disabled";
index 443e8b0..14af1fd 100644 (file)
                        port@0 {
                                reg = <0>;
                                label = "lan1";
+                               phy-mode = "internal";
                        };
 
                        port@1 {
                                reg = <1>;
                                label = "lan2";
+                               phy-mode = "internal";
                        };
 
                        port@2 {
                                reg = <2>;
                                label = "lan3";
+                               phy-mode = "internal";
                        };
 
                        port@3 {
                                reg = <3>;
                                label = "lan4";
+                               phy-mode = "internal";
                        };
 
                        port@4 {
                                reg = <4>;
                                label = "lan5";
+                               phy-mode = "internal";
                        };
 
                        port@5 {
index f4d2fc2..c53d9eb 100644 (file)
 &expgpio {
        gpio-line-names = "BT_ON",
                          "WL_ON",
-                         "",
+                         "PWR_LED_OFF",
                          "GLOBAL_RESET",
                          "VDD_SD_IO_SEL",
-                         "CAM_GPIO",
+                         "GLOBAL_SHUTDOWN",
                          "SD_PWR_ON",
-                         "SD_OC_N";
+                         "SHUTDOWN_REQUEST";
 };
 
 &genet_mdio {
index c383e0e..7df270c 100644 (file)
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_atmel_conn>;
                reg = <0x4a>;
-               reset-gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>;     /* SODIMM 106 */
+               reset-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;      /* SODIMM 106 */
                status = "disabled";
        };
 };
index fded07f..d6ba4b2 100644 (file)
                reg = <0x28>;
                #gpio-cells = <2>;
                gpio-controller;
-               ngpio = <32>;
+               ngpios = <62>;
        };
 
        sgtl5000: codec@a {
index d27beb4..652feff 100644 (file)
                                        regulator-name = "vddpu";
                                        regulator-min-microvolt = <725000>;
                                        regulator-max-microvolt = <1450000>;
-                                       regulator-enable-ramp-delay = <150>;
+                                       regulator-enable-ramp-delay = <380>;
                                        anatop-reg-offset = <0x140>;
                                        anatop-vol-bit-shift = <9>;
                                        anatop-vol-bit-width = <5>;
index 15621e0..2c3ae71 100644 (file)
        atmel_mxt_ts: touchscreen@4a {
                compatible = "atmel,maxtouch";
                pinctrl-names = "default";
-               pinctrl-0 = <&pinctrl_atmel_conn>;
+               pinctrl-0 = <&pinctrl_atmel_conn &pinctrl_atmel_snvs_conn>;
                reg = <0x4a>;
                interrupt-parent = <&gpio5>;
                interrupts = <4 IRQ_TYPE_EDGE_FALLING>;       /* SODIMM 107 / INT */
        pinctrl_atmel_conn: atmelconngrp {
                fsl,pins = <
                        MX6UL_PAD_JTAG_MOD__GPIO1_IO10          0xb0a0  /* SODIMM 106 */
-                       MX6ULL_PAD_SNVS_TAMPER4__GPIO5_IO04     0xb0a0  /* SODIMM 107 */
                >;
        };
 
 };
 
 &iomuxc_snvs {
+       pinctrl_atmel_snvs_conn: atmelsnvsconngrp {
+               fsl,pins = <
+                       MX6ULL_PAD_SNVS_TAMPER4__GPIO5_IO04     0xb0a0  /* SODIMM 107 */
+               >;
+       };
+
        pinctrl_snvs_gpio1: snvsgpio1grp {
                fsl,pins = <
                        MX6ULL_PAD_SNVS_TAMPER6__GPIO5_IO06     0x110a0 /* SODIMM 93 */
index c6b3206..21b509c 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
        bus-width = <4>;
+       no-1-8-v;
        non-removable;
-       cap-sd-highspeed;
-       sd-uhs-ddr50;
-       mmc-ddr-1_8v;
        vmmc-supply = <&reg_wifi>;
        enable-sdio-wakeup;
        status = "okay";
index 008e3da..039eed7 100644 (file)
                compatible = "usb-nop-xceiv";
                clocks = <&clks IMX7D_USB_HSIC_ROOT_CLK>;
                clock-names = "main_clk";
+               power-domains = <&pgc_hsic_phy>;
                #phy-cells = <0>;
        };
 
                                compatible = "fsl,imx7d-usb", "fsl,imx27-usb";
                                reg = <0x30b30000 0x200>;
                                interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
-                               power-domains = <&pgc_hsic_phy>;
                                clocks = <&clks IMX7D_USB_CTRL_CLK>;
                                fsl,usbphy = <&usbphynop3>;
                                fsl,usbmisc = <&usbmisc3 0>;
index 4cab1b3..725dcf7 100644 (file)
 
        phy4: ethernet-phy@5 {
                reg = <5>;
-               coma-mode-gpios = <&gpio 37 GPIO_ACTIVE_HIGH>;
+               coma-mode-gpios = <&gpio 37 GPIO_OPEN_DRAIN>;
        };
 
        phy5: ethernet-phy@6 {
                reg = <6>;
-               coma-mode-gpios = <&gpio 37 GPIO_ACTIVE_HIGH>;
+               coma-mode-gpios = <&gpio 37 GPIO_OPEN_DRAIN>;
        };
 
        phy6: ethernet-phy@7 {
                reg = <7>;
-               coma-mode-gpios = <&gpio 37 GPIO_ACTIVE_HIGH>;
+               coma-mode-gpios = <&gpio 37 GPIO_OPEN_DRAIN>;
        };
 
        phy7: ethernet-phy@8 {
                reg = <8>;
-               coma-mode-gpios = <&gpio 37 GPIO_ACTIVE_HIGH>;
+               coma-mode-gpios = <&gpio 37 GPIO_OPEN_DRAIN>;
        };
 };
 
index 3cb02ff..38e90a3 100644 (file)
@@ -38,7 +38,7 @@
                sys_clk: sys_clk {
                        compatible = "fixed-clock";
                        #clock-cells = <0>;
-                       clock-frequency = <162500000>;
+                       clock-frequency = <165625000>;
                };
 
                cpu_clk: cpu_clk {
index 814ad0b..c3b8a6d 100644 (file)
                        interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;
                        clock-names = "core", "iface";
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&blsp1_uart2_default>;
                        status = "disabled";
                };
 
                        interrupts = <GIC_SPI 113 IRQ_TYPE_NONE>;
                        clocks = <&gcc GCC_BLSP2_UART1_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>;
                        clock-names = "core", "iface";
+                       pinctrl-names = "default", "sleep";
+                       pinctrl-0 = <&blsp2_uart1_default>;
+                       pinctrl-1 = <&blsp2_uart1_sleep>;
                        status = "disabled";
                };
 
                        interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&gcc GCC_BLSP2_UART4_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>;
                        clock-names = "core", "iface";
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&blsp2_uart4_default>;
                        status = "disabled";
                };
 
                        interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&gcc GCC_BLSP2_QUP6_I2C_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>;
                        clock-names = "core", "iface";
+                       pinctrl-names = "default", "sleep";
+                       pinctrl-0 = <&blsp2_i2c6_default>;
+                       pinctrl-1 = <&blsp2_i2c6_sleep>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                };
                                };
                        };
 
-                       blsp1_uart2_active: blsp1-uart2-active {
+                       blsp1_uart2_default: blsp1-uart2-default {
                                rx {
                                        pins = "gpio5";
                                        function = "blsp_uart2";
                                };
                        };
 
-                       blsp2_uart1_active: blsp2-uart1-active {
+                       blsp2_uart1_default: blsp2-uart1-default {
                                tx-rts {
                                        pins = "gpio41", "gpio44";
                                        function = "blsp_uart7";
                                bias-pull-down;
                        };
 
-                       blsp2_uart4_active: blsp2-uart4-active {
+                       blsp2_uart4_default: blsp2-uart4-default {
                                tx-rts {
                                        pins = "gpio53", "gpio56";
                                        function = "blsp_uart10";
                                bias-pull-up;
                        };
 
-                       /* BLSP2_I2C6 info is missing - nobody uses it though? */
+                       blsp2_i2c6_default: blsp2-i2c6-default {
+                               pins = "gpio87", "gpio88";
+                               function = "blsp_i2c12";
+                               drive-strength = <2>;
+                               bias-disable;
+                       };
+
+                       blsp2_i2c6_sleep: blsp2-i2c6-sleep {
+                               pins = "gpio87", "gpio88";
+                               function = "blsp_i2c12";
+                               drive-strength = <2>;
+                               bias-pull-up;
+                       };
 
                        spi8_default: spi8_default {
                                mosi {
index 89c71d4..659a17f 100644 (file)
                                clocks = <&pmc PMC_TYPE_PERIPHERAL 55>, <&pmc PMC_TYPE_GCK 55>;
                                clock-names = "pclk", "gclk";
                                assigned-clocks = <&pmc PMC_TYPE_CORE PMC_I2S1_MUX>;
-                               assigned-parrents = <&pmc PMC_TYPE_GCK 55>;
+                               assigned-clock-parents = <&pmc PMC_TYPE_GCK 55>;
                                status = "disabled";
                        };
 
diff --git a/arch/arm/boot/dts/stm32mp15-scmi.dtsi b/arch/arm/boot/dts/stm32mp15-scmi.dtsi
new file mode 100644 (file)
index 0000000..543f24c
--- /dev/null
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2022 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
+ */
+
+/ {
+       firmware {
+               optee: optee {
+                       compatible = "linaro,optee-tz";
+                       method = "smc";
+               };
+
+               scmi: scmi {
+                       compatible = "linaro,scmi-optee";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       linaro,optee-channel-id = <0>;
+                       shmem = <&scmi_shm>;
+
+                       scmi_clk: protocol@14 {
+                               reg = <0x14>;
+                               #clock-cells = <1>;
+                       };
+
+                       scmi_reset: protocol@16 {
+                               reg = <0x16>;
+                               #reset-cells = <1>;
+                       };
+
+                       scmi_voltd: protocol@17 {
+                               reg = <0x17>;
+
+                               scmi_reguls: regulators {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+
+                                       scmi_reg11: reg11@0 {
+                                               reg = <0>;
+                                               regulator-name = "reg11";
+                                               regulator-min-microvolt = <1100000>;
+                                               regulator-max-microvolt = <1100000>;
+                                       };
+
+                                       scmi_reg18: reg18@1 {
+                                               voltd-name = "reg18";
+                                               reg = <1>;
+                                               regulator-name = "reg18";
+                                               regulator-min-microvolt = <1800000>;
+                                               regulator-max-microvolt = <1800000>;
+                                       };
+
+                                       scmi_usb33: usb33@2 {
+                                               reg = <2>;
+                                               regulator-name = "usb33";
+                                               regulator-min-microvolt = <3300000>;
+                                               regulator-max-microvolt = <3300000>;
+                                       };
+                               };
+                       };
+               };
+       };
+
+       soc {
+               scmi_sram: sram@2ffff000 {
+                       compatible = "mmio-sram";
+                       reg = <0x2ffff000 0x1000>;
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       ranges = <0 0x2ffff000 0x1000>;
+
+                       scmi_shm: scmi-sram@0 {
+                               compatible = "arm,scmi-shmem";
+                               reg = <0 0x80>;
+                       };
+               };
+       };
+};
+
+&reg11 {
+       status = "disabled";
+};
+
+&reg18 {
+       status = "disabled";
+};
+
+&usb33 {
+       status = "disabled";
+};
+
+&usbotg_hs {
+       usb33d-supply = <&scmi_usb33>;
+};
+
+&usbphyc {
+       vdda1v1-supply = <&scmi_reg11>;
+       vdda1v8-supply = <&scmi_reg18>;
+};
+
+/delete-node/ &clk_hse;
+/delete-node/ &clk_hsi;
+/delete-node/ &clk_lse;
+/delete-node/ &clk_lsi;
+/delete-node/ &clk_csi;
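
Two things worth noting in this new shared include: the reg values of the protocol nodes are the standard SCMI protocol IDs (0x14 clock management, 0x16 reset, 0x17 voltage domains), reached here over the OP-TEE transport; and the trailing /delete-node/ lines remove the fixed-rate oscillator nodes from any board that includes the file, presumably because those root clocks are owned by the secure firmware on SCMI boards and exposed through scmi_clk instead.
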
index 1b2fd34..e04dda5 100644 (file)
                status = "disabled";
        };
 
-       firmware {
-               optee: optee {
-                       compatible = "linaro,optee-tz";
-                       method = "smc";
-                       status = "disabled";
-               };
-
-               scmi: scmi {
-                       compatible = "linaro,scmi-optee";
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       linaro,optee-channel-id = <0>;
-                       shmem = <&scmi_shm>;
-                       status = "disabled";
-
-                       scmi_clk: protocol@14 {
-                               reg = <0x14>;
-                               #clock-cells = <1>;
-                       };
-
-                       scmi_reset: protocol@16 {
-                               reg = <0x16>;
-                               #reset-cells = <1>;
-                       };
-               };
-       };
-
        soc {
                compatible = "simple-bus";
                #address-cells = <1>;
                interrupt-parent = <&intc>;
                ranges;
 
-               scmi_sram: sram@2ffff000 {
-                       compatible = "mmio-sram";
-                       reg = <0x2ffff000 0x1000>;
-                       #address-cells = <1>;
-                       #size-cells = <1>;
-                       ranges = <0 0x2ffff000 0x1000>;
-
-                       scmi_shm: scmi-sram@0 {
-                               compatible = "arm,scmi-shmem";
-                               reg = <0 0x80>;
-                               status = "disabled";
-                       };
-               };
-
                timers2: timer@40000000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "st,stm32-cec";
                        reg = <0x40016000 0x400>;
                        interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&rcc CEC_K>, <&clk_lse>;
+                       clocks = <&rcc CEC_K>, <&rcc CEC>;
                        clock-names = "cec", "hdmi-cec";
                        status = "disabled";
                };
                usbh_ohci: usb@5800c000 {
                        compatible = "generic-ohci";
                        reg = <0x5800c000 0x1000>;
-                       clocks = <&rcc USBH>, <&usbphyc>;
+                       clocks = <&usbphyc>, <&rcc USBH>;
                        resets = <&rcc USBH_R>;
                        interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                usbh_ehci: usb@5800d000 {
                        compatible = "generic-ehci";
                        reg = <0x5800d000 0x1000>;
-                       clocks = <&rcc USBH>;
+                       clocks = <&usbphyc>, <&rcc USBH>;
                        resets = <&rcc USBH_R>;
                        interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
                        companion = <&usbh_ohci>;
index e3d3f3f..e539cc8 100644 (file)
@@ -7,6 +7,7 @@
 /dts-v1/;
 
 #include "stm32mp157a-dk1.dts"
+#include "stm32mp15-scmi.dtsi"
 
 / {
        model = "STMicroelectronics STM32MP157A-DK1 SCMI Discovery Board";
        clocks = <&scmi_clk CK_SCMI_MPU>;
 };
 
+&dsi {
+       clocks = <&rcc DSI_K>, <&scmi_clk CK_SCMI_HSE>, <&rcc DSI_PX>;
+};
+
 &gpioz {
        clocks = <&scmi_clk CK_SCMI_GPIOZ>;
 };
        resets = <&scmi_reset RST_SCMI_MCU>;
 };
 
-&optee {
-       status = "okay";
-};
-
 &rcc {
        compatible = "st,stm32mp1-rcc-secure", "syscon";
        clock-names = "hse", "hsi", "csi", "lse", "lsi";
 &rtc {
        clocks = <&scmi_clk CK_SCMI_RTCAPB>, <&scmi_clk CK_SCMI_RTC>;
 };
-
-&scmi {
-       status = "okay";
-};
-
-&scmi_shm {
-       status = "okay";
-};
index 45dcd29..97e4f94 100644 (file)
@@ -7,6 +7,7 @@
 /dts-v1/;
 
 #include "stm32mp157c-dk2.dts"
+#include "stm32mp15-scmi.dtsi"
 
 / {
        model = "STMicroelectronics STM32MP157C-DK2 SCMI Discovery Board";
@@ -34,6 +35,7 @@
 };
 
 &dsi {
+       phy-dsi-supply = <&scmi_reg18>;
        clocks = <&rcc DSI_K>, <&scmi_clk CK_SCMI_HSE>, <&rcc DSI_PX>;
 };
 
        resets = <&scmi_reset RST_SCMI_MCU>;
 };
 
-&optee {
-       status = "okay";
-};
-
 &rcc {
        compatible = "st,stm32mp1-rcc-secure", "syscon";
        clock-names = "hse", "hsi", "csi", "lse", "lsi";
 &rtc {
        clocks = <&scmi_clk CK_SCMI_RTCAPB>, <&scmi_clk CK_SCMI_RTC>;
 };
-
-&scmi {
-       status = "okay";
-};
-
-&scmi_shm {
-       status = "okay";
-};
index 458e0ca..9cf0a44 100644 (file)
@@ -7,6 +7,7 @@
 /dts-v1/;
 
 #include "stm32mp157c-ed1.dts"
+#include "stm32mp15-scmi.dtsi"
 
 / {
        model = "STMicroelectronics STM32MP157C-ED1 SCMI eval daughter";
        resets = <&scmi_reset RST_SCMI_CRYP1>;
 };
 
+&dsi {
+       clocks = <&rcc DSI_K>, <&scmi_clk CK_SCMI_HSE>, <&rcc DSI_PX>;
+};
+
 &gpioz {
        clocks = <&scmi_clk CK_SCMI_GPIOZ>;
 };
        resets = <&scmi_reset RST_SCMI_MCU>;
 };
 
-&optee {
-       status = "okay";
-};
-
 &rcc {
        compatible = "st,stm32mp1-rcc-secure", "syscon";
        clock-names = "hse", "hsi", "csi", "lse", "lsi";
 &rtc {
        clocks = <&scmi_clk CK_SCMI_RTCAPB>, <&scmi_clk CK_SCMI_RTC>;
 };
-
-&scmi {
-       status = "okay";
-};
-
-&scmi_shm {
-       status = "okay";
-};
index df9c113..3b9dd6f 100644 (file)
@@ -7,6 +7,7 @@
 /dts-v1/;
 
 #include "stm32mp157c-ev1.dts"
+#include "stm32mp15-scmi.dtsi"
 
 / {
        model = "STMicroelectronics STM32MP157C-EV1 SCMI eval daughter on eval mother";
@@ -35,6 +36,7 @@
 };
 
 &dsi {
+       phy-dsi-supply = <&scmi_reg18>;
        clocks = <&rcc DSI_K>, <&scmi_clk CK_SCMI_HSE>, <&rcc DSI_PX>;
 };
 
        resets = <&scmi_reset RST_SCMI_MCU>;
 };
 
-&optee {
-       status = "okay";
-};
-
 &rcc {
        compatible = "st,stm32mp1-rcc-secure", "syscon";
        clock-names = "hse", "hsi", "csi", "lse", "lsi";
 &rtc {
        clocks = <&scmi_clk CK_SCMI_RTCAPB>, <&scmi_clk CK_SCMI_RTC>;
 };
-
-&scmi {
-       status = "okay";
-};
-
-&scmi_shm {
-       status = "okay";
-};
index f19ed98..3706216 100644 (file)
        flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "mxicy,mx25l1606e", "winbond,w25q128";
+               compatible = "mxicy,mx25l1606e", "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <40000000>;
        };
index ca32446..f53086d 100644 (file)
@@ -93,6 +93,7 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
 CONFIG_DRM_MXSFB=y
+CONFIG_FB=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
index a81dda6..45180a2 100644 (file)
@@ -10,7 +10,7 @@
 #else
 #define MAX_DMA_ADDRESS        ({ \
        extern phys_addr_t arm_dma_zone_size; \
-       arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \
+       arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \
                (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
 #endif
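
The constant swap above is easy to misread, since the two literals differ by a single zero. A quick breakdown (illustrative note, not part of the patch):

    /*
     * 0x10000000      = 2^28 = 256 MiB -- the bound the old code compared
     * 0x100000000ULL  = 2^32 = 4 GiB   -- the 32-bit DMA boundary intended
     *
     * The ULL suffix matters as well: 0x100000000 does not fit in 32 bits,
     * and it keeps (0x100000000ULL - PAGE_OFFSET) in 64-bit arithmetic so
     * the comparison against a phys_addr_t arm_dma_zone_size (which can be
     * wider than 32 bits with LPAE) is not truncated.
     */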
 
index f1d0a78..41536fe 100644 (file)
@@ -112,19 +112,6 @@ static __always_inline void set_domain(unsigned int val)
 }
 #endif
 
-#ifdef CONFIG_CPU_USE_DOMAINS
-#define modify_domain(dom,type)                                        \
-       do {                                                    \
-               unsigned int domain = get_domain();             \
-               domain &= ~domain_mask(dom);                    \
-               domain = domain | domain_val(dom, type);        \
-               set_domain(domain);                             \
-       } while (0)
-
-#else
-static inline void modify_domain(unsigned dom, unsigned type)  { }
-#endif
-
 /*
  * Generate the T (user) versions of the LDR/STR and related
  * instructions (inline assembly)
index eba7cbc..7fcdc78 100644 (file)
@@ -139,11 +139,9 @@ extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
 extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
 extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
 void __arm_iomem_set_ro(void __iomem *ptr, size_t size);
-extern void __iounmap(volatile void __iomem *addr);
 
 extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
        unsigned int, void *);
-extern void (*arch_iounmap)(volatile void __iomem *);
 
 /*
  * Bad read/write accesses...
@@ -380,7 +378,7 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
 #define ioremap_wc ioremap_wc
 #define ioremap_wt ioremap_wc
 
-void iounmap(volatile void __iomem *iomem_cookie);
+void iounmap(volatile void __iomem *io_addr);
 #define iounmap iounmap
 
 void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
index 9228255..2b8970d 100644 (file)
@@ -27,6 +27,7 @@ enum {
        MT_HIGH_VECTORS,
        MT_MEMORY_RWX,
        MT_MEMORY_RW,
+       MT_MEMORY_RO,
        MT_ROM,
        MT_MEMORY_RWX_NONCACHED,
        MT_MEMORY_RW_DTCM,
index 93051e2..1408a6a 100644 (file)
@@ -163,5 +163,31 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
                ((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1;  \
 })
 
+
+/*
+ * Update ITSTATE after normal execution of an IT block instruction.
+ *
+ * The 8 IT state bits are split into two parts in CPSR:
+ *     ITSTATE<1:0> are in CPSR<26:25>
+ *     ITSTATE<7:2> are in CPSR<15:10>
+ */
+static inline unsigned long it_advance(unsigned long cpsr)
+{
+       if ((cpsr & 0x06000400) == 0) {
+               /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
+               cpsr &= ~PSR_IT_MASK;
+       } else {
+               /* We need to shift left ITSTATE<4:0> */
+               const unsigned long mask = 0x06001c00;  /* Mask ITSTATE<4:0> */
+               unsigned long it = cpsr & mask;
+               it <<= 1;
+               it |= it >> (27 - 10);  /* Carry ITSTATE<2> to correct place */
+               it &= mask;
+               cpsr &= ~mask;
+               cpsr |= it;
+       }
+       return cpsr;
+}
+
 #endif /* __ASSEMBLY__ */
 #endif
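
Since the ITSTATE field is split across two CPSR bit ranges, the "advance" above is a masked shift rather than a simple increment. The following stand-alone harness (a hypothetical user-space test, not part of the patch) runs the same bit manipulation on a sample value; the alignment-handler hunk later in this diff is the new in-kernel caller:

    #include <stdio.h>

    #define PSR_IT_MASK 0x0600fc00UL  /* ITSTATE<1:0> in CPSR<26:25>, <7:2> in CPSR<15:10> */

    /* Mirrors the kernel's it_advance() added above. */
    static unsigned long it_advance(unsigned long cpsr)
    {
            if ((cpsr & 0x06000400) == 0) {
                    cpsr &= ~PSR_IT_MASK;                  /* ITSTATE<2:0> == 0: IT block done */
            } else {
                    const unsigned long mask = 0x06001c00; /* ITSTATE<4:0> */
                    unsigned long it = (cpsr & mask) << 1; /* shift the block state left */
                    it |= it >> (27 - 10);                 /* carry ITSTATE<2> back into place */
                    cpsr = (cpsr & ~mask) | (it & mask);
            }
            return cpsr;
    }

    int main(void)
    {
            unsigned long cpsr = 0x06000400UL;  /* arbitrary mid-IT-block state */
            printf("%#lx -> %#lx\n", cpsr, it_advance(cpsr));
            return 0;
    }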
index 7aa3ded..6a447ac 100644 (file)
@@ -302,6 +302,7 @@ local_restart:
        b       ret_fast_syscall
 #endif
 ENDPROC(vector_swi)
+       .ltorg
 
        /*
         * This is the really slow path.  We're going to be doing
index b5e8b9a..7fd3600 100644 (file)
@@ -40,8 +40,8 @@ ENDPROC(_find_first_zero_bit_le)
  * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
  */
 ENTRY(_find_next_zero_bit_le)
-               teq     r1, #0
-               beq     3b
+               cmp     r2, r1
+               bhs     3b
                ands    ip, r2, #7
                beq     1b                      @ If new byte, goto old routine
  ARM(          ldrb    r3, [r0, r2, lsr #3]    )
@@ -81,8 +81,8 @@ ENDPROC(_find_first_bit_le)
  * Prototype: int find_next_bit(void *addr, unsigned int maxbit, int offset)
  */
 ENTRY(_find_next_bit_le)
-               teq     r1, #0
-               beq     3b
+               cmp     r2, r1
+               bhs     3b
                ands    ip, r2, #7
                beq     1b                      @ If new byte, goto old routine
  ARM(          ldrb    r3, [r0, r2, lsr #3]    )
@@ -115,8 +115,8 @@ ENTRY(_find_first_zero_bit_be)
 ENDPROC(_find_first_zero_bit_be)
 
 ENTRY(_find_next_zero_bit_be)
-               teq     r1, #0
-               beq     3b
+               cmp     r2, r1
+               bhs     3b
                ands    ip, r2, #7
                beq     1b                      @ If new byte, goto old routine
                eor     r3, r2, #0x18           @ big endian byte ordering
@@ -149,8 +149,8 @@ ENTRY(_find_first_bit_be)
 ENDPROC(_find_first_bit_be)
 
 ENTRY(_find_next_bit_be)
-               teq     r1, #0
-               beq     3b
+               cmp     r2, r1
+               bhs     3b
                ands    ip, r2, #7
                beq     1b                      @ If new byte, goto old routine
                eor     r3, r2, #0x18           @ big endian byte ordering
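
The four hunks in this file apply one repair to each find_next_* entry point: the old code only bailed out for a zero maxbit, so a caller passing offset >= maxbit would scan past the end of the bitmap. In rough C terms (illustrative only; in the assembly r1 is maxbit, r2 is the starting offset, and label 3 is the "not found" exit):

    /* old: "teq r1, #0; beq 3b" -> give up only when maxbit == 0          */
    /* new: "cmp r2, r1; bhs 3b" -> give up whenever offset >= maxbit,     */
    /*      an unsigned compare that also covers the maxbit == 0 case      */
    static unsigned int find_next_guard(unsigned int maxbit, unsigned int offset)
    {
            if (offset >= maxbit)
                    return maxbit;  /* nothing left to scan */
            return offset;          /* safe to continue the walk */
    }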
index b1a43d7..df6d673 100644 (file)
@@ -202,7 +202,7 @@ static const struct wakeup_source_info ws_info[] = {
 
 static const struct of_device_id sama5d2_ws_ids[] = {
        { .compatible = "atmel,sama5d2-gem",            .data = &ws_info[0] },
-       { .compatible = "atmel,at91rm9200-rtc",         .data = &ws_info[1] },
+       { .compatible = "atmel,sama5d2-rtc",            .data = &ws_info[1] },
        { .compatible = "atmel,sama5d3-udc",            .data = &ws_info[2] },
        { .compatible = "atmel,at91rm9200-ohci",        .data = &ws_info[2] },
        { .compatible = "usb-ohci",                     .data = &ws_info[2] },
@@ -213,24 +213,24 @@ static const struct of_device_id sama5d2_ws_ids[] = {
 };
 
 static const struct of_device_id sam9x60_ws_ids[] = {
-       { .compatible = "atmel,at91sam9x5-rtc",         .data = &ws_info[1] },
+       { .compatible = "microchip,sam9x60-rtc",        .data = &ws_info[1] },
        { .compatible = "atmel,at91rm9200-ohci",        .data = &ws_info[2] },
        { .compatible = "usb-ohci",                     .data = &ws_info[2] },
        { .compatible = "atmel,at91sam9g45-ehci",       .data = &ws_info[2] },
        { .compatible = "usb-ehci",                     .data = &ws_info[2] },
-       { .compatible = "atmel,at91sam9260-rtt",        .data = &ws_info[4] },
+       { .compatible = "microchip,sam9x60-rtt",        .data = &ws_info[4] },
        { .compatible = "cdns,sam9x60-macb",            .data = &ws_info[5] },
        { /* sentinel */ }
 };
 
 static const struct of_device_id sama7g5_ws_ids[] = {
-       { .compatible = "atmel,at91sam9x5-rtc",         .data = &ws_info[1] },
+       { .compatible = "microchip,sama7g5-rtc",        .data = &ws_info[1] },
        { .compatible = "microchip,sama7g5-ohci",       .data = &ws_info[2] },
        { .compatible = "usb-ohci",                     .data = &ws_info[2] },
        { .compatible = "atmel,at91sam9g45-ehci",       .data = &ws_info[2] },
        { .compatible = "usb-ehci",                     .data = &ws_info[2] },
        { .compatible = "microchip,sama7g5-sdhci",      .data = &ws_info[3] },
-       { .compatible = "atmel,at91sam9260-rtt",        .data = &ws_info[4] },
+       { .compatible = "microchip,sama7g5-rtt",        .data = &ws_info[4] },
        { /* sentinel */ }
 };
 
@@ -1079,7 +1079,7 @@ securam_fail:
        return ret;
 }
 
-static void at91_pm_secure_init(void)
+static void __init at91_pm_secure_init(void)
 {
        int suspend_mode;
        struct arm_smccc_res res;
index 512943e..2e20362 100644 (file)
@@ -39,6 +39,7 @@ static int axxia_boot_secondary(unsigned int cpu, struct task_struct *idle)
                return -ENOENT;
 
        syscon = of_iomap(syscon_np, 0);
+       of_node_put(syscon_np);
        if (!syscon)
                return -ENOMEM;
 
index e4f4b20..3fc4ec8 100644 (file)
@@ -372,6 +372,7 @@ static void __init cns3xxx_init(void)
                /* De-Assert SATA Reset */
                cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SATA));
        }
+       of_node_put(dn);
 
        dn = of_find_compatible_node(NULL, NULL, "cavium,cns3420-sdhci");
        if (of_device_is_available(dn)) {
@@ -385,6 +386,7 @@ static void __init cns3xxx_init(void)
                cns3xxx_pwr_clk_en(CNS3XXX_PWR_CLK_EN(SDIO));
                cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SDIO));
        }
+       of_node_put(dn);
 
        pm_power_off = cns3xxx_power_off;
 
index 8b48326..51a247c 100644 (file)
@@ -149,6 +149,7 @@ static void exynos_map_pmu(void)
        np = of_find_matching_node(NULL, exynos_dt_pmu_match);
        if (np)
                pmu_base_addr = of_iomap(np, 0);
+       of_node_put(np);
 }
 
 static void __init exynos_init_irq(void)
index 4b8ad72..32ac60b 100644 (file)
@@ -71,6 +71,7 @@ static void __init meson_smp_prepare_cpus(const char *scu_compatible,
        }
 
        sram_base = of_iomap(node, 0);
+       of_node_put(node);
        if (!sram_base) {
                pr_err("Couldn't map SRAM registers\n");
                return;
@@ -91,6 +92,7 @@ static void __init meson_smp_prepare_cpus(const char *scu_compatible,
        }
 
        scu_base = of_iomap(node, 0);
+       of_node_put(node);
        if (!scu_base) {
                pr_err("Couldn't map SCU registers\n");
                return;
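
The axxia, cns3xxx, exynos and meson hunks above all plug the same leak: of_find_*() returns a device node with an elevated refcount, and of_iomap() does not consume that reference, so the caller must drop it once the node pointer itself is no longer needed. A minimal sketch of the idiom (hypothetical helper, not from the patch):

    #include <linux/of.h>
    #include <linux/of_address.h>

    static void __iomem *map_by_compatible(const char *compat)
    {
            struct device_node *np;
            void __iomem *base;

            np = of_find_compatible_node(NULL, NULL, compat);
            if (!np)
                    return NULL;

            base = of_iomap(np, 0); /* the mapping outlives the node reference */
            of_node_put(np);        /* drop the ref taken by of_find_*() */
            return base;
    }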
index c546356..5738496 100644 (file)
@@ -549,7 +549,7 @@ static struct pxa2xx_spi_controller corgi_spi_info = {
 };
 
 static struct gpiod_lookup_table corgi_spi_gpio_table = {
-       .dev_id = "pxa2xx-spi.1",
+       .dev_id = "spi1",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW),
index 2ae06ed..2fd6659 100644 (file)
@@ -635,7 +635,7 @@ static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
 };
 
 static struct gpiod_lookup_table pxa_ssp2_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", GPIO88_HX4700_TSC2046_CS, "cs", 0, GPIO_ACTIVE_LOW),
                { },
index 753fe16..6240882 100644 (file)
@@ -140,7 +140,7 @@ struct platform_device pxa_spi_ssp4 = {
 };
 
 static struct gpiod_lookup_table pxa_ssp3_gpio_table = {
-       .dev_id = "pxa2xx-spi.3",
+       .dev_id = "spi3",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS1, "cs", 0, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS2, "cs", 1, GPIO_ACTIVE_LOW),
@@ -149,7 +149,7 @@ static struct gpiod_lookup_table pxa_ssp3_gpio_table = {
 };
 
 static struct gpiod_lookup_table pxa_ssp4_gpio_table = {
-       .dev_id = "pxa2xx-spi.4",
+       .dev_id = "spi4",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS3, "cs", 0, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS4, "cs", 1, GPIO_ACTIVE_LOW),
index f98dc61..98423a9 100644 (file)
@@ -207,7 +207,7 @@ static struct spi_board_info littleton_spi_devices[] __initdata = {
 };
 
 static struct gpiod_lookup_table littleton_spi_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", LITTLETON_GPIO_LCD_CS, "cs", 0, GPIO_ACTIVE_LOW),
                { },
index 20456a5..0827ebc 100644 (file)
@@ -994,7 +994,7 @@ static struct pxa2xx_spi_controller magician_spi_info = {
 };
 
 static struct gpiod_lookup_table magician_spi_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                /* NOTE: chip select must be a GPIO; incompatible with hw PXA SPI framing */
                GPIO_LOOKUP_IDX("gpio-pxa", GPIO14_MAGICIAN_TSC2046_CS, "cs", 0, GPIO_ACTIVE_LOW),
index dd88953..9964729 100644 (file)
@@ -578,7 +578,7 @@ static struct pxa2xx_spi_controller spitz_spi_info = {
 };
 
 static struct gpiod_lookup_table spitz_spi_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW),
index d035205..c4d4162 100644 (file)
@@ -623,7 +623,7 @@ static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
 };
 
 static struct gpiod_lookup_table pxa_ssp1_gpio_table = {
-       .dev_id = "pxa2xx-spi.1",
+       .dev_id = "spi1",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", GPIO24_ZIPITZ2_WIFI_CS, "cs", 0, GPIO_ACTIVE_LOW),
                { },
@@ -631,7 +631,7 @@ static struct gpiod_lookup_table pxa_ssp1_gpio_table = {
 };
 
 static struct gpiod_lookup_table pxa_ssp2_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", GPIO88_ZIPITZ2_LCD_CS, "cs", 0, GPIO_ACTIVE_LOW),
                { },
index 87389d9..30d781d 100644 (file)
@@ -311,7 +311,7 @@ void __init rockchip_suspend_init(void)
                                             &match);
        if (!match) {
                pr_err("Failed to find PMU node\n");
-               return;
+               goto out_put;
        }
        pm_data = (struct rockchip_pm_data *) match->data;
 
@@ -320,9 +320,12 @@ void __init rockchip_suspend_init(void)
 
                if (ret) {
                        pr_err("%s: matches init error %d\n", __func__, ret);
-                       return;
+                       goto out_put;
                }
        }
 
        suspend_set_ops(pm_data->ops);
+
+out_put:
+       of_node_put(np);
 }
index d1fdb60..c7c17c0 100644 (file)
@@ -218,13 +218,13 @@ void __init spear_setup_of_timer(void)
        irq = irq_of_parse_and_map(np, 0);
        if (!irq) {
                pr_err("%s: No irq passed for timer via DT\n", __func__);
-               return;
+               goto err_put_np;
        }
 
        gpt_base = of_iomap(np, 0);
        if (!gpt_base) {
                pr_err("%s: of iomap failed\n", __func__);
-               return;
+               goto err_put_np;
        }
 
        gpt_clk = clk_get_sys("gpt0", NULL);
@@ -239,6 +239,8 @@ void __init spear_setup_of_timer(void)
                goto err_prepare_enable_clk;
        }
 
+       of_node_put(np);
+
        spear_clockevent_init(irq);
        spear_clocksource_init();
 
@@ -248,4 +250,6 @@ err_prepare_enable_clk:
        clk_put(gpt_clk);
 err_iomap:
        iounmap(gpt_base);
+err_put_np:
+       of_node_put(np);
 }
index a3a4589..fc439c2 100644 (file)
@@ -631,7 +631,11 @@ config CPU_USE_DOMAINS
        bool
        help
          This option enables or disables the use of domain switching
-         via the set_fs() function.
+         using the DACR (domain access control register) to protect memory
+         domains from each other. In Linux we use three domains: kernel, user
+         and IO. The domains are used to protect userspace from kernelspace
+         and to handle IO-space as a special type of memory by assigning
+         manager or client roles to running code (such as a process).
 
 config CPU_V7M_NUM_IRQ
        int "Number of external interrupts connected to the NVIC"
index 6f49955..f8dd0b3 100644 (file)
@@ -935,6 +935,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        if (type == TYPE_LDST)
                do_alignment_finish_ldst(addr, instr, regs, offset);
 
+       if (thumb_mode(regs))
+               regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
+
        return 0;
 
  bad_or_fault:
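
For context on the two added lines: do_alignment() has just emulated the faulting instruction rather than letting the CPU retry it, so the ITSTATE update the hardware would have performed never happened. Advancing it by hand via it_advance() (the helper moved into ptrace.h earlier in this diff) keeps the remaining instructions of a Thumb IT block executing under the correct conditions.
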
index 576c0e6..2129070 100644 (file)
@@ -418,7 +418,7 @@ void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
                                                   __builtin_return_address(0));
 }
 
-void __iounmap(volatile void __iomem *io_addr)
+void iounmap(volatile void __iomem *io_addr)
 {
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
        struct static_vm *svm;
@@ -446,13 +446,6 @@ void __iounmap(volatile void __iomem *io_addr)
 
        vunmap(addr);
 }
-
-void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
-
-void iounmap(volatile void __iomem *cookie)
-{
-       arch_iounmap(cookie);
-}
 EXPORT_SYMBOL(iounmap);
 
 #if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
index 5e2be37..cd17e32 100644 (file)
@@ -296,6 +296,13 @@ static struct mem_type mem_types[] __ro_after_init = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
+       [MT_MEMORY_RO] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                            L_PTE_XN | L_PTE_RDONLY,
+               .prot_l1   = PMD_TYPE_TABLE,
+               .prot_sect = PMD_TYPE_SECT,
+               .domain    = DOMAIN_KERNEL,
+       },
        [MT_ROM] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
@@ -489,6 +496,7 @@ static void __init build_mem_type_table(void)
 
                        /* Also setup NX memory mapping */
                        mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
+                       mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_XN;
                }
                if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
                        /*
@@ -568,6 +576,7 @@ static void __init build_mem_type_table(void)
                mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+               mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 #endif
 
                /*
@@ -587,6 +596,8 @@ static void __init build_mem_type_table(void)
                        mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
                        mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_S;
+                       mem_types[MT_MEMORY_RO].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
                        mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
@@ -647,6 +658,8 @@ static void __init build_mem_type_table(void)
        mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
        mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd;
+       mem_types[MT_MEMORY_RO].prot_pte |= kern_pgprot;
        mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
        mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
        mem_types[MT_ROM].prot_sect |= cp->pmd;
@@ -1360,7 +1373,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
                map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);
                map.virtual = FDT_FIXED_BASE;
                map.length = FDT_FIXED_SIZE;
-               map.type = MT_ROM;
+               map.type = MT_MEMORY_RO;
                create_mapping(&map);
        }
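
MT_MEMORY_RO is a new kernel mapping type that is readable but neither
writable (L_PTE_RDONLY) nor executable (L_PTE_XN), and, unlike MT_ROM, it
picks up the SMP-shareable and kernel protection attributes applied in the
build_mem_type_table() hunks above. The devicemaps_init() change moves the
fixed FDT mapping onto it; a hedged sketch of the same request through the
map_desc interface, using the FDT constants visible in the hunk:

    #include <asm/mach/map.h>

    static void __init map_fdt_ro(phys_addr_t atags)
    {
            struct map_desc map;

            map.pfn     = __phys_to_pfn(atags & SECTION_MASK);
            map.virtual = FDT_FIXED_BASE;
            map.length  = FDT_FIXED_SIZE;
            map.type    = MT_MEMORY_RO;   /* RO + XN, shareable on SMP */
            create_mapping(&map);
    }
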
 
index 2658f52..c42deba 100644 (file)
@@ -230,14 +230,7 @@ void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
        return (void *)phys_addr;
 }
 
-void __iounmap(volatile void __iomem *addr)
-{
-}
-EXPORT_SYMBOL(__iounmap);
-
-void (*arch_iounmap)(volatile void __iomem *);
-
-void iounmap(volatile void __iomem *addr)
+void iounmap(volatile void __iomem *io_addr)
 {
 }
 EXPORT_SYMBOL(iounmap);
index fb9f3eb..8bc7a2d 100644 (file)
@@ -108,8 +108,7 @@ static unsigned int spectre_v2_install_workaround(unsigned int method)
 #else
 static unsigned int spectre_v2_install_workaround(unsigned int method)
 {
-       pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n",
-               smp_processor_id());
+       pr_info_once("Spectre V2: workarounds disabled by configuration\n");
 
        return SPECTRE_VULNERABLE;
 }
@@ -209,10 +208,10 @@ static int spectre_bhb_install_workaround(int method)
                        return SPECTRE_VULNERABLE;
 
                spectre_bhb_method = method;
-       }
 
-       pr_info("CPU%u: Spectre BHB: using %s workaround\n",
-               smp_processor_id(), spectre_bhb_method_name(method));
+               pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n",
+                       smp_processor_id(), spectre_bhb_method_name(method));
+       }
 
        return SPECTRE_MITIGATED;
 }
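
Two logging fixes land here: the vulnerable-configuration message drops its
per-CPU prefix and becomes pr_info_once(), and the BHB message moves inside
the if-block so it fires only when a mitigation method is first installed
rather than once per CPU. For reference, pr_info_once() behaves roughly like
this simplified model (the real kernel routes through DO_ONCE_LITE rather
than a bare static flag):

    #define demo_pr_info_once(fmt, ...)                     \
    ({                                                      \
            static bool __already_printed;                  \
                                                            \
            if (!__already_printed) {                       \
                    __already_printed = true;               \
                    pr_info(fmt, ##__VA_ARGS__);            \
            }                                               \
    })
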
index 9731735..facc889 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <asm/probes.h>
+#include <asm/ptrace.h>
 #include <asm/kprobes.h>
 
 void __init arm_probes_decode_init(void);
@@ -35,31 +36,6 @@ void __init find_str_pc_offset(void);
 #endif
 
 
-/*
- * Update ITSTATE after normal execution of an IT block instruction.
- *
- * The 8 IT state bits are split into two parts in CPSR:
- *     ITSTATE<1:0> are in CPSR<26:25>
- *     ITSTATE<7:2> are in CPSR<15:10>
- */
-static inline unsigned long it_advance(unsigned long cpsr)
-{
-       if ((cpsr & 0x06000400) == 0) {
-               /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
-               cpsr &= ~PSR_IT_MASK;
-       } else {
-               /* We need to shift left ITSTATE<4:0> */
-               const unsigned long mask = 0x06001c00;  /* Mask ITSTATE<4:0> */
-               unsigned long it = cpsr & mask;
-               it <<= 1;
-               it |= it >> (27 - 10);  /* Carry ITSTATE<2> to correct place */
-               it &= mask;
-               cpsr &= ~mask;
-               cpsr |= it;
-       }
-       return cpsr;
-}
-
 static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs)
 {
        long cpsr = regs->ARM_cpsr;
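
it_advance() leaves probes.h; the new #include <asm/ptrace.h> added above is
where the helper moves to, so code outside the probes layer can advance the
Thumb IT state as well. As a worked example of the bit manipulation, here is
the same logic as a standalone program stepping one IT-state advance (the
sample CPSR value is invented):

    #include <stdio.h>

    #define PSR_IT_MASK 0x0600fc00UL /* ITSTATE<1:0> at CPSR<26:25>, <7:2> at <15:10> */

    /* Same logic as the function removed above. */
    static unsigned long it_advance(unsigned long cpsr)
    {
            if ((cpsr & 0x06000400) == 0) {
                    cpsr &= ~PSR_IT_MASK;   /* ITSTATE<2:0> == 0: IT block done */
            } else {
                    const unsigned long mask = 0x06001c00;  /* ITSTATE<4:0> */
                    unsigned long it = cpsr & mask;

                    it <<= 1;
                    it |= it >> (27 - 10);  /* carry ITSTATE<2> into place */
                    it &= mask;
                    cpsr &= ~mask;
                    cpsr |= it;
            }
            return cpsr;
    }

    int main(void)
    {
            unsigned long cpsr = 0x06001c00UL;  /* invented: mid-IT-block state */

            printf("before %#010lx, after %#010lx\n", cpsr, it_advance(cpsr));
            /* prints: before 0x06001c00, after 0x04001c00 */
            return 0;
    }
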
index 84a1cea..309648c 100644 (file)
@@ -63,11 +63,12 @@ out:
 
 unsigned long __pfn_to_mfn(unsigned long pfn)
 {
-       struct rb_node *n = phys_to_mach.rb_node;
+       struct rb_node *n;
        struct xen_p2m_entry *entry;
        unsigned long irqflags;
 
        read_lock_irqsave(&p2m_lock, irqflags);
+       n = phys_to_mach.rb_node;
        while (n) {
                entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
                if (entry->pfn <= pfn &&
@@ -152,10 +153,11 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
        int rc;
        unsigned long irqflags;
        struct xen_p2m_entry *p2m_entry;
-       struct rb_node *n = phys_to_mach.rb_node;
+       struct rb_node *n;
 
        if (mfn == INVALID_P2M_ENTRY) {
                write_lock_irqsave(&p2m_lock, irqflags);
+               n = phys_to_mach.rb_node;
                while (n) {
                        p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
                        if (p2m_entry->pfn <= pfn &&
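
Both p2m hunks fix the same race: the rb-tree root was sampled before
p2m_lock was taken, so a concurrent insert rebalancing the tree could hand
the walker a stale root node. Reading phys_to_mach.rb_node only under the
lock closes the window; the pattern, as a sketch with the lookup body
elided:

    read_lock_irqsave(&p2m_lock, irqflags);
    n = phys_to_mach.rb_node;       /* must be read under the lock */
    while (n) {
            entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
            /* ... compare entry->pfn against pfn, descend left or right ... */
    }
    read_unlock_irqrestore(&p2m_lock, irqflags);
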
index 1652a98..340e611 100644 (file)
@@ -101,6 +101,7 @@ config ARM64
        select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
        select ARCH_WANT_LD_ORPHAN_WARN
        select ARCH_WANTS_NO_INSTR
+       select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
        select ARCH_HAS_UBSAN_SANITIZE_ALL
        select ARM_AMBA
        select ARM_ARCH_TIMER
@@ -126,6 +127,7 @@ config ARM64
        select GENERIC_CPU_VULNERABILITIES
        select GENERIC_EARLY_IOREMAP
        select GENERIC_IDLE_POLL_SETUP
+       select GENERIC_IOREMAP
        select GENERIC_IRQ_IPI
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
@@ -188,6 +190,7 @@ config ARM64
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_GCC_PLUGINS
        select HAVE_HW_BREAKPOINT if PERF_EVENTS
+       select HAVE_IOREMAP_PROT
        select HAVE_IRQ_TIME_ACCOUNTING
        select HAVE_KVM
        select HAVE_NMI
@@ -226,6 +229,7 @@ config ARM64
        select THREAD_INFO_IN_TASK
        select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD
        select TRACE_IRQFLAGS_SUPPORT
+       select TRACE_IRQFLAGS_NMI_SUPPORT
        help
          ARM 64-bit (AArch64) Linux support.
 
@@ -503,6 +507,22 @@ config ARM64_ERRATUM_834220
 
          If unsure, say Y.
 
+config ARM64_ERRATUM_1742098
+       bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence"
+       depends on COMPAT
+       default y
+       help
+         This option removes the AES hwcap for aarch32 user-space to
+         work around erratum 1742098 on Cortex-A57 and Cortex-A72.
+
+         Affected parts may corrupt the AES state if an interrupt is
+         taken between a pair of AES instructions. These instructions
+         are only present if the cryptography extensions are present.
+         All software should have a fallback implementation for CPUs
+         that don't implement the cryptography extensions.
+
+         If unsure, say Y.
+
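
The workaround named in the help text is userspace-visible only as a missing
hwcap bit: affected parts simply stop advertising AES to aarch32 tasks, and
well-behaved crypto code takes its non-accelerated fallback. One plausible
shape for such a fixup, assuming the ARM64_WORKAROUND_1742098 capability
name; this sketch is not quoted from the patch:

    static void __init elf_hwcap_fixup(void)
    {
            if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
                    compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
    }
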
 config ARM64_ERRATUM_845719
        bool "Cortex-A53: 845719: a load might read incorrect data"
        depends on COMPAT
@@ -821,6 +841,23 @@ config ARM64_ERRATUM_2224489
 
          If unsure, say Y.
 
+config ARM64_ERRATUM_2441009
+       bool "Cortex-A510: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
+       default y
+       select ARM64_WORKAROUND_REPEAT_TLBI
+       help
+         This option adds a workaround for ARM Cortex-A510 erratum #2441009.
+
+         Under very rare circumstances, affected Cortex-A510 CPUs
+         may not handle a race between a break-before-make sequence on one
+         CPU, and another CPU accessing the same page. This could allow a
+         store to a page that has been unmapped.
+
+         Work around this by adding the affected CPUs to the list that needs
+         TLB sequences to be done twice.
+
+         If unsure, say Y.
+
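
ARM64_WORKAROUND_REPEAT_TLBI, which this entry selects, does what the name
says: every affected TLB invalidation is issued a second time after a
barrier, closing the completion window the erratum describes. A hedged
C-level sketch of the idea; the real kernel patches the extra sequence into
the tlbflush inline assembly via alternatives, and the tlbi_vale1is() and
dsb_ish() helpers below are assumed stand-in names:

    static inline void flush_one_user_page(unsigned long addr)
    {
            tlbi_vale1is(addr);
            dsb_ish();
            tlbi_vale1is(addr);     /* the repeat that the workaround adds */
            dsb_ish();
    }
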
 config ARM64_ERRATUM_2064142
        bool "Cortex-A510: 2064142: workaround TRBE register writes while disabled"
        depends on CORESIGHT_TRBE
index ebe80fa..a0e3ded 100644 (file)
@@ -16,7 +16,7 @@
 
 OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
 
-targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo
+targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo Image.zst
 
 $(obj)/Image: vmlinux FORCE
        $(call if_changed,objcopy)
@@ -35,3 +35,6 @@ $(obj)/Image.lzma: $(obj)/Image FORCE
 
 $(obj)/Image.lzo: $(obj)/Image FORCE
        $(call if_changed,lzo)
+
+$(obj)/Image.zst: $(obj)/Image FORCE
+       $(call if_changed,zstd)
index 66023d5..d084c33 100644 (file)
@@ -9,6 +9,14 @@
                /delete-node/ cpu@3;
        };
 
+       timer {
+               compatible = "arm,armv8-timer";
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+       };
+
        pmu {
                compatible = "arm,cortex-a53-pmu";
                interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
index a4be040..967d2cd 100644 (file)
@@ -29,6 +29,8 @@
                        device_type = "cpu";
                        compatible = "brcm,brahma-b53";
                        reg = <0x0>;
+                       enable-method = "spin-table";
+                       cpu-release-addr = <0x0 0xfff8>;
                        next-level-cache = <&l2>;
                };
 
index 3170661..9c233c5 100644 (file)
                        interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&uart0_bus>;
-                       clocks = <&cmu_peri CLK_GOUT_UART0_EXT_UCLK>,
-                                <&cmu_peri CLK_GOUT_UART0_PCLK>;
+                       clocks = <&cmu_peri CLK_GOUT_UART0_PCLK>,
+                                <&cmu_peri CLK_GOUT_UART0_EXT_UCLK>;
                        clock-names = "uart", "clk_uart_baud0";
                        samsung,uart-fifosize = <64>;
                        status = "disabled";
                        interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&uart1_bus>;
-                       clocks = <&cmu_peri CLK_GOUT_UART1_EXT_UCLK>,
-                                <&cmu_peri CLK_GOUT_UART1_PCLK>;
+                       clocks = <&cmu_peri CLK_GOUT_UART1_PCLK>,
+                                <&cmu_peri CLK_GOUT_UART1_EXT_UCLK>;
                        clock-names = "uart", "clk_uart_baud0";
                        samsung,uart-fifosize = <256>;
                        status = "disabled";
                        interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&uart2_bus>;
-                       clocks = <&cmu_peri CLK_GOUT_UART2_EXT_UCLK>,
-                                <&cmu_peri CLK_GOUT_UART2_PCLK>;
+                       clocks = <&cmu_peri CLK_GOUT_UART2_PCLK>,
+                                <&cmu_peri CLK_GOUT_UART2_EXT_UCLK>;
                        clock-names = "uart", "clk_uart_baud0";
                        samsung,uart-fifosize = <256>;
                        status = "disabled";
index 92465f7..d5cdd77 100644 (file)
                        little-endian;
                };
 
-               efuse@1e80000 {
+               sfp: efuse@1e80000 {
                        compatible = "fsl,ls1028a-sfp";
                        reg = <0x0 0x1e80000 0x0 0x10000>;
+                       clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+                                           QORIQ_CLK_PLL_DIV(4)>;
+                       clock-names = "sfp";
                        #address-cells = <1>;
                        #size-cells = <1>;
 
index 4c3ac42..9a4de73 100644 (file)
 &iomuxc {
        pinctrl_eqos: eqosgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC                             0x3
-                       MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO                           0x3
-                       MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0                       0x91
-                       MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1                       0x91
-                       MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2                       0x91
-                       MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3                       0x91
-                       MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK       0x91
-                       MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL                 0x91
-                       MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0                       0x1f
-                       MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1                       0x1f
-                       MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2                       0x1f
-                       MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3                       0x1f
-                       MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL                 0x1f
-                       MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK       0x1f
-                       MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22                               0x19
+                       MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC                             0x2
+                       MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO                           0x2
+                       MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0                       0x90
+                       MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1                       0x90
+                       MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2                       0x90
+                       MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3                       0x90
+                       MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK       0x90
+                       MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL                 0x90
+                       MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0                       0x16
+                       MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1                       0x16
+                       MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2                       0x16
+                       MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3                       0x16
+                       MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL                 0x16
+                       MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK       0x16
+                       MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22                               0x10
                >;
        };
 
        pinctrl_fec: fecgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_SAI1_RXD2__ENET1_MDC               0x3
-                       MX8MP_IOMUXC_SAI1_RXD3__ENET1_MDIO              0x3
-                       MX8MP_IOMUXC_SAI1_RXD4__ENET1_RGMII_RD0         0x91
-                       MX8MP_IOMUXC_SAI1_RXD5__ENET1_RGMII_RD1         0x91
-                       MX8MP_IOMUXC_SAI1_RXD6__ENET1_RGMII_RD2         0x91
-                       MX8MP_IOMUXC_SAI1_RXD7__ENET1_RGMII_RD3         0x91
-                       MX8MP_IOMUXC_SAI1_TXC__ENET1_RGMII_RXC          0x91
-                       MX8MP_IOMUXC_SAI1_TXFS__ENET1_RGMII_RX_CTL      0x91
-                       MX8MP_IOMUXC_SAI1_TXD0__ENET1_RGMII_TD0         0x1f
-                       MX8MP_IOMUXC_SAI1_TXD1__ENET1_RGMII_TD1         0x1f
-                       MX8MP_IOMUXC_SAI1_TXD2__ENET1_RGMII_TD2         0x1f
-                       MX8MP_IOMUXC_SAI1_TXD3__ENET1_RGMII_TD3         0x1f
-                       MX8MP_IOMUXC_SAI1_TXD4__ENET1_RGMII_TX_CTL      0x1f
-                       MX8MP_IOMUXC_SAI1_TXD5__ENET1_RGMII_TXC         0x1f
-                       MX8MP_IOMUXC_SAI1_RXD0__GPIO4_IO02              0x19
+                       MX8MP_IOMUXC_SAI1_RXD2__ENET1_MDC               0x2
+                       MX8MP_IOMUXC_SAI1_RXD3__ENET1_MDIO              0x2
+                       MX8MP_IOMUXC_SAI1_RXD4__ENET1_RGMII_RD0         0x90
+                       MX8MP_IOMUXC_SAI1_RXD5__ENET1_RGMII_RD1         0x90
+                       MX8MP_IOMUXC_SAI1_RXD6__ENET1_RGMII_RD2         0x90
+                       MX8MP_IOMUXC_SAI1_RXD7__ENET1_RGMII_RD3         0x90
+                       MX8MP_IOMUXC_SAI1_TXC__ENET1_RGMII_RXC          0x90
+                       MX8MP_IOMUXC_SAI1_TXFS__ENET1_RGMII_RX_CTL      0x90
+                       MX8MP_IOMUXC_SAI1_TXD0__ENET1_RGMII_TD0         0x16
+                       MX8MP_IOMUXC_SAI1_TXD1__ENET1_RGMII_TD1         0x16
+                       MX8MP_IOMUXC_SAI1_TXD2__ENET1_RGMII_TD2         0x16
+                       MX8MP_IOMUXC_SAI1_TXD3__ENET1_RGMII_TD3         0x16
+                       MX8MP_IOMUXC_SAI1_TXD4__ENET1_RGMII_TX_CTL      0x16
+                       MX8MP_IOMUXC_SAI1_TXD5__ENET1_RGMII_TXC         0x16
+                       MX8MP_IOMUXC_SAI1_RXD0__GPIO4_IO02              0x10
                >;
        };
 
 
        pinctrl_gpio_led: gpioledgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_NAND_READY_B__GPIO3_IO16   0x19
+                       MX8MP_IOMUXC_NAND_READY_B__GPIO3_IO16   0x140
                >;
        };
 
        pinctrl_i2c1: i2c1grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL         0x400001c3
-                       MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA         0x400001c3
+                       MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL         0x400001c2
+                       MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA         0x400001c2
                >;
        };
 
        pinctrl_i2c3: i2c3grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL         0x400001c3
-                       MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA         0x400001c3
+                       MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL         0x400001c2
+                       MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA         0x400001c2
                >;
        };
 
        pinctrl_i2c5: i2c5grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_SPDIF_RX__I2C5_SDA         0x400001c3
-                       MX8MP_IOMUXC_SPDIF_TX__I2C5_SCL         0x400001c3
+                       MX8MP_IOMUXC_SPDIF_RX__I2C5_SDA         0x400001c2
+                       MX8MP_IOMUXC_SPDIF_TX__I2C5_SCL         0x400001c2
                >;
        };
 
 
        pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19    0x41
+                       MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19    0x40
                >;
        };
 
        pinctrl_uart2: uart2grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX    0x49
-                       MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX    0x49
+                       MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX    0x140
+                       MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX    0x140
                >;
        };
 
        pinctrl_usb1_vbus: usb1grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_GPIO1_IO14__USB2_OTG_PWR   0x19
+                       MX8MP_IOMUXC_GPIO1_IO14__USB2_OTG_PWR   0x10
                >;
        };
 
                        MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1    0x1d0
                        MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2    0x1d0
                        MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3    0x1d0
-                       MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+                       MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
                >;
        };
 
                        MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1    0x1d4
                        MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2    0x1d4
                        MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3    0x1d4
-                       MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+                       MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
                >;
        };
 
                        MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1    0x1d6
                        MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2    0x1d6
                        MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3    0x1d6
-                       MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+                       MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
                >;
        };
 
index 70a701a..dd703b6 100644 (file)
 &iomuxc {
        pinctrl_eqos: eqosgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC                             0x3
-                       MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO                           0x3
-                       MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0                       0x91
-                       MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1                       0x91
-                       MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2                       0x91
-                       MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3                       0x91
-                       MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK       0x91
-                       MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL                 0x91
-                       MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0                       0x1f
-                       MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1                       0x1f
-                       MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2                       0x1f
-                       MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3                       0x1f
-                       MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL                 0x1f
-                       MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK       0x1f
-                       MX8MP_IOMUXC_NAND_DATA01__GPIO3_IO07                            0x19
+                       MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC                             0x2
+                       MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO                           0x2
+                       MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0                       0x90
+                       MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1                       0x90
+                       MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2                       0x90
+                       MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3                       0x90
+                       MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK       0x90
+                       MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL                 0x90
+                       MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0                       0x16
+                       MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1                       0x16
+                       MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2                       0x16
+                       MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3                       0x16
+                       MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL                 0x16
+                       MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK       0x16
+                       MX8MP_IOMUXC_NAND_DATA01__GPIO3_IO07                            0x10
                >;
        };
 
        pinctrl_uart2: uart2grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX    0x49
-                       MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX    0x49
+                       MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX    0x40
+                       MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX    0x40
                >;
        };
 
                        MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1    0x1d0
                        MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2    0x1d0
                        MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3    0x1d0
-                       MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+                       MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
                >;
        };
 
 
        pinctrl_reg_usb1: regusb1grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_GPIO1_IO14__GPIO1_IO14     0x19
+                       MX8MP_IOMUXC_GPIO1_IO14__GPIO1_IO14     0x10
                >;
        };
 
        pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19    0x41
+                       MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19    0x40
                >;
        };
 };
index 984a6b9..6aa720b 100644 (file)
 &iomuxc {
        pinctrl_eqos: eqosgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC                     0x3
-                       MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO                   0x3
-                       MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0               0x91
-                       MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1               0x91
-                       MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2               0x91
-                       MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3               0x91
-                       MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK       0x91
-                       MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL         0x91
-                       MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0               0x1f
-                       MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1               0x1f
-                       MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2               0x1f
-                       MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3               0x1f
-                       MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL         0x1f
-                       MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK       0x1f
+                       MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC                     0x2
+                       MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO                   0x2
+                       MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0               0x90
+                       MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1               0x90
+                       MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2               0x90
+                       MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3               0x90
+                       MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK       0x90
+                       MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL         0x90
+                       MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0               0x16
+                       MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1               0x16
+                       MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2               0x16
+                       MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3               0x16
+                       MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL         0x16
+                       MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK       0x16
                        MX8MP_IOMUXC_SAI1_MCLK__GPIO4_IO20                      0x10
                >;
        };
 
        pinctrl_i2c2: i2c2grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL         0x400001c3
-                       MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA         0x400001c3
+                       MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL         0x400001c2
+                       MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA         0x400001c2
                >;
        };
 
        pinctrl_i2c2_gpio: i2c2gpiogrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_I2C2_SCL__GPIO5_IO16       0x1e3
-                       MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17       0x1e3
+                       MX8MP_IOMUXC_I2C2_SCL__GPIO5_IO16       0x1e2
+                       MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17       0x1e2
                >;
        };
 
        pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19    0x41
+                       MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19    0x40
                >;
        };
 
        pinctrl_uart1: uart1grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX    0x49
-                       MX8MP_IOMUXC_UART1_TXD__UART1_DCE_TX    0x49
+                       MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX    0x40
+                       MX8MP_IOMUXC_UART1_TXD__UART1_DCE_TX    0x40
                >;
        };
 
                        MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1    0x1d0
                        MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2    0x1d0
                        MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3    0x1d0
-                       MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+                       MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
                >;
        };
 
                        MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1    0x1d4
                        MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2    0x1d4
                        MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3    0x1d4
-                       MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+                       MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
                >;
        };
 
                        MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1    0x1d6
                        MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2    0x1d6
                        MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3    0x1d6
-                       MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+                       MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
                >;
        };
 };
index 101d311..5212155 100644 (file)
 
        pinctrl_hog: hoggrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_GPIO1_IO09__GPIO1_IO09     0x40000041 /* DIO0 */
-                       MX8MP_IOMUXC_GPIO1_IO11__GPIO1_IO11     0x40000041 /* DIO1 */
-                       MX8MP_IOMUXC_NAND_DQS__GPIO3_IO14       0x40000041 /* M2SKT_OFF# */
-                       MX8MP_IOMUXC_SD2_DATA2__GPIO2_IO17      0x40000159 /* PCIE1_WDIS# */
-                       MX8MP_IOMUXC_SD2_DATA3__GPIO2_IO18      0x40000159 /* PCIE2_WDIS# */
-                       MX8MP_IOMUXC_SD2_CMD__GPIO2_IO14        0x40000159 /* PCIE3_WDIS# */
-                       MX8MP_IOMUXC_NAND_DATA00__GPIO3_IO06    0x40000041 /* M2SKT_RST# */
-                       MX8MP_IOMUXC_SAI1_TXD6__GPIO4_IO18      0x40000159 /* M2SKT_WDIS# */
-                       MX8MP_IOMUXC_NAND_ALE__GPIO3_IO00       0x40000159 /* M2SKT_GDIS# */
+                       MX8MP_IOMUXC_GPIO1_IO09__GPIO1_IO09     0x40000040 /* DIO0 */
+                       MX8MP_IOMUXC_GPIO1_IO11__GPIO1_IO11     0x40000040 /* DIO1 */
+                       MX8MP_IOMUXC_NAND_DQS__GPIO3_IO14       0x40000040 /* M2SKT_OFF# */
+                       MX8MP_IOMUXC_SD2_DATA2__GPIO2_IO17      0x40000150 /* PCIE1_WDIS# */
+                       MX8MP_IOMUXC_SD2_DATA3__GPIO2_IO18      0x40000150 /* PCIE2_WDIS# */
+                       MX8MP_IOMUXC_SD2_CMD__GPIO2_IO14        0x40000150 /* PCIE3_WDIS# */
+                       MX8MP_IOMUXC_NAND_DATA00__GPIO3_IO06    0x40000040 /* M2SKT_RST# */
+                       MX8MP_IOMUXC_SAI1_TXD6__GPIO4_IO18      0x40000150 /* M2SKT_WDIS# */
+                       MX8MP_IOMUXC_NAND_ALE__GPIO3_IO00       0x40000150 /* M2SKT_GDIS# */
                        MX8MP_IOMUXC_SAI3_TXD__GPIO5_IO01       0x40000104 /* UART_TERM */
                        MX8MP_IOMUXC_SAI3_TXFS__GPIO4_IO31      0x40000104 /* UART_RS485 */
                        MX8MP_IOMUXC_SAI3_TXC__GPIO5_IO00       0x40000104 /* UART_HALF */
 
        pinctrl_accel: accelgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_GPIO1_IO07__GPIO1_IO07     0x159
+                       MX8MP_IOMUXC_GPIO1_IO07__GPIO1_IO07     0x150
                >;
        };
 
        pinctrl_eqos: eqosgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC                             0x3
-                       MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO                           0x3
-                       MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0               0x91
-                       MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1               0x91
-                       MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2               0x91
-                       MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3               0x91
-                       MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK       0x91
-                       MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL         0x91
-                       MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0               0x1f
-                       MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1               0x1f
-                       MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2               0x1f
-                       MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3               0x1f
-                       MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL         0x1f
-                       MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK       0x1f
-                       MX8MP_IOMUXC_SAI3_RXD__GPIO4_IO30               0x141 /* RST# */
-                       MX8MP_IOMUXC_SAI3_RXFS__GPIO4_IO28              0x159 /* IRQ# */
+                       MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC                             0x2
+                       MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO                           0x2
+                       MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0               0x90
+                       MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1               0x90
+                       MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2               0x90
+                       MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3               0x90
+                       MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK       0x90
+                       MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL         0x90
+                       MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0               0x16
+                       MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1               0x16
+                       MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2               0x16
+                       MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3               0x16
+                       MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL         0x16
+                       MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK       0x16
+                       MX8MP_IOMUXC_SAI3_RXD__GPIO4_IO30               0x140 /* RST# */
+                       MX8MP_IOMUXC_SAI3_RXFS__GPIO4_IO28              0x150 /* IRQ# */
                >;
        };
 
        pinctrl_fec: fecgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_SAI1_RXD4__ENET1_RGMII_RD0         0x91
-                       MX8MP_IOMUXC_SAI1_RXD5__ENET1_RGMII_RD1         0x91
-                       MX8MP_IOMUXC_SAI1_RXD6__ENET1_RGMII_RD2         0x91
-                       MX8MP_IOMUXC_SAI1_RXD7__ENET1_RGMII_RD3         0x91
-                       MX8MP_IOMUXC_SAI1_TXC__ENET1_RGMII_RXC          0x91
-                       MX8MP_IOMUXC_SAI1_TXFS__ENET1_RGMII_RX_CTL      0x91
-                       MX8MP_IOMUXC_SAI1_TXD0__ENET1_RGMII_TD0         0x1f
-                       MX8MP_IOMUXC_SAI1_TXD1__ENET1_RGMII_TD1         0x1f
-                       MX8MP_IOMUXC_SAI1_TXD2__ENET1_RGMII_TD2         0x1f
-                       MX8MP_IOMUXC_SAI1_TXD3__ENET1_RGMII_TD3         0x1f
-                       MX8MP_IOMUXC_SAI1_TXD4__ENET1_RGMII_TX_CTL      0x1f
-                       MX8MP_IOMUXC_SAI1_TXD5__ENET1_RGMII_TXC         0x1f
-                       MX8MP_IOMUXC_SAI1_RXFS__ENET1_1588_EVENT0_IN    0x141
-                       MX8MP_IOMUXC_SAI1_RXC__ENET1_1588_EVENT0_OUT    0x141
+                       MX8MP_IOMUXC_SAI1_RXD4__ENET1_RGMII_RD0         0x90
+                       MX8MP_IOMUXC_SAI1_RXD5__ENET1_RGMII_RD1         0x90
+                       MX8MP_IOMUXC_SAI1_RXD6__ENET1_RGMII_RD2         0x90
+                       MX8MP_IOMUXC_SAI1_RXD7__ENET1_RGMII_RD3         0x90
+                       MX8MP_IOMUXC_SAI1_TXC__ENET1_RGMII_RXC          0x90
+                       MX8MP_IOMUXC_SAI1_TXFS__ENET1_RGMII_RX_CTL      0x90
+                       MX8MP_IOMUXC_SAI1_TXD0__ENET1_RGMII_TD0         0x16
+                       MX8MP_IOMUXC_SAI1_TXD1__ENET1_RGMII_TD1         0x16
+                       MX8MP_IOMUXC_SAI1_TXD2__ENET1_RGMII_TD2         0x16
+                       MX8MP_IOMUXC_SAI1_TXD3__ENET1_RGMII_TD3         0x16
+                       MX8MP_IOMUXC_SAI1_TXD4__ENET1_RGMII_TX_CTL      0x16
+                       MX8MP_IOMUXC_SAI1_TXD5__ENET1_RGMII_TXC         0x16
+                       MX8MP_IOMUXC_SAI1_RXFS__ENET1_1588_EVENT0_IN    0x140
+                       MX8MP_IOMUXC_SAI1_RXC__ENET1_1588_EVENT0_OUT    0x140
                >;
        };
 
 
        pinctrl_gsc: gscgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_SAI1_MCLK__GPIO4_IO20      0x159
+                       MX8MP_IOMUXC_SAI1_MCLK__GPIO4_IO20      0x150
                >;
        };
 
        pinctrl_i2c1: i2c1grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL         0x400001c3
-                       MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA         0x400001c3
+                       MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL         0x400001c2
+                       MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA         0x400001c2
                >;
        };
 
        pinctrl_i2c2: i2c2grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL         0x400001c3
-                       MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA         0x400001c3
+                       MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL         0x400001c2
+                       MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA         0x400001c2
                >;
        };
 
        pinctrl_i2c3: i2c3grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL         0x400001c3
-                       MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA         0x400001c3
+                       MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL         0x400001c2
+                       MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA         0x400001c2
                >;
        };
 
        pinctrl_i2c4: i2c4grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_I2C4_SCL__I2C4_SCL         0x400001c3
-                       MX8MP_IOMUXC_I2C4_SDA__I2C4_SDA         0x400001c3
+                       MX8MP_IOMUXC_I2C4_SCL__I2C4_SCL         0x400001c2
+                       MX8MP_IOMUXC_I2C4_SDA__I2C4_SDA         0x400001c2
                >;
        };
 
        pinctrl_ksz: kszgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_SAI3_RXC__GPIO4_IO29       0x159 /* IRQ# */
-                       MX8MP_IOMUXC_SAI3_MCLK__GPIO5_IO02      0x141 /* RST# */
+                       MX8MP_IOMUXC_SAI3_RXC__GPIO4_IO29       0x150 /* IRQ# */
+                       MX8MP_IOMUXC_SAI3_MCLK__GPIO5_IO02      0x140 /* RST# */
                >;
        };
 
        pinctrl_gpio_leds: ledgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_SD2_DATA0__GPIO2_IO15      0x19
-                       MX8MP_IOMUXC_SD2_DATA1__GPIO2_IO16      0x19
+                       MX8MP_IOMUXC_SD2_DATA0__GPIO2_IO15      0x10
+                       MX8MP_IOMUXC_SD2_DATA1__GPIO2_IO16      0x10
                >;
        };
 
        pinctrl_pmic: pmicgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_NAND_DATA01__GPIO3_IO07    0x141
+                       MX8MP_IOMUXC_NAND_DATA01__GPIO3_IO07    0x140
                >;
        };
 
        pinctrl_pps: ppsgrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_GPIO1_IO12__GPIO1_IO12     0x141
+                       MX8MP_IOMUXC_GPIO1_IO12__GPIO1_IO12     0x140
                >;
        };
 
 
        pinctrl_reg_usb2: regusb2grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_GPIO1_IO06__GPIO1_IO06     0x141
+                       MX8MP_IOMUXC_GPIO1_IO06__GPIO1_IO06     0x140
                >;
        };
 
        pinctrl_reg_wifi: regwifigrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_NAND_DATA03__GPIO3_IO09    0x119
+                       MX8MP_IOMUXC_NAND_DATA03__GPIO3_IO09    0x110
                >;
        };
 
 
        pinctrl_uart3_gpio: uart3gpiogrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_NAND_DATA02__GPIO3_IO08    0x119
+                       MX8MP_IOMUXC_NAND_DATA02__GPIO3_IO08    0x110
                >;
        };
 
index d9542df..410d0d5 100644 (file)
                                        pgc_ispdwp: power-domain@18 {
                                                #power-domain-cells = <0>;
                                                reg = <IMX8MP_POWER_DOMAIN_MEDIAMIX_ISPDWP>;
-                                               clocks = <&clk IMX8MP_CLK_MEDIA_ISP_DIV>;
+                                               clocks = <&clk IMX8MP_CLK_MEDIA_ISP_ROOT>;
                                        };
                                };
                        };
index 59ea8a2..824d401 100644 (file)
@@ -79,7 +79,7 @@
                };
        };
 
-       soc {
+       soc@0 {
                compatible = "simple-bus";
                #address-cells = <1>;
                #size-cells = <1>;
index 3b0cc85..71e373b 100644 (file)
@@ -74,7 +74,7 @@
                vdd_l17_29-supply = <&vph_pwr>;
                vdd_l20_21-supply = <&vph_pwr>;
                vdd_l25-supply = <&pm8994_s5>;
-               vdd_lvs1_2 = <&pm8994_s4>;
+               vdd_lvs1_2-supply = <&pm8994_s4>;
 
                /* S1, S2, S6 and S12 are managed by RPMPD */
 
index 7748b74..afa91ca 100644 (file)
                vdd_l17_29-supply = <&vph_pwr>;
                vdd_l20_21-supply = <&vph_pwr>;
                vdd_l25-supply = <&pm8994_s5>;
-               vdd_lvs1_2 = <&pm8994_s4>;
+               vdd_lvs1_2-supply = <&pm8994_s4>;
 
                /* S1, S2, S6 and S12 are managed by RPMPD */
 
index 0318d42..1ac2913 100644 (file)
                CPU6: cpu@102 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a57";
-                       reg = <0x0 0x101>;
+                       reg = <0x0 0x102>;
                        enable-method = "psci";
                        next-level-cache = <&L2_1>;
                };
                CPU7: cpu@103 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a57";
-                       reg = <0x0 0x101>;
+                       reg = <0x0 0x103>;
                        enable-method = "psci";
                        next-level-cache = <&L2_1>;
                };
index 9b3e3d1..d1e2df5 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2021 Google LLC.
  */
 
-#include "sc7180-trogdor.dtsi"
+/* This file must be included after sc7180-trogdor.dtsi */
 
 / {
        /* BOARD-SPECIFIC TOP LEVEL NODES */
index fe2369c..88f6a7d 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2020 Google LLC.
  */
 
-#include "sc7180-trogdor.dtsi"
+/* This file must be included after sc7180-trogdor.dtsi */
 
 &ap_sar_sensor {
        semtech,cs0-ground;
index 0692ae0..038538c 100644 (file)
 
                        power-domains = <&dispcc MDSS_GDSC>;
 
-                       clocks = <&gcc GCC_DISP_AHB_CLK>,
+                       clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
                                 <&dispcc DISP_CC_MDSS_MDP_CLK>;
                        clock-names = "iface", "core";
 
index 7d08fad..b87756b 100644 (file)
                        reg = <0x0 0x17100000 0x0 0x10000>,     /* GICD */
                              <0x0 0x17180000 0x0 0x200000>;    /* GICR * 8 */
                        interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+                       #address-cells = <2>;
+                       #size-cells = <2>;
+                       ranges;
+
+                       gic_its: msi-controller@17140000 {
+                               compatible = "arm,gic-v3-its";
+                               reg = <0x0 0x17140000 0x0 0x20000>;
+                               msi-controller;
+                               #msi-cells = <1>;
+                       };
                };
 
                timer@17420000 {
 
                        iommus = <&apps_smmu 0xe0 0x0>;
 
-                       interconnects = <&aggre1_noc MASTER_UFS_MEM &mc_virt SLAVE_EBI1>,
-                                       <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_UFS_MEM_CFG>;
+                       interconnects = <&aggre1_noc MASTER_UFS_MEM 0 &mc_virt SLAVE_EBI1 0>,
+                                       <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_UFS_MEM_CFG 0>;
                        interconnect-names = "ufs-ddr", "cpu-ufs";
                        clock-names =
                                "core_clk",
index 913d845..1977103 100644 (file)
@@ -376,7 +376,8 @@ camera: &i2c7 {
                <&cru ACLK_VIO>,
                <&cru ACLK_GIC_PRE>,
                <&cru PCLK_DDR>,
-               <&cru ACLK_HDCP>;
+               <&cru ACLK_HDCP>,
+               <&cru ACLK_VDU>;
        assigned-clock-rates =
                <600000000>, <1600000000>,
                <1000000000>,
@@ -388,6 +389,7 @@ camera: &i2c7 {
                <400000000>,
                <200000000>,
                <200000000>,
+               <400000000>,
                <400000000>;
 };
 
index fbd0346..9d5b0e8 100644 (file)
                        <&cru HCLK_PERILP1>, <&cru PCLK_PERILP1>,
                        <&cru ACLK_VIO>, <&cru ACLK_HDCP>,
                        <&cru ACLK_GIC_PRE>,
-                       <&cru PCLK_DDR>;
+                       <&cru PCLK_DDR>,
+                       <&cru ACLK_VDU>;
                assigned-clock-rates =
                         <594000000>,  <800000000>,
                        <1000000000>,
                         <100000000>,   <50000000>,
                         <400000000>, <400000000>,
                         <200000000>,
-                        <200000000>;
+                        <200000000>,
+                        <400000000>;
        };
 
        grf: syscon@ff770000 {
index 1534e11..fa953b7 100644 (file)
 };
 
 &usb_host0_xhci {
+       dr_mode = "host";
        status = "okay";
 };
 
index 7bdcecc..02d5f5a 100644 (file)
        assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru SCLK_GMAC1>;
        assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru SCLK_GMAC1>, <&gmac1_clkin>;
        clock_in_out = "input";
-       phy-mode = "rgmii-id";
+       phy-mode = "rgmii";
        phy-supply = <&vcc_3v3>;
        pinctrl-names = "default";
        pinctrl-0 = <&gmac1m1_miim
index f64b368..cdb5305 100644 (file)
                clock-names = "clk_ahb", "clk_xin";
                mmc-ddr-1_8v;
                mmc-hs200-1_8v;
-               mmc-hs400-1_8v;
                ti,trm-icp = <0x2>;
                ti,otap-del-sel-legacy = <0x0>;
                ti,otap-del-sel-mmc-hs = <0x0>;
                ti,otap-del-sel-ddr52 = <0x6>;
                ti,otap-del-sel-hs200 = <0x7>;
-               ti,otap-del-sel-hs400 = <0x4>;
        };
 
        sdhci1: mmc@fa00000 {
index be7f392..19966f7 100644 (file)
@@ -33,7 +33,7 @@
                ranges;
                #interrupt-cells = <3>;
                interrupt-controller;
-               reg = <0x00 0x01800000 0x00 0x200000>, /* GICD */
+               reg = <0x00 0x01800000 0x00 0x100000>, /* GICD */
                      <0x00 0x01900000 0x00 0x100000>, /* GICR */
                      <0x00 0x6f000000 0x00 0x2000>,   /* GICC */
                      <0x00 0x6f010000 0x00 0x1000>,   /* GICH */
index c39f243..980d1dd 100644 (file)
@@ -2,12 +2,27 @@
 #ifndef __ASM_ASM_EXTABLE_H
 #define __ASM_ASM_EXTABLE_H
 
+#include <linux/bits.h>
+#include <asm/gpr-num.h>
+
 #define EX_TYPE_NONE                   0
-#define EX_TYPE_FIXUP                  1
-#define EX_TYPE_BPF                    2
-#define EX_TYPE_UACCESS_ERR_ZERO       3
+#define EX_TYPE_BPF                    1
+#define EX_TYPE_UACCESS_ERR_ZERO       2
+#define EX_TYPE_KACCESS_ERR_ZERO       3
 #define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
 
+/* Data fields for EX_TYPE_UACCESS_ERR_ZERO */
+#define EX_DATA_REG_ERR_SHIFT  0
+#define EX_DATA_REG_ERR                GENMASK(4, 0)
+#define EX_DATA_REG_ZERO_SHIFT 5
+#define EX_DATA_REG_ZERO       GENMASK(9, 5)
+
+/* Data fields for EX_TYPE_LOAD_UNALIGNED_ZEROPAD */
+#define EX_DATA_REG_DATA_SHIFT 0
+#define EX_DATA_REG_DATA       GENMASK(4, 0)
+#define EX_DATA_REG_ADDR_SHIFT 5
+#define EX_DATA_REG_ADDR       GENMASK(9, 5)
+
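
Hoisting these field definitions above the #ifdef lets the assembly and C
flavours of EX_DATA_REG pack the same 16-bit extable data word: one 5-bit
GPR number in bits 4:0 and another in bits 9:5. A standalone worked example
for EX_TYPE_UACCESS_ERR_ZERO with err = x1 (gpr 1) and zero = wzr (gpr 31):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t data = (1u << 0) | (31u << 5); /* ERR in 4:0, ZERO in 9:5 */

            printf("extable data = %#x\n", data);   /* prints 0x3e1 */
            return 0;
    }
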
 #ifdef __ASSEMBLY__
 
 #define __ASM_EXTABLE_RAW(insn, fixup, type, data)     \
        .short          (data);                         \
        .popsection;
 
+#define EX_DATA_REG(reg, gpr)  \
+       (.L__gpr_num_##gpr << EX_DATA_REG_##reg##_SHIFT)
+
+#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)          \
+       __ASM_EXTABLE_RAW(insn, fixup,                                  \
+                         EX_TYPE_UACCESS_ERR_ZERO,                     \
+                         (                                             \
+                           EX_DATA_REG(ERR, err) |                     \
+                           EX_DATA_REG(ZERO, zero)                     \
+                         ))
+
+#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err)                     \
+       _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
+
+#define _ASM_EXTABLE_UACCESS(insn, fixup)                              \
+       _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, wzr, wzr)
+
 /*
- * Create an exception table entry for `insn`, which will branch to `fixup`
+ * Create an exception table entry for uaccess `insn`, which will branch to `fixup`
  * when an unhandled fault is taken.
  */
-       .macro          _asm_extable, insn, fixup
-       __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
+       .macro          _asm_extable_uaccess, insn, fixup
+       _ASM_EXTABLE_UACCESS(\insn, \fixup)
        .endm
 
 /*
  * Create an exception table entry for `insn` if `fixup` is provided. Otherwise
  * do nothing.
  */
-       .macro          _cond_extable, insn, fixup
-       .ifnc           \fixup,
-       _asm_extable    \insn, \fixup
+       .macro          _cond_uaccess_extable, insn, fixup
+       .ifnc                   \fixup,
+       _asm_extable_uaccess    \insn, \fixup
        .endif
        .endm
 
 #else /* __ASSEMBLY__ */
 
-#include <linux/bits.h>
 #include <linux/stringify.h>
 
-#include <asm/gpr-num.h>
-
 #define __ASM_EXTABLE_RAW(insn, fixup, type, data)     \
        ".pushsection   __ex_table, \"a\"\n"            \
        ".align         2\n"                            \
        ".short         (" data ")\n"                   \
        ".popsection\n"
 
-#define _ASM_EXTABLE(insn, fixup) \
-       __ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
-
-#define EX_DATA_REG_ERR_SHIFT  0
-#define EX_DATA_REG_ERR                GENMASK(4, 0)
-#define EX_DATA_REG_ZERO_SHIFT 5
-#define EX_DATA_REG_ZERO       GENMASK(9, 5)
-
 #define EX_DATA_REG(reg, gpr)                                          \
        "((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
 
                            EX_DATA_REG(ZERO, zero)                     \
                          ")")
 
+#define _ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, zero)          \
+       __DEFINE_ASM_GPR_NUMS                                           \
+       __ASM_EXTABLE_RAW(#insn, #fixup,                                \
+                         __stringify(EX_TYPE_KACCESS_ERR_ZERO),        \
+                         "("                                           \
+                           EX_DATA_REG(ERR, err) " | "                 \
+                           EX_DATA_REG(ZERO, zero)                     \
+                         ")")
+
 #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err)                     \
        _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
 
-#define EX_DATA_REG_DATA_SHIFT 0
-#define EX_DATA_REG_DATA       GENMASK(4, 0)
-#define EX_DATA_REG_ADDR_SHIFT 5
-#define EX_DATA_REG_ADDR       GENMASK(9, 5)
+#define _ASM_EXTABLE_UACCESS(insn, fixup)                              \
+       _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, wzr, wzr)
+
+#define _ASM_EXTABLE_KACCESS_ERR(insn, fixup, err)                     \
+       _ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, wzr)
 
 #define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr)           \
        __DEFINE_ASM_GPR_NUMS                                                   \
index 0557af8..75b211c 100644 (file)
@@ -61,7 +61,7 @@ alternative_else_nop_endif
 
 #define USER(l, x...)                          \
 9999:  x;                                      \
-       _asm_extable    9999b, l
+       _asm_extable_uaccess    9999b, l
 
 /*
  * Generate the assembly for LDTR/STTR with exception table entries.
@@ -73,8 +73,8 @@ alternative_else_nop_endif
 8889:          ldtr    \reg2, [\addr, #8];
                add     \addr, \addr, \post_inc;
 
-               _asm_extable    8888b,\l;
-               _asm_extable    8889b,\l;
+               _asm_extable_uaccess    8888b, \l;
+               _asm_extable_uaccess    8889b, \l;
        .endm
 
        .macro user_stp l, reg1, reg2, addr, post_inc
@@ -82,14 +82,14 @@ alternative_else_nop_endif
 8889:          sttr    \reg2, [\addr, #8];
                add     \addr, \addr, \post_inc;
 
-               _asm_extable    8888b,\l;
-               _asm_extable    8889b,\l;
+               _asm_extable_uaccess    8888b, \l;
+               _asm_extable_uaccess    8889b, \l;
        .endm
 
        .macro user_ldst l, inst, reg, addr, post_inc
 8888:          \inst           \reg, [\addr];
                add             \addr, \addr, \post_inc;
 
-               _asm_extable    8888b,\l;
+               _asm_extable_uaccess    8888b, \l;
        .endm
 #endif
index ead62f7..13ecc79 100644 (file)
@@ -59,9 +59,9 @@ alternative_else_nop_endif
 
        .macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
        mrs     \tmp1, id_aa64isar1_el1
-       ubfx    \tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
+       ubfx    \tmp1, \tmp1, #ID_AA64ISAR1_EL1_APA_SHIFT, #8
        mrs_s   \tmp2, SYS_ID_AA64ISAR2_EL1
-       ubfx    \tmp2, \tmp2, #ID_AA64ISAR2_APA3_SHIFT, #4
+       ubfx    \tmp2, \tmp2, #ID_AA64ISAR2_EL1_APA3_SHIFT, #4
        orr     \tmp1, \tmp1, \tmp2
        cbz     \tmp1, .Lno_addr_auth\@
        mov_q   \tmp1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
index 8c5a61a..5846145 100644 (file)
@@ -360,6 +360,20 @@ alternative_cb_end
        .endm
 
 /*
+ * idmap_get_t0sz - get the T0SZ value needed to cover the ID map
+ *
+ * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
+ * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
+ * this number conveniently equals the number of leading zeroes in
+ * the physical address of _end.
+ */
+       .macro  idmap_get_t0sz, reg
+       adrp    \reg, _end
+       orr     \reg, \reg, #(1 << VA_BITS_MIN) - 1
+       clz     \reg, \reg
+       .endm
+
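
The comment's arithmetic is easy to check with numbers: once the ORR pins
the low VA_BITS_MIN bits, counting leading zeroes gives both "64 - bits
used" and the T0SZ that covers _end. A runnable analogue, where the _end
address and VA_BITS_MIN = 48 are assumed example values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t end_pa = 0x80200000ULL;            /* assumed _end */
            uint64_t v = end_pa | ((1ULL << 48) - 1);   /* the macro's ORR */

            printf("T0SZ = %d\n", __builtin_clzll(v));  /* 16, i.e. 64 - 48 */
            return 0;
    }
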
+/*
  * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
  * ID_AA64MMFR0_EL1.PARange value
  *
@@ -423,7 +437,7 @@ alternative_endif
        b.lo    .Ldcache_op\@
        dsb     \domain
 
-       _cond_extable .Ldcache_op\@, \fixup
+       _cond_uaccess_extable .Ldcache_op\@, \fixup
        .endm
 
 /*
@@ -462,7 +476,19 @@ alternative_endif
        dsb     ish
        isb
 
-       _cond_extable .Licache_op\@, \fixup
+       _cond_uaccess_extable .Licache_op\@, \fixup
+       .endm
+
+/*
+ * load_ttbr1 - install @pgtbl as a TTBR1 page table
+ * pgtbl preserved
+ * tmp1/tmp2 clobbered, either may overlap with pgtbl
+ */
+       .macro          load_ttbr1, pgtbl, tmp1, tmp2
+       phys_to_ttbr    \tmp1, \pgtbl
+       offset_ttbr1    \tmp1, \tmp2
+       msr             ttbr1_el1, \tmp1
+       isb
        .endm
 
 /*
@@ -478,10 +504,7 @@ alternative_endif
        isb
        tlbi    vmalle1
        dsb     nsh
-       phys_to_ttbr \tmp, \page_table
-       offset_ttbr1 \tmp, \tmp2
-       msr     ttbr1_el1, \tmp
-       isb
+       load_ttbr1 \page_table, \tmp, \tmp2
        .endm
 
 /*
index 9f3e2c3..2cfc424 100644 (file)
 #define pmr_sync()     do {} while (0)
 #endif
 
-#define mb()           dsb(sy)
-#define rmb()          dsb(ld)
-#define wmb()          dsb(st)
+#define __mb()         dsb(sy)
+#define __rmb()                dsb(ld)
+#define __wmb()                dsb(st)
 
-#define dma_mb()       dmb(osh)
-#define dma_rmb()      dmb(oshld)
-#define dma_wmb()      dmb(oshst)
+#define __dma_mb()     dmb(osh)
+#define __dma_rmb()    dmb(oshld)
+#define __dma_wmb()    dmb(oshst)
 
 #define io_stop_wc()   dgh()
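
Renaming the barriers to their double-underscore forms frees the plain names
for a generic wrapper layer, which can then put instrumentation around the
raw DSB/DMB. Roughly the shape this enables; treat it as a sketch of the
asm-generic side, not verbatim kernel code:

    #define mb()    do { kcsan_mb();  __mb();  } while (0)
    #define rmb()   do { kcsan_rmb(); __rmb(); } while (0)
    #define wmb()   do { kcsan_wmb(); __wmb(); } while (0)
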
 
index 7c2181c..ca9b487 100644 (file)
@@ -5,34 +5,9 @@
 #ifndef __ASM_CACHE_H
 #define __ASM_CACHE_H
 
-#include <asm/cputype.h>
-#include <asm/mte-def.h>
-
-#define CTR_L1IP_SHIFT         14
-#define CTR_L1IP_MASK          3
-#define CTR_DMINLINE_SHIFT     16
-#define CTR_IMINLINE_SHIFT     0
-#define CTR_IMINLINE_MASK      0xf
-#define CTR_ERG_SHIFT          20
-#define CTR_CWG_SHIFT          24
-#define CTR_CWG_MASK           15
-#define CTR_IDC_SHIFT          28
-#define CTR_DIC_SHIFT          29
-
-#define CTR_CACHE_MINLINE_MASK \
-       (0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT)
-
-#define CTR_L1IP(ctr)          (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
-
-#define ICACHE_POLICY_VPIPT    0
-#define ICACHE_POLICY_RESERVED 1
-#define ICACHE_POLICY_VIPT     2
-#define ICACHE_POLICY_PIPT     3
-
 #define L1_CACHE_SHIFT         (6)
 #define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
 
-
 #define CLIDR_LOUU_SHIFT       27
 #define CLIDR_LOC_SHIFT                24
 #define CLIDR_LOUIS_SHIFT      21
 #include <linux/bitops.h>
 #include <linux/kasan-enabled.h>
 
+#include <asm/cputype.h>
+#include <asm/mte-def.h>
+#include <asm/sysreg.h>
+
 #ifdef CONFIG_KASAN_SW_TAGS
 #define ARCH_SLAB_MINALIGN     (1ULL << KASAN_SHADOW_SCALE_SHIFT)
 #elif defined(CONFIG_KASAN_HW_TAGS)
@@ -66,6 +45,12 @@ static inline unsigned int arch_slab_minalign(void)
 #define arch_slab_minalign() arch_slab_minalign()
 #endif
 
+#define CTR_CACHE_MINLINE_MASK \
+       (0xf << CTR_EL0_DMINLINE_SHIFT | \
+        CTR_EL0_IMINLINE_MASK << CTR_EL0_IMINLINE_SHIFT)
+
+#define CTR_L1IP(ctr)          SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
+
 #define ICACHEF_ALIASING       0
 #define ICACHEF_VPIPT          1
 extern unsigned long __icache_flags;
@@ -86,7 +71,7 @@ static __always_inline int icache_is_vpipt(void)
 
 static inline u32 cache_type_cwg(void)
 {
-       return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
+       return (read_cpuid_cachetype() >> CTR_EL0_CWG_SHIFT) & CTR_EL0_CWG_MASK;
 }
 
 #define __read_mostly __section(".data..read_mostly")
@@ -120,12 +105,12 @@ static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void)
 {
        u32 ctr = read_cpuid_cachetype();
 
-       if (!(ctr & BIT(CTR_IDC_SHIFT))) {
+       if (!(ctr & BIT(CTR_EL0_IDC_SHIFT))) {
                u64 clidr = read_sysreg(clidr_el1);
 
                if (CLIDR_LOC(clidr) == 0 ||
                    (CLIDR_LOUIS(clidr) == 0 && CLIDR_LOUU(clidr) == 0))
-                       ctr |= BIT(CTR_IDC_SHIFT);
+                       ctr |= BIT(CTR_EL0_IDC_SHIFT);
        }
 
        return ctr;
index 5a228e2..37185e9 100644 (file)
@@ -105,13 +105,6 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
 #define flush_icache_range flush_icache_range
 
 /*
- * Cache maintenance functions used by the DMA API. Not to be used directly.
- */
-extern void __dma_map_area(const void *, size_t, int);
-extern void __dma_unmap_area(const void *, size_t, int);
-extern void __dma_flush_area(const void *, size_t);
-
-/*
  * Copy user data from/to a page which is mapped into a different
  * process's address space.  Really, we want to allow our "user
  * space" model to handle this.
index 115cdec..fd7a922 100644 (file)
@@ -46,6 +46,7 @@ struct cpuinfo_arm64 {
        u64             reg_midr;
        u64             reg_revidr;
        u64             reg_gmid;
+       u64             reg_smidr;
 
        u64             reg_id_aa64dfr0;
        u64             reg_id_aa64dfr1;
index e95c4df..a444c89 100644 (file)
  * @cpu_die:   Makes a cpu leave the kernel. Must not fail. Called from the
  *             cpu being killed.
  * @cpu_kill:  Ensures a cpu has left the kernel. Called from another cpu.
- * @cpu_init_idle: Reads any data necessary to initialize CPU idle states for
- *                a proposed logical id.
- * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
- *               to wrong parameters or error conditions. Called from the
- *               CPU being suspended. Must be called with IRQs disabled.
  */
 struct cpu_operations {
        const char      *name;
@@ -49,10 +44,6 @@ struct cpu_operations {
        void            (*cpu_die)(unsigned int cpu);
        int             (*cpu_kill)(unsigned int cpu);
 #endif
-#ifdef CONFIG_CPU_IDLE
-       int             (*cpu_init_idle)(unsigned int);
-       int             (*cpu_suspend)(unsigned long);
-#endif
 };
 
 int __init init_cpu_ops(int cpu);
index 14a8f3d..fd7d75a 100644 (file)
@@ -11,7 +11,7 @@
 #include <asm/hwcap.h>
 #include <asm/sysreg.h>
 
-#define MAX_CPU_FEATURES       64
+#define MAX_CPU_FEATURES       128
 #define cpu_feature(x)         KERNEL_HWCAP_ ## x
 
 #ifndef __ASSEMBLY__
@@ -673,7 +673,7 @@ static inline bool supports_clearbhb(int scope)
                isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
 
        return cpuid_feature_extract_unsigned_field(isar2,
-                                                   ID_AA64ISAR2_CLEARBHB_SHIFT);
+                                                   ID_AA64ISAR2_EL1_BC_SHIFT);
 }
 
 const struct cpumask *system_32bit_el0_cpumask(void);
@@ -908,7 +908,10 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
 }
 
 extern struct arm64_ftr_override id_aa64mmfr1_override;
+extern struct arm64_ftr_override id_aa64pfr0_override;
 extern struct arm64_ftr_override id_aa64pfr1_override;
+extern struct arm64_ftr_override id_aa64zfr0_override;
+extern struct arm64_ftr_override id_aa64smfr0_override;
 extern struct arm64_ftr_override id_aa64isar1_override;
 extern struct arm64_ftr_override id_aa64isar2_override;
 
index 14a19d1..2047713 100644 (file)
@@ -4,21 +4,6 @@
 
 #include <asm/proc-fns.h>
 
-#ifdef CONFIG_CPU_IDLE
-extern int arm_cpuidle_init(unsigned int cpu);
-extern int arm_cpuidle_suspend(int index);
-#else
-static inline int arm_cpuidle_init(unsigned int cpu)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline int arm_cpuidle_suspend(int index)
-{
-       return -EOPNOTSUPP;
-}
-#endif
-
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 #include <asm/arch_gicv3.h>
 
index 34ceff0..2630faa 100644 (file)
        msr     cptr_el2, x0                    // Disable copro. traps to EL2
 .endm
 
-/* SVE register access */
-.macro __init_el2_nvhe_sve
-       mrs     x1, id_aa64pfr0_el1
-       ubfx    x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
-       cbz     x1, .Lskip_sve_\@
-
-       bic     x0, x0, #CPTR_EL2_TZ            // Also disable SVE traps
-       msr     cptr_el2, x0                    // Disable copro. traps to EL2
-       isb
-       mov     x1, #ZCR_ELx_LEN_MASK           // SVE: Enable full vector
-       msr_s   SYS_ZCR_EL2, x1                 // length for EL1.
-.Lskip_sve_\@:
-.endm
-
-/* SME register access and priority mapping */
-.macro __init_el2_nvhe_sme
-       mrs     x1, id_aa64pfr1_el1
-       ubfx    x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
-       cbz     x1, .Lskip_sme_\@
-
-       bic     x0, x0, #CPTR_EL2_TSM           // Also disable SME traps
-       msr     cptr_el2, x0                    // Disable copro. traps to EL2
-       isb
-
-       mrs     x1, sctlr_el2
-       orr     x1, x1, #SCTLR_ELx_ENTP2        // Disable TPIDR2 traps
-       msr     sctlr_el2, x1
-       isb
-
-       mov     x1, #0                          // SMCR controls
-
-       mrs_s   x2, SYS_ID_AA64SMFR0_EL1
-       ubfx    x2, x2, #ID_AA64SMFR0_FA64_SHIFT, #1 // Full FP in SM?
-       cbz     x2, .Lskip_sme_fa64_\@
-
-       orr     x1, x1, SMCR_ELx_FA64_MASK
-.Lskip_sme_fa64_\@:
-
-       orr     x1, x1, #SMCR_ELx_LEN_MASK      // Enable full SME vector
-       msr_s   SYS_SMCR_EL2, x1                // length for EL1.
-
-       mrs_s   x1, SYS_SMIDR_EL1               // Priority mapping supported?
-       ubfx    x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
-       cbz     x1, .Lskip_sme_\@
-
-       msr_s   SYS_SMPRIMAP_EL2, xzr           // Make all priorities equal
-
-       mrs     x1, id_aa64mmfr1_el1            // HCRX_EL2 present?
-       ubfx    x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4
-       cbz     x1, .Lskip_sme_\@
-
-       mrs_s   x1, SYS_HCRX_EL2
-       orr     x1, x1, #HCRX_EL2_SMPME_MASK    // Enable priority mapping
-       msr_s   SYS_HCRX_EL2, x1
-
-.Lskip_sme_\@:
-.endm
-
 /* Disable any fine grained traps */
 .macro __init_el2_fgt
        mrs     x1, id_aa64mmfr0_el1
        __init_el2_hstr
        __init_el2_nvhe_idregs
        __init_el2_nvhe_cptr
-       __init_el2_nvhe_sve
-       __init_el2_nvhe_sme
        __init_el2_fgt
        __init_el2_nvhe_prepare_eret
 .endm
index daff882..71ed5fd 100644 (file)
@@ -62,10 +62,12 @@ enum fixed_addresses {
 #endif /* CONFIG_ACPI_APEI_GHES */
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#ifdef CONFIG_RELOCATABLE
+       FIX_ENTRY_TRAMP_TEXT4,  /* one extra slot for the data page */
+#endif
        FIX_ENTRY_TRAMP_TEXT3,
        FIX_ENTRY_TRAMP_TEXT2,
        FIX_ENTRY_TRAMP_TEXT1,
-       FIX_ENTRY_TRAMP_DATA,
 #define TRAMP_VALIAS           (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1))
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
        __end_of_permanent_fixed_addresses,
index aa443d8..cef4ae7 100644 (file)
@@ -85,7 +85,7 @@
 #define KERNEL_HWCAP_PACA              __khwcap_feature(PACA)
 #define KERNEL_HWCAP_PACG              __khwcap_feature(PACG)
 
-#define __khwcap2_feature(x)           (const_ilog2(HWCAP2_ ## x) + 32)
+#define __khwcap2_feature(x)           (const_ilog2(HWCAP2_ ## x) + 64)
 #define KERNEL_HWCAP_DCPODP            __khwcap2_feature(DCPODP)
 #define KERNEL_HWCAP_SVE2              __khwcap2_feature(SVE2)
 #define KERNEL_HWCAP_SVEAES            __khwcap2_feature(SVEAES)
 #define KERNEL_HWCAP_SME_F32F32                __khwcap2_feature(SME_F32F32)
 #define KERNEL_HWCAP_SME_FA64          __khwcap2_feature(SME_FA64)
 #define KERNEL_HWCAP_WFXT              __khwcap2_feature(WFXT)
+#define KERNEL_HWCAP_EBF16             __khwcap2_feature(EBF16)
 
 /*
  * This yields a mask that user programs can use to figure out what
index 3995652..87dd42d 100644 (file)
@@ -163,13 +163,16 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
 /*
  * I/O memory mapping functions.
  */
-extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
-extern void iounmap(volatile void __iomem *addr);
-extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 
-#define ioremap(addr, size)            __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
-#define ioremap_wc(addr, size)         __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
-#define ioremap_np(addr, size)         __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
+bool ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot);
+#define ioremap_allowed ioremap_allowed
+
+#define _PAGE_IOREMAP PROT_DEVICE_nGnRE
+
+#define ioremap_wc(addr, size) \
+       ioremap_prot((addr), (size), PROT_NORMAL_NC)
+#define ioremap_np(addr, size) \
+       ioremap_prot((addr), (size), PROT_DEVICE_nGnRnE)
 
 /*
  * io{read,write}{16,32,64}be() macros
@@ -184,6 +187,15 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 
 #include <asm-generic/io.h>
 
+#define ioremap_cache ioremap_cache
+static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size)
+{
+       if (pfn_is_map_memory(__phys_to_pfn(addr)))
+               return (void __iomem *)__phys_to_virt(addr);
+
+       return ioremap_prot(addr, size, PROT_NORMAL);
+}
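With _PAGE_IOREMAP and the ioremap_allowed() hook in place, plain ioremap() is assumed to come from the generic implementation, roughly along these lines (kernel-context sketch; only ioremap_prot() and ioremap_allowed() are taken from the code above, the helper name is invented):

    static void __iomem *generic_ioremap_sketch(phys_addr_t phys, size_t size)
    {
            /* Arch veto hook; arm64 rejects ranges covered by the linear map. */
            if (!ioremap_allowed(phys, size, _PAGE_IOREMAP))
                    return NULL;

            return ioremap_prot(phys, size, _PAGE_IOREMAP);
    }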
+
 /*
  * More restrictive address range checking than the default implementation
  * (PHYS_OFFSET and PHYS_MASK taken into account).
index 96dc0f7..02e59fa 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef __ASM_KERNEL_PGTABLE_H
 #define __ASM_KERNEL_PGTABLE_H
 
+#include <asm/boot.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/sparsemem.h>
 
  */
 #if ARM64_KERNEL_USES_PMD_MAPS
 #define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
-#define IDMAP_PGTABLE_LEVELS   (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1)
 #else
 #define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS)
-#define IDMAP_PGTABLE_LEVELS   (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT))
 #endif
 
 
                        + EARLY_PUDS((vstart), (vend))  /* each PUD needs a next level page table */    \
                        + EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */
 #define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
-#define IDMAP_DIR_SIZE         (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
+
+/* the initial ID map may need two extra pages if it needs to be extended */
+#if VA_BITS < 48
+#define INIT_IDMAP_DIR_SIZE    ((INIT_IDMAP_DIR_PAGES + 2) * PAGE_SIZE)
+#else
+#define INIT_IDMAP_DIR_SIZE    (INIT_IDMAP_DIR_PAGES * PAGE_SIZE)
+#endif
+#define INIT_IDMAP_DIR_PAGES   EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE)
 
 /* Initial memory map size */
 #if ARM64_KERNEL_USES_PMD_MAPS
 #define SWAPPER_PMD_FLAGS      (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
 #if ARM64_KERNEL_USES_PMD_MAPS
-#define SWAPPER_MM_MMUFLAGS    (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
+#define SWAPPER_RW_MMUFLAGS    (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
+#define SWAPPER_RX_MMUFLAGS    (SWAPPER_RW_MMUFLAGS | PMD_SECT_RDONLY)
 #else
-#define SWAPPER_MM_MMUFLAGS    (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
+#define SWAPPER_RW_MMUFLAGS    (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
+#define SWAPPER_RX_MMUFLAGS    (SWAPPER_RW_MMUFLAGS | PTE_RDONLY)
 #endif
 
 /*
index 47a1e25..de32152 100644 (file)
@@ -363,11 +363,6 @@ struct kvm_vcpu_arch {
        struct kvm_pmu pmu;
 
        /*
-        * Anything that is not used directly from assembly code goes
-        * here.
-        */
-
-       /*
         * Guest registers we preserve during guest debugging.
         *
         * These shadow registers are updated by the kvm_handle_sys_reg
index 0af70d9..227d256 100644 (file)
 #include <linux/types.h>
 #include <asm/bug.h>
 
+#if VA_BITS > 48
 extern u64                     vabits_actual;
+#else
+#define vabits_actual          ((u64)VA_BITS)
+#endif
 
 extern s64                     memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
@@ -351,6 +355,11 @@ static inline void *phys_to_virt(phys_addr_t x)
 })
 
 void dump_mem_limit(void);
+
+static inline bool defer_reserve_crashkernel(void)
+{
+       return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32);
+}
 #endif /* !ASSEMBLY */
 
 /*
index 6770667..c7ccd82 100644 (file)
@@ -60,8 +60,7 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
  * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
  * physical memory, in which case it will be smaller.
  */
-extern u64 idmap_t0sz;
-extern u64 idmap_ptrs_per_pgd;
+extern int idmap_t0sz;
 
 /*
  * Ensure TCR.T0SZ is set to the provided value.
@@ -106,13 +105,18 @@ static inline void cpu_uninstall_idmap(void)
                cpu_switch_mm(mm->pgd, mm);
 }
 
-static inline void cpu_install_idmap(void)
+static inline void __cpu_install_idmap(pgd_t *idmap)
 {
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_idmap_tcr_t0sz();
 
-       cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
+       cpu_switch_mm(lm_alias(idmap), &init_mm);
+}
+
+static inline void cpu_install_idmap(void)
+{
+       __cpu_install_idmap(idmap_pg_dir);
 }
 
 /*
@@ -143,7 +147,7 @@ static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
  * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
  * avoiding the possibility of conflicting TLB entries being allocated.
  */
-static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
+static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
 {
        typedef void (ttbr_replace_func)(phys_addr_t);
        extern ttbr_replace_func idmap_cpu_replace_ttbr1;
@@ -166,7 +170,7 @@ static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
 
        replace_phys = (void *)__pa_symbol(function_nocfi(idmap_cpu_replace_ttbr1));
 
-       cpu_install_idmap();
+       __cpu_install_idmap(idmap);
        replace_phys(ttbr1);
        cpu_uninstall_idmap();
 }
index dd3d12b..5ab8d16 100644 (file)
  */
 #ifdef CONFIG_ARM64_PA_BITS_52
 /*
- * This should be GENMASK_ULL(47, 2).
  * TTBR_ELx[1] is RES0 in this configuration.
  */
-#define TTBR_BADDR_MASK_52     (((UL(1) << 46) - 1) << 2)
+#define TTBR_BADDR_MASK_52     GENMASK_ULL(47, 2)
 #endif
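The equivalence with the old open-coded constant is easy to check in isolation (GENMASK_ULL expanded by hand for the standalone check):

    #include <assert.h>

    #define GENMASK_ULL(h, l) ((~0ULL >> (63 - (h))) & (~0ULL << (l)))

    int main(void)
    {
            /* Bits [47:2] set -- exactly the old ((1 << 46) - 1) << 2. */
            assert(GENMASK_ULL(47, 2) == (((1ULL << 46) - 1) << 2));
            return 0;
    }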
 
 #ifdef CONFIG_ARM64_VA_BITS_52
index 0b6632f..b5df82a 100644 (file)
        __flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+static inline bool arch_thp_swp_supported(void)
+{
+       return !system_supports_mte();
+}
+#define arch_thp_swp_supported arch_thp_swp_supported
+
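This override matters because of the assumed generic default, which otherwise allows THP swap unconditionally (presumed shape of the include/linux/huge_mm.h fallback); on arm64 with MTE, per-page tags are saved at swap-out, so THP swap is declined here:

    #ifndef arch_thp_swp_supported
    static inline bool arch_thp_swp_supported(void)
    {
            return true;
    }
    #endif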
 /*
  * Outside of a few very special situations (e.g. hibernation), we always
  * use broadcast TLB invalidation instructions, therefore a spurious page
@@ -427,6 +433,16 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
        return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
 }
 
+/*
+ * Select all bits except the pfn
+ */
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+       unsigned long pfn = pte_pfn(pte);
+
+       return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
+}
+
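The XOR works because pfn_pte(pfn, __pgprot(0)) produces a pte with only the PFN field populated, so XORing it back out leaves exactly the attribute bits. A toy model with a made-up field layout:

    #include <assert.h>
    #include <stdint.h>

    #define TOY_PFN_MASK 0x0000fffffffff000ULL      /* pretend bits [47:12] hold the PFN */

    static uint64_t toy_pte_pgprot(uint64_t pte)
    {
            uint64_t pfn_only = pte & TOY_PFN_MASK; /* stands in for pfn_pte(pfn, 0) */

            return pfn_only ^ pte;                  /* everything except the PFN */
    }

    int main(void)
    {
            assert(toy_pte_pgprot(0x0000000040001fc3ULL) == 0xfc3);
            return 0;
    }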
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * See the comment in include/linux/pgtable.h
index 9e58749..86eb0bf 100644 (file)
@@ -272,8 +272,9 @@ void tls_preserve_current_state(void);
 
 static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 {
+       s32 previous_syscall = regs->syscallno;
        memset(regs, 0, sizeof(*regs));
-       forget_syscall(regs);
+       regs->syscallno = previous_syscall;
        regs->pc = pc;
 
        if (system_uses_irq_prio_masking())
index 42ff95d..7c71358 100644 (file)
 
 #define SYS_ID_AA64PFR0_EL1            sys_reg(3, 0, 0, 4, 0)
 #define SYS_ID_AA64PFR1_EL1            sys_reg(3, 0, 0, 4, 1)
-#define SYS_ID_AA64ZFR0_EL1            sys_reg(3, 0, 0, 4, 4)
-#define SYS_ID_AA64SMFR0_EL1           sys_reg(3, 0, 0, 4, 5)
 
 #define SYS_ID_AA64DFR0_EL1            sys_reg(3, 0, 0, 5, 0)
 #define SYS_ID_AA64DFR1_EL1            sys_reg(3, 0, 0, 5, 1)
 #define SYS_ID_AA64AFR0_EL1            sys_reg(3, 0, 0, 5, 4)
 #define SYS_ID_AA64AFR1_EL1            sys_reg(3, 0, 0, 5, 5)
 
-#define SYS_ID_AA64ISAR1_EL1           sys_reg(3, 0, 0, 6, 1)
-#define SYS_ID_AA64ISAR2_EL1           sys_reg(3, 0, 0, 6, 2)
-
 #define SYS_ID_AA64MMFR0_EL1           sys_reg(3, 0, 0, 7, 0)
 #define SYS_ID_AA64MMFR1_EL1           sys_reg(3, 0, 0, 7, 1)
 #define SYS_ID_AA64MMFR2_EL1           sys_reg(3, 0, 0, 7, 2)
 #define SYS_MAIR_EL1                   sys_reg(3, 0, 10, 2, 0)
 #define SYS_AMAIR_EL1                  sys_reg(3, 0, 10, 3, 0)
 
-#define SYS_LORSA_EL1                  sys_reg(3, 0, 10, 4, 0)
-#define SYS_LOREA_EL1                  sys_reg(3, 0, 10, 4, 1)
-#define SYS_LORN_EL1                   sys_reg(3, 0, 10, 4, 2)
-#define SYS_LORC_EL1                   sys_reg(3, 0, 10, 4, 3)
-#define SYS_LORID_EL1                  sys_reg(3, 0, 10, 4, 7)
-
 #define SYS_VBAR_EL1                   sys_reg(3, 0, 12, 0, 0)
 #define SYS_DISR_EL1                   sys_reg(3, 0, 12, 1, 1)
 
 #define SYS_CNTKCTL_EL1                        sys_reg(3, 0, 14, 1, 0)
 
 #define SYS_CCSIDR_EL1                 sys_reg(3, 1, 0, 0, 0)
-#define SYS_GMID_EL1                   sys_reg(3, 1, 0, 0, 4)
 #define SYS_AIDR_EL1                   sys_reg(3, 1, 0, 0, 7)
 
 #define SMIDR_EL1_IMPLEMENTER_SHIFT    24
 #define SMIDR_EL1_SMPS_SHIFT   15
 #define SMIDR_EL1_AFFINITY_SHIFT       0
 
-#define SYS_CTR_EL0                    sys_reg(3, 3, 0, 0, 1)
-#define SYS_DCZID_EL0                  sys_reg(3, 3, 0, 0, 7)
-
 #define SYS_RNDR_EL0                   sys_reg(3, 3, 2, 4, 0)
 #define SYS_RNDRRS_EL0                 sys_reg(3, 3, 2, 4, 1)
 
 /* Position the attr at the correct index */
 #define MAIR_ATTRIDX(attr, idx)                ((attr) << ((idx) * 8))
 
-/* id_aa64isar1 */
-#define ID_AA64ISAR1_I8MM_SHIFT                52
-#define ID_AA64ISAR1_DGH_SHIFT         48
-#define ID_AA64ISAR1_BF16_SHIFT                44
-#define ID_AA64ISAR1_SPECRES_SHIFT     40
-#define ID_AA64ISAR1_SB_SHIFT          36
-#define ID_AA64ISAR1_FRINTTS_SHIFT     32
-#define ID_AA64ISAR1_GPI_SHIFT         28
-#define ID_AA64ISAR1_GPA_SHIFT         24
-#define ID_AA64ISAR1_LRCPC_SHIFT       20
-#define ID_AA64ISAR1_FCMA_SHIFT                16
-#define ID_AA64ISAR1_JSCVT_SHIFT       12
-#define ID_AA64ISAR1_API_SHIFT         8
-#define ID_AA64ISAR1_APA_SHIFT         4
-#define ID_AA64ISAR1_DPB_SHIFT         0
-
-#define ID_AA64ISAR1_APA_NI                    0x0
-#define ID_AA64ISAR1_APA_ARCHITECTED           0x1
-#define ID_AA64ISAR1_APA_ARCH_EPAC             0x2
-#define ID_AA64ISAR1_APA_ARCH_EPAC2            0x3
-#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC       0x4
-#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB   0x5
-#define ID_AA64ISAR1_API_NI                    0x0
-#define ID_AA64ISAR1_API_IMP_DEF               0x1
-#define ID_AA64ISAR1_API_IMP_DEF_EPAC          0x2
-#define ID_AA64ISAR1_API_IMP_DEF_EPAC2         0x3
-#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC    0x4
-#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB        0x5
-#define ID_AA64ISAR1_GPA_NI                    0x0
-#define ID_AA64ISAR1_GPA_ARCHITECTED           0x1
-#define ID_AA64ISAR1_GPI_NI                    0x0
-#define ID_AA64ISAR1_GPI_IMP_DEF               0x1
-
-/* id_aa64isar2 */
-#define ID_AA64ISAR2_CLEARBHB_SHIFT    28
-#define ID_AA64ISAR2_APA3_SHIFT                12
-#define ID_AA64ISAR2_GPA3_SHIFT                8
-#define ID_AA64ISAR2_RPRES_SHIFT       4
-#define ID_AA64ISAR2_WFXT_SHIFT                0
-
-#define ID_AA64ISAR2_RPRES_8BIT                0x0
-#define ID_AA64ISAR2_RPRES_12BIT       0x1
-/*
- * Value 0x1 has been removed from the architecture, and is
- * reserved, but has not yet been removed from the ARM ARM
- * as of ARM DDI 0487G.b.
- */
-#define ID_AA64ISAR2_WFXT_NI           0x0
-#define ID_AA64ISAR2_WFXT_SUPPORTED    0x2
-
-#define ID_AA64ISAR2_APA3_NI                   0x0
-#define ID_AA64ISAR2_APA3_ARCHITECTED          0x1
-#define ID_AA64ISAR2_APA3_ARCH_EPAC            0x2
-#define ID_AA64ISAR2_APA3_ARCH_EPAC2           0x3
-#define ID_AA64ISAR2_APA3_ARCH_EPAC2_FPAC      0x4
-#define ID_AA64ISAR2_APA3_ARCH_EPAC2_FPAC_CMB  0x5
-
-#define ID_AA64ISAR2_GPA3_NI                   0x0
-#define ID_AA64ISAR2_GPA3_ARCHITECTED          0x1
-
 /* id_aa64pfr0 */
 #define ID_AA64PFR0_CSV3_SHIFT         60
 #define ID_AA64PFR0_CSV2_SHIFT         56
 #define ID_AA64PFR1_MTE                        0x2
 #define ID_AA64PFR1_MTE_ASYMM          0x3
 
-/* id_aa64zfr0 */
-#define ID_AA64ZFR0_F64MM_SHIFT                56
-#define ID_AA64ZFR0_F32MM_SHIFT                52
-#define ID_AA64ZFR0_I8MM_SHIFT         44
-#define ID_AA64ZFR0_SM4_SHIFT          40
-#define ID_AA64ZFR0_SHA3_SHIFT         32
-#define ID_AA64ZFR0_BF16_SHIFT         20
-#define ID_AA64ZFR0_BITPERM_SHIFT      16
-#define ID_AA64ZFR0_AES_SHIFT          4
-#define ID_AA64ZFR0_SVEVER_SHIFT       0
-
-#define ID_AA64ZFR0_F64MM              0x1
-#define ID_AA64ZFR0_F32MM              0x1
-#define ID_AA64ZFR0_I8MM               0x1
-#define ID_AA64ZFR0_BF16               0x1
-#define ID_AA64ZFR0_SM4                        0x1
-#define ID_AA64ZFR0_SHA3               0x1
-#define ID_AA64ZFR0_BITPERM            0x1
-#define ID_AA64ZFR0_AES                        0x1
-#define ID_AA64ZFR0_AES_PMULL          0x2
-#define ID_AA64ZFR0_SVEVER_SVE2                0x1
-
-/* id_aa64smfr0 */
-#define ID_AA64SMFR0_FA64_SHIFT                63
-#define ID_AA64SMFR0_I16I64_SHIFT      52
-#define ID_AA64SMFR0_F64F64_SHIFT      48
-#define ID_AA64SMFR0_I8I32_SHIFT       36
-#define ID_AA64SMFR0_F16F32_SHIFT      35
-#define ID_AA64SMFR0_B16F32_SHIFT      34
-#define ID_AA64SMFR0_F32F32_SHIFT      32
-
-#define ID_AA64SMFR0_FA64              0x1
-#define ID_AA64SMFR0_I16I64            0xf
-#define ID_AA64SMFR0_F64F64            0x1
-#define ID_AA64SMFR0_I8I32             0xf
-#define ID_AA64SMFR0_F16F32            0x1
-#define ID_AA64SMFR0_B16F32            0x1
-#define ID_AA64SMFR0_F32F32            0x1
-
 /* id_aa64mmfr0 */
 #define ID_AA64MMFR0_ECV_SHIFT         60
 #define ID_AA64MMFR0_FGT_SHIFT         56
 
 /* id_aa64mmfr1 */
 #define ID_AA64MMFR1_ECBHB_SHIFT       60
+#define ID_AA64MMFR1_TIDCP1_SHIFT      52
 #define ID_AA64MMFR1_HCX_SHIFT         40
 #define ID_AA64MMFR1_AFP_SHIFT         44
 #define ID_AA64MMFR1_ETS_SHIFT         36
 #define ID_AA64MMFR1_VMIDBITS_8                0
 #define ID_AA64MMFR1_VMIDBITS_16       2
 
+#define ID_AA64MMFR1_TIDCP1_NI         0
+#define ID_AA64MMFR1_TIDCP1_IMP                1
+
 /* id_aa64mmfr2 */
 #define ID_AA64MMFR2_E0PD_SHIFT                60
 #define ID_AA64MMFR2_EVT_SHIFT         56
 #define MVFR2_FPMISC_SHIFT             4
 #define MVFR2_SIMDMISC_SHIFT           0
 
-#define DCZID_DZP_SHIFT                        4
-#define DCZID_BS_SHIFT                 0
-
 #define CPACR_EL1_FPEN_EL1EN   (BIT(20)) /* enable EL1 access */
 #define CPACR_EL1_FPEN_EL0EN   (BIT(21)) /* enable EL0 access, if EL1EN set */
 
 #define SYS_RGSR_EL1_SEED_MASK 0xffffUL
 
 /* GMID_EL1 field definitions */
-#define SYS_GMID_EL1_BS_SHIFT  0
-#define SYS_GMID_EL1_BS_SIZE   4
+#define GMID_EL1_BS_SHIFT      0
+#define GMID_EL1_BS_SIZE       4
 
 /* TFSR{,E0}_EL1 bit definitions */
 #define SYS_TFSR_EL1_TF0_SHIFT 0
 
 #endif
 
+#define SYS_FIELD_GET(reg, field, val)         \
+                FIELD_GET(reg##_##field##_MASK, val)
+
 #define SYS_FIELD_PREP(reg, field, val)                \
                 FIELD_PREP(reg##_##field##_MASK, val)
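SYS_FIELD_GET() is the read-side twin of SYS_FIELD_PREP(): the register and field names are pasted together to select the generated _MASK. For example (kernel-context sketch, assuming the CTR_EL0_L1Ip_MASK definition from the sysreg tables):

    #include <linux/bitfield.h>     /* FIELD_GET()/FIELD_PREP() */
    #include <linux/types.h>

    static u32 ctr_l1ip(u64 ctr)
    {
            /* Expands to FIELD_GET(CTR_EL0_L1Ip_MASK, ctr). */
            return SYS_FIELD_GET(CTR_EL0, L1Ip, ctr);
    }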
 
index 63f9c82..2fc9f08 100644 (file)
@@ -232,34 +232,34 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
  * The "__xxx_error" versions set the third argument to -EFAULT if an error
  * occurs, and leave it unchanged on success.
  */
-#define __get_mem_asm(load, reg, x, addr, err)                         \
+#define __get_mem_asm(load, reg, x, addr, err, type)                   \
        asm volatile(                                                   \
        "1:     " load "        " reg "1, [%2]\n"                       \
        "2:\n"                                                          \
-       _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1)                 \
+       _ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1)          \
        : "+r" (err), "=&r" (x)                                         \
        : "r" (addr))
 
-#define __raw_get_mem(ldr, x, ptr, err)                                        \
-do {                                                                   \
-       unsigned long __gu_val;                                         \
-       switch (sizeof(*(ptr))) {                                       \
-       case 1:                                                         \
-               __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err));   \
-               break;                                                  \
-       case 2:                                                         \
-               __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err));   \
-               break;                                                  \
-       case 4:                                                         \
-               __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err));       \
-               break;                                                  \
-       case 8:                                                         \
-               __get_mem_asm(ldr, "%x",  __gu_val, (ptr), (err));      \
-               break;                                                  \
-       default:                                                        \
-               BUILD_BUG();                                            \
-       }                                                               \
-       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
+#define __raw_get_mem(ldr, x, ptr, err, type)                                  \
+do {                                                                           \
+       unsigned long __gu_val;                                                 \
+       switch (sizeof(*(ptr))) {                                               \
+       case 1:                                                                 \
+               __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err), type);     \
+               break;                                                          \
+       case 2:                                                                 \
+               __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err), type);     \
+               break;                                                          \
+       case 4:                                                                 \
+               __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err), type);         \
+               break;                                                          \
+       case 8:                                                                 \
+               __get_mem_asm(ldr, "%x",  __gu_val, (ptr), (err), type);        \
+               break;                                                          \
+       default:                                                                \
+               BUILD_BUG();                                                    \
+       }                                                                       \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
 } while (0)
 
 /*
@@ -274,7 +274,7 @@ do {                                                                        \
        __chk_user_ptr(ptr);                                            \
                                                                        \
        uaccess_ttbr0_enable();                                         \
-       __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err);               \
+       __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err, U);            \
        uaccess_ttbr0_disable();                                        \
                                                                        \
        (x) = __rgu_val;                                                \
@@ -314,40 +314,40 @@ do {                                                                      \
                                                                        \
        __uaccess_enable_tco_async();                                   \
        __raw_get_mem("ldr", *((type *)(__gkn_dst)),                    \
-                     (__force type *)(__gkn_src), __gkn_err);          \
+                     (__force type *)(__gkn_src), __gkn_err, K);       \
        __uaccess_disable_tco_async();                                  \
                                                                        \
        if (unlikely(__gkn_err))                                        \
                goto err_label;                                         \
 } while (0)
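The new `type` argument is plain token pasting: U selects the user-access extable entry (_ASM_EXTABLE_UACCESS_*), K the kernel-access one. The same dispatch trick in isolation (invented names, user-space demo):

    #include <stdio.h>

    #define HANDLE_UACCESS(x)       printf("user fixup %d\n", (x))
    #define HANDLE_KACCESS(x)       printf("kernel fixup %d\n", (x))
    #define HANDLE(type, x)         HANDLE_##type##ACCESS(x)

    int main(void)
    {
            HANDLE(U, 1);   /* -> HANDLE_UACCESS(1) */
            HANDLE(K, 2);   /* -> HANDLE_KACCESS(2) */
            return 0;
    }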
 
-#define __put_mem_asm(store, reg, x, addr, err)                                \
+#define __put_mem_asm(store, reg, x, addr, err, type)                  \
        asm volatile(                                                   \
        "1:     " store "       " reg "1, [%2]\n"                       \
        "2:\n"                                                          \
-       _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)                           \
+       _ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0)                    \
        : "+r" (err)                                                    \
        : "r" (x), "r" (addr))
 
-#define __raw_put_mem(str, x, ptr, err)                                        \
-do {                                                                   \
-       __typeof__(*(ptr)) __pu_val = (x);                              \
-       switch (sizeof(*(ptr))) {                                       \
-       case 1:                                                         \
-               __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err));   \
-               break;                                                  \
-       case 2:                                                         \
-               __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err));   \
-               break;                                                  \
-       case 4:                                                         \
-               __put_mem_asm(str, "%w", __pu_val, (ptr), (err));       \
-               break;                                                  \
-       case 8:                                                         \
-               __put_mem_asm(str, "%x", __pu_val, (ptr), (err));       \
-               break;                                                  \
-       default:                                                        \
-               BUILD_BUG();                                            \
-       }                                                               \
+#define __raw_put_mem(str, x, ptr, err, type)                                  \
+do {                                                                           \
+       __typeof__(*(ptr)) __pu_val = (x);                                      \
+       switch (sizeof(*(ptr))) {                                               \
+       case 1:                                                                 \
+               __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err), type);     \
+               break;                                                          \
+       case 2:                                                                 \
+               __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err), type);     \
+               break;                                                          \
+       case 4:                                                                 \
+               __put_mem_asm(str, "%w", __pu_val, (ptr), (err), type);         \
+               break;                                                          \
+       case 8:                                                                 \
+               __put_mem_asm(str, "%x", __pu_val, (ptr), (err), type);         \
+               break;                                                          \
+       default:                                                                \
+               BUILD_BUG();                                                    \
+       }                                                                       \
 } while (0)
 
 /*
@@ -362,7 +362,7 @@ do {                                                                        \
        __chk_user_ptr(__rpu_ptr);                                      \
                                                                        \
        uaccess_ttbr0_enable();                                         \
-       __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err);               \
+       __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err, U);            \
        uaccess_ttbr0_disable();                                        \
 } while (0)
 
@@ -400,7 +400,7 @@ do {                                                                        \
                                                                        \
        __uaccess_enable_tco_async();                                   \
        __raw_put_mem("str", *((type *)(__pkn_src)),                    \
-                     (__force type *)(__pkn_dst), __pkn_err);          \
+                     (__force type *)(__pkn_dst), __pkn_err, K);       \
        __uaccess_disable_tco_async();                                  \
                                                                        \
        if (unlikely(__pkn_err))                                        \
index 3c8af03..4eb601e 100644 (file)
@@ -36,9 +36,9 @@
 #define HVC_RESET_VECTORS 2
 
 /*
- * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
+ * HVC_FINALISE_EL2 - Upgrade the CPU from EL1 to EL2, if possible
  */
-#define HVC_VHE_RESTART        3
+#define HVC_FINALISE_EL2       3
 
 /* Max number of HYP stub hypercalls */
 #define HVC_STUB_HCALL_NR 4
 #define BOOT_CPU_MODE_EL1      (0xe11)
 #define BOOT_CPU_MODE_EL2      (0xe12)
 
+/*
+ * Flags returned together with the boot mode, but not preserved in
+ * __boot_cpu_mode. Used by the idreg override code to work out the
+ * boot state.
+ */
+#define BOOT_CPU_FLAG_E2H      BIT_ULL(32)
+
 #ifndef __ASSEMBLY__
 
 #include <asm/ptrace.h>
@@ -113,6 +120,9 @@ static __always_inline bool has_vhe(void)
        /*
         * Code only run in VHE/NVHE hyp context can assume VHE is present or
         * absent. Otherwise fall back to caps.
+        * This allows the compiler to discard VHE-specific code from the
+        * nVHE object, reducing the number of external symbol references
+        * needed to link.
         */
        if (is_vhe_hyp_code())
                return true;
index 4bb2cc8..1ad2568 100644 (file)
@@ -19,6 +19,9 @@
 
 /*
  * HWCAP flags - for AT_HWCAP
+ *
+ * Bits 62 and 63 are reserved for use by libc.
+ * Bits 32-61 are unallocated for potential use by libc.
  */
 #define HWCAP_FP               (1 << 0)
 #define HWCAP_ASIMD            (1 << 1)
@@ -88,5 +91,6 @@
 #define HWCAP2_SME_F32F32      (1 << 29)
 #define HWCAP2_SME_FA64                (1 << 30)
 #define HWCAP2_WFXT            (1UL << 31)
+#define HWCAP2_EBF16           (1UL << 32)
 
 #endif /* _UAPI__ASM_HWCAP_H */
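HWCAP2_EBF16 is the first flag past bit 31, so it is only visible to 64-bit userspace. A minimal probe, assuming a libc that exposes getauxval():

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP2_EBF16
    #define HWCAP2_EBF16    (1UL << 32)
    #endif

    int main(void)
    {
            unsigned long hwcap2 = getauxval(AT_HWCAP2);

            printf("EBF16: %s\n", (hwcap2 & HWCAP2_EBF16) ? "yes" : "no");
            return 0;
    }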
index fa7981d..1add7b0 100644 (file)
@@ -14,6 +14,11 @@ CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_syscall.o         = -fstack-protector -fstack-protector-strong
 CFLAGS_syscall.o       += -fno-stack-protector
 
+# When KASAN is enabled, a stack trace is recorded for every alloc/free, which
+# can significantly impact performance. Avoid instrumenting the stack trace
+# collection code to minimize this impact.
+KASAN_SANITIZE_stacktrace.o := n
+
 # It's not safe to invoke KCOV when portions of the kernel environment aren't
 # available or are out-of-sync with HW state. Since `noinstr` doesn't always
 # inhibit KCOV instrumentation, disable it for the entire compilation unit.
@@ -59,7 +64,7 @@ obj-$(CONFIG_ACPI)                    += acpi.o
 obj-$(CONFIG_ACPI_NUMA)                        += acpi_numa.o
 obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)      += acpi_parking_protocol.o
 obj-$(CONFIG_PARAVIRT)                 += paravirt.o
-obj-$(CONFIG_RANDOMIZE_BASE)           += kaslr.o
+obj-$(CONFIG_RANDOMIZE_BASE)           += kaslr.o pi/
 obj-$(CONFIG_HIBERNATION)              += hibernate.o hibernate-asm.o
 obj-$(CONFIG_ELF_CORE)                 += elfcore.o
 obj-$(CONFIG_KEXEC_CORE)               += machine_kexec.o relocate_kernel.o    \
index e4dea8d..a5a256e 100644 (file)
@@ -351,7 +351,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
                                prot = __acpi_get_writethrough_mem_attribute();
                }
        }
-       return __ioremap(phys, size, prot);
+       return ioremap_prot(phys, size, pgprot_val(prot));
 }
 
 /*
index fdfecf0..e51535a 100644 (file)
@@ -109,7 +109,7 @@ void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa)
        pxm = pa->proximity_domain;
        node = acpi_map_pxm_to_node(pxm);
 
-       if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
+       if (node == NUMA_NO_NODE) {
                pr_err("SRAT: Too many proximity domains %d\n", pxm);
                bad_srat();
                return;
index 7bbf510..9bcaa5e 100644 (file)
@@ -121,7 +121,7 @@ static void clean_dcache_range_nopatch(u64 start, u64 end)
 
        ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
        d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
-                                                          CTR_DMINLINE_SHIFT);
+                                                          CTR_EL0_DminLine_SHIFT);
        cur = start & ~(d_size - 1);
        do {
                /*
index 6875a16..fb0e7c7 100644 (file)
@@ -59,6 +59,7 @@ struct insn_emulation {
 static LIST_HEAD(insn_emulation);
 static int nr_insn_emulated __initdata;
 static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
+static DEFINE_MUTEX(insn_emulation_mutex);
 
 static void register_emulation_hooks(struct insn_emulation_ops *ops)
 {
@@ -207,10 +208,10 @@ static int emulation_proc_handler(struct ctl_table *table, int write,
                                  loff_t *ppos)
 {
        int ret = 0;
-       struct insn_emulation *insn = (struct insn_emulation *) table->data;
+       struct insn_emulation *insn = container_of(table->data, struct insn_emulation, current_mode);
        enum insn_emulation_mode prev_mode = insn->current_mode;
 
-       table->data = &insn->current_mode;
+       mutex_lock(&insn_emulation_mutex);
        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 
        if (ret || !write || prev_mode == insn->current_mode)
@@ -223,7 +224,7 @@ static int emulation_proc_handler(struct ctl_table *table, int write,
                update_insn_emulation_mode(insn, INSN_UNDEF);
        }
 ret:
-       table->data = insn;
+       mutex_unlock(&insn_emulation_mutex);
        return ret;
 }
 
@@ -247,7 +248,7 @@ static void __init register_insn_emulation_sysctl(void)
                sysctl->maxlen = sizeof(int);
 
                sysctl->procname = insn->ops->name;
-               sysctl->data = insn;
+               sysctl->data = &insn->current_mode;
                sysctl->extra1 = &insn->min;
                sysctl->extra2 = &insn->max;
                sysctl->proc_handler = emulation_proc_handler;
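Pointing sysctl->data at the member, instead of swapping table->data inside the handler, is what closes the race; the handler then recovers the wrapper via container_of(), which is just offsetof() arithmetic. A standalone rendering with a stand-in struct:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct emu {
            const char *name;
            int current_mode;
    };

    /* Same recovery emulation_proc_handler() performs on table->data. */
    static struct emu *emu_from_mode(int *mode)
    {
            return container_of(mode, struct emu, current_mode);
    }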
index c05cc3b..7e6289e 100644 (file)
@@ -187,7 +187,7 @@ has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
                                int scope)
 {
        u32 midr = read_cpuid_id();
-       bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
+       bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
        const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
 
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
@@ -212,6 +212,12 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
                ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
        },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_2441009
+       {
+               /* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
+               ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
+       },
+#endif
        {},
 };
 #endif
@@ -395,6 +401,14 @@ static struct midr_range trbe_write_out_of_range_cpus[] = {
 };
 #endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */
 
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+static struct midr_range broken_aarch32_aes[] = {
+       MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+       {},
+};
+#endif /* CONFIG_ARM64_ERRATUM_1742098 */
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
@@ -480,7 +494,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #endif
 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
        {
-               .desc = "Qualcomm erratum 1009, or ARM erratum 1286807",
+               .desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
@@ -658,6 +672,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
        },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+       {
+               .desc = "ARM erratum 1742098",
+               .capability = ARM64_WORKAROUND_1742098,
+               CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+       },
+#endif
        {
        }
 };
index 42ea2bd..ad64cab 100644 (file)
@@ -79,6 +79,7 @@
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
 #include <asm/fpsimd.h>
+#include <asm/hwcap.h>
 #include <asm/insn.h>
 #include <asm/kvm_host.h>
 #include <asm/mmu_context.h>
@@ -91,7 +92,7 @@
 #include <asm/virt.h>
 
 /* Kernel representation of AT_HWCAP and AT_HWCAP2 */
-static unsigned long elf_hwcap __read_mostly;
+static DECLARE_BITMAP(elf_hwcap, MAX_CPU_FEATURES) __read_mostly;
 
 #ifdef CONFIG_COMPAT
 #define COMPAT_ELF_HWCAP_DEFAULT       \
@@ -209,35 +210,35 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_I8MM_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DGH_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_BF16_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_SPECRES_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_SB_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_FRINTTS_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
-                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_GPI_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
-                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_GPA_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_FCMA_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
-                      FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_EL1_API_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
-                      FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_EL1_APA_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
-                      FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_APA3_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
-                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_GPA3_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_WFXT_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_GPA3_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
 
@@ -276,41 +277,41 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 
 static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
-                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F64MM_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
-                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F32MM_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
-                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_I8MM_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_I8MM_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
-                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SM4_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
-                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
-                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BF16_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
-                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
-                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_AES_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
-                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
-                      FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_FA64_SHIFT, 1, 0),
+                      FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
-                      FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_I16I64_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
-                      FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_F64F64_SHIFT, 1, 0),
+                      FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
-                      FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_I8I32_SHIFT, 4, 0),
+                      FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
-                      FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_F16F32_SHIFT, 1, 0),
+                      FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
-                      FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_B16F32_SHIFT, 1, 0),
+                      FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
-                      FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_F32F32_SHIFT, 1, 0),
+                      FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, 0),
        ARM64_FTR_END,
 };
 
@@ -361,6 +362,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TIDCP1_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
@@ -396,18 +398,18 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
 
 static const struct arm64_ftr_bits ftr_ctr[] = {
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DIC_SHIFT, 1, 1),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IDC_SHIFT, 1, 1),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_CWG_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_ERG_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DminLine_SHIFT, 4, 1),
        /*
         * Linux can handle differing I-cache policies. Userspace JITs will
         * make use of *minLine.
         * If we have differing I-cache policies, report it as the weakest - VIPT.
         */
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_L1IP_SHIFT, 2, ICACHE_POLICY_VIPT),   /* L1Ip */
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_EL0_L1Ip_SHIFT, 2, CTR_EL0_L1Ip_VIPT),        /* L1Ip */
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IminLine_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
 
@@ -453,13 +455,13 @@ static const struct arm64_ftr_bits ftr_mvfr2[] = {
 };
 
 static const struct arm64_ftr_bits ftr_dczid[] = {
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, DCZID_DZP_SHIFT, 1, 1),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_BS_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, DCZID_EL0_DZP_SHIFT, 1, 1),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_EL0_BS_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
 
 static const struct arm64_ftr_bits ftr_gmid[] = {
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, SYS_GMID_EL1_BS_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, GMID_EL1_BS_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
 
@@ -561,7 +563,7 @@ static const struct arm64_ftr_bits ftr_id_pfr2[] = {
 
 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
        /* [31:28] TraceFilt */
-       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_PERFMON_SHIFT, 4, 0xf),
+       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_PERFMON_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MPROFDBG_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPTRC_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPTRC_SHIFT, 4, 0),
@@ -631,7 +633,10 @@ static const struct arm64_ftr_bits ftr_raz[] = {
        __ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)
 
 struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
+struct arm64_ftr_override __ro_after_init id_aa64pfr0_override;
 struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
+struct arm64_ftr_override __ro_after_init id_aa64zfr0_override;
+struct arm64_ftr_override __ro_after_init id_aa64smfr0_override;
 struct arm64_ftr_override __ro_after_init id_aa64isar1_override;
 struct arm64_ftr_override __ro_after_init id_aa64isar2_override;
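The new override slots let early command-line options (e.g. arm64.nosve, arm64.nosme) mask features before sanitisation. Conceptually each override is a (val, mask) pair applied over the hardware value, roughly as below (toy model, not the kernel's exact code):

    #include <stdint.h>

    struct ftr_override_model {
            uint64_t val;
            uint64_t mask;
    };

    /* Bits covered by mask take the override value; the rest keep what
     * the hardware reported. */
    static uint64_t apply_override(uint64_t hw, const struct ftr_override_model *o)
    {
            return (hw & ~o->mask) | (o->val & o->mask);
    }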
 
@@ -668,11 +673,14 @@ static const struct __ftr_reg_entry {
        ARM64_FTR_REG(SYS_ID_MMFR5_EL1, ftr_id_mmfr5),
 
        /* Op1 = 0, CRn = 0, CRm = 4 */
-       ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
+       ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0,
+                              &id_aa64pfr0_override),
        ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1,
                               &id_aa64pfr1_override),
-       ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),
-       ARM64_FTR_REG(SYS_ID_AA64SMFR0_EL1, ftr_id_aa64smfr0),
+       ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0,
+                              &id_aa64zfr0_override),
+       ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64SMFR0_EL1, ftr_id_aa64smfr0,
+                              &id_aa64smfr0_override),
 
        /* Op1 = 0, CRn = 0, CRm = 5 */
        ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
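
[ The new pfr0/zfr0/smfr0 override slots hook into the existing early
  idreg-override machinery, which backs command-line controls such as
  "id_aa64pfr0.sve=0" (aliased by "arm64.nosve"). A sketch of how an
  override's (val, mask) pair is applied, simplified from
  init_cpu_ftr_reg(): ]

    /* Sketch: masked-in override bits win over the raw register value. */
    val = (val & ~override->mask) | (override->val & override->mask);
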
@@ -993,15 +1001,24 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
        if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
                init_32bit_cpu_features(&info->aarch32);
 
-       if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
+       if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+           id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
+               info->reg_zcr = read_zcr_features();
                init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
                vec_init_vq_map(ARM64_VEC_SVE);
        }
 
-       if (id_aa64pfr1_sme(info->reg_id_aa64pfr1)) {
+       if (IS_ENABLED(CONFIG_ARM64_SME) &&
+           id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
+               info->reg_smcr = read_smcr_features();
+               /*
+                * We mask out SMPS since, even if the hardware
+                * supports priorities, the kernel does not at present,
+                * and we block access to them.
+                */
+               info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
                init_cpu_ftr_reg(SYS_SMCR_EL1, info->reg_smcr);
-               if (IS_ENABLED(CONFIG_ARM64_SME))
-                       vec_init_vq_map(ARM64_VEC_SME);
+               vec_init_vq_map(ARM64_VEC_SME);
        }
 
        if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
@@ -1233,23 +1250,31 @@ void update_cpu_features(int cpu,
        taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu,
                                      info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0);
 
-       if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
+       if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+           id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
+               info->reg_zcr = read_zcr_features();
                taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
                                        info->reg_zcr, boot->reg_zcr);
 
-               /* Probe vector lengths, unless we already gave up on SVE */
-               if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
-                   !system_capabilities_finalized())
+               /* Probe vector lengths */
+               if (!system_capabilities_finalized())
                        vec_update_vq_map(ARM64_VEC_SVE);
        }
 
-       if (id_aa64pfr1_sme(info->reg_id_aa64pfr1)) {
+       if (IS_ENABLED(CONFIG_ARM64_SME) &&
+           id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
+               info->reg_smcr = read_smcr_features();
+               /*
+                * We mask out SMPS since, even if the hardware
+                * supports priorities, the kernel does not at present,
+                * and we block access to them.
+                */
+               info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
                taint |= check_update_ftr_reg(SYS_SMCR_EL1, cpu,
                                        info->reg_smcr, boot->reg_smcr);
 
-               /* Probe vector lengths, unless we already gave up on SME */
-               if (id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1)) &&
-                   !system_capabilities_finalized())
+               /* Probe vector lengths */
+               if (!system_capabilities_finalized())
                        vec_update_vq_map(ARM64_VEC_SME);
        }
 
@@ -1480,7 +1505,7 @@ static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
        else
                ctr = read_cpuid_effective_cachetype();
 
-       return ctr & BIT(CTR_IDC_SHIFT);
+       return ctr & BIT(CTR_EL0_IDC_SHIFT);
 }
 
 static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
@@ -1491,7 +1516,7 @@ static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unu
         * to the CTR_EL0 on this CPU and emulate it with the real/safe
         * value.
         */
-       if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT)))
+       if (!(read_cpuid_cachetype() & BIT(CTR_EL0_IDC_SHIFT)))
                sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 }
 
@@ -1505,7 +1530,7 @@ static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
        else
                ctr = read_cpuid_cachetype();
 
-       return ctr & BIT(CTR_DIC_SHIFT);
+       return ctr & BIT(CTR_EL0_DIC_SHIFT);
 }
 
 static bool __maybe_unused
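
[ For context: CTR_EL0.IDC set means instruction-to-data coherence
  needs no D-cache clean to the PoU, and DIC likewise removes the need
  for I-cache invalidation, so code-patching paths can skip that
  maintenance. A sketch, with an illustrative helper name: ]

    static bool needs_dcache_clean_pou(u64 ctr)
    {
            /* IDC clear: cleaning to PoU is still required. */
            return !(ctr & BIT(CTR_EL0_IDC_SHIFT));
    }
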
@@ -1645,14 +1670,34 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 }
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define KPTI_NG_TEMP_VA                (-(1UL << PMD_SHIFT))
+
+extern
+void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
+                            phys_addr_t size, pgprot_t prot,
+                            phys_addr_t (*pgtable_alloc)(int), int flags);
+
+static phys_addr_t kpti_ng_temp_alloc;
+
+static phys_addr_t kpti_ng_pgd_alloc(int shift)
+{
+       kpti_ng_temp_alloc -= PAGE_SIZE;
+       return kpti_ng_temp_alloc;
+}
+
 static void __nocfi
 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 {
-       typedef void (kpti_remap_fn)(int, int, phys_addr_t);
+       typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long);
        extern kpti_remap_fn idmap_kpti_install_ng_mappings;
        kpti_remap_fn *remap_fn;
 
        int cpu = smp_processor_id();
+       int levels = CONFIG_PGTABLE_LEVELS;
+       int order = order_base_2(levels);
+       u64 kpti_ng_temp_pgd_pa = 0;
+       pgd_t *kpti_ng_temp_pgd;
+       u64 alloc = 0;
 
        if (__this_cpu_read(this_cpu_vector) == vectors) {
                const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
@@ -1670,12 +1715,40 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 
        remap_fn = (void *)__pa_symbol(function_nocfi(idmap_kpti_install_ng_mappings));
 
+       if (!cpu) {
+               alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
+               kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE);
+               kpti_ng_temp_alloc = kpti_ng_temp_pgd_pa = __pa(kpti_ng_temp_pgd);
+
+               //
+               // Create a minimal page table hierarchy that permits us to map
+               // the swapper page tables temporarily as we traverse them.
+               //
+               // The physical pages are laid out as follows:
+               //
+               // +--------+-/-------+-/-------+-\\--------+
+               // :  PTE[] : | PMD[] : | PUD[] : || PGD[]  :
+               // +--------+-\-------+-\-------+-//--------+
+               //      ^
+               // The first page is mapped into this hierarchy at a PMD_SHIFT
+               // aligned virtual address, so that we can manipulate the PTE
+               // level entries while the mapping is active. The first entry
+               // covers the PTE[] page itself; the remaining entries are free
+               // to be used as an ad-hoc fixmap.
+               //
+               create_kpti_ng_temp_pgd(kpti_ng_temp_pgd, __pa(alloc),
+                                       KPTI_NG_TEMP_VA, PAGE_SIZE, PAGE_KERNEL,
+                                       kpti_ng_pgd_alloc, 0);
+       }
+
        cpu_install_idmap();
-       remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
+       remap_fn(cpu, num_online_cpus(), kpti_ng_temp_pgd_pa, KPTI_NG_TEMP_VA);
        cpu_uninstall_idmap();
 
-       if (!cpu)
+       if (!cpu) {
+               free_pages(alloc, order);
                arm64_use_ng_mappings = true;
+       }
 }
 #else
 static void
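
[ Shape of the temporary allocation above, assuming
  CONFIG_PGTABLE_LEVELS == 4 (so order_base_2(4) == 2, i.e. four
  contiguous pages), expressed as a sketch: ]

    /*
     * alloc + 0 * PAGE_SIZE  ->  PTE[]  (also mapped at KPTI_NG_TEMP_VA)
     * alloc + 1 * PAGE_SIZE  ->  PMD[]
     * alloc + 2 * PAGE_SIZE  ->  PUD[]
     * alloc + 3 * PAGE_SIZE  ->  PGD[]  (kpti_ng_temp_pgd)
     *
     * kpti_ng_pgd_alloc() hands out pages by decrementing from the PGD,
     * which is why kpti_ng_temp_alloc starts at __pa(kpti_ng_temp_pgd).
     */
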
@@ -1971,21 +2044,26 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
 }
 #endif /* CONFIG_ARM64_MTE */
 
+static void elf_hwcap_fixup(void)
+{
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+       if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
+               compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
+#endif /* ARM64_ERRATUM_1742098 */
+}
+
 #ifdef CONFIG_KVM
 static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused)
 {
-       if (kvm_get_mode() != KVM_MODE_PROTECTED)
-               return false;
-
-       if (is_kernel_in_hyp_mode()) {
-               pr_warn("Protected KVM not available with VHE\n");
-               return false;
-       }
-
-       return true;
+       return kvm_get_mode() == KVM_MODE_PROTECTED;
 }
 #endif /* CONFIG_KVM */
 
+static void cpu_trap_el0_impdef(const struct arm64_cpu_capabilities *__unused)
+{
+       sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_TIDCP);
+}
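
[ SCTLR_EL1.TIDCP (FEAT_TIDCP1) traps EL0 accesses to IMPLEMENTATION
  DEFINED system registers to EL1. The helper call above is roughly
  equivalent to this read-modify-write sketch: ]

    u64 sctlr = read_sysreg(sctlr_el1);

    write_sysreg(sctlr | SCTLR_EL1_TIDCP, sctlr_el1);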
+
 /* Internal helper functions to match cpu capability type */
 static bool
 cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
@@ -2140,7 +2218,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                .matches = has_cpuid_feature,
                .sys_reg = SYS_ID_AA64ISAR1_EL1,
-               .field_pos = ID_AA64ISAR1_DPB_SHIFT,
+               .field_pos = ID_AA64ISAR1_EL1_DPB_SHIFT,
                .field_width = 4,
                .min_field_value = 1,
        },
@@ -2151,7 +2229,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .matches = has_cpuid_feature,
                .sys_reg = SYS_ID_AA64ISAR1_EL1,
                .sign = FTR_UNSIGNED,
-               .field_pos = ID_AA64ISAR1_DPB_SHIFT,
+               .field_pos = ID_AA64ISAR1_EL1_DPB_SHIFT,
                .field_width = 4,
                .min_field_value = 2,
        },
@@ -2311,7 +2389,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                .matches = has_cpuid_feature,
                .sys_reg = SYS_ID_AA64ISAR1_EL1,
-               .field_pos = ID_AA64ISAR1_SB_SHIFT,
+               .field_pos = ID_AA64ISAR1_EL1_SB_SHIFT,
                .field_width = 4,
                .sign = FTR_UNSIGNED,
                .min_field_value = 1,
@@ -2323,9 +2401,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
                .sys_reg = SYS_ID_AA64ISAR1_EL1,
                .sign = FTR_UNSIGNED,
-               .field_pos = ID_AA64ISAR1_APA_SHIFT,
+               .field_pos = ID_AA64ISAR1_EL1_APA_SHIFT,
                .field_width = 4,
-               .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
+               .min_field_value = ID_AA64ISAR1_EL1_APA_PAuth,
                .matches = has_address_auth_cpucap,
        },
        {
@@ -2334,9 +2412,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
                .sys_reg = SYS_ID_AA64ISAR2_EL1,
                .sign = FTR_UNSIGNED,
-               .field_pos = ID_AA64ISAR2_APA3_SHIFT,
+               .field_pos = ID_AA64ISAR2_EL1_APA3_SHIFT,
                .field_width = 4,
-               .min_field_value = ID_AA64ISAR2_APA3_ARCHITECTED,
+               .min_field_value = ID_AA64ISAR2_EL1_APA3_PAuth,
                .matches = has_address_auth_cpucap,
        },
        {
@@ -2345,9 +2423,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
                .sys_reg = SYS_ID_AA64ISAR1_EL1,
                .sign = FTR_UNSIGNED,
-               .field_pos = ID_AA64ISAR1_API_SHIFT,
+               .field_pos = ID_AA64ISAR1_EL1_API_SHIFT,
                .field_width = 4,
-               .min_field_value = ID_AA64ISAR1_API_IMP_DEF,
+               .min_field_value = ID_AA64ISAR1_EL1_API_PAuth,
                .matches = has_address_auth_cpucap,
        },
        {
@@ -2361,9 +2439,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                .sys_reg = SYS_ID_AA64ISAR1_EL1,
                .sign = FTR_UNSIGNED,
-               .field_pos = ID_AA64ISAR1_GPA_SHIFT,
+               .field_pos = ID_AA64ISAR1_EL1_GPA_SHIFT,
                .field_width = 4,
-               .min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED,
+               .min_field_value = ID_AA64ISAR1_EL1_GPA_IMP,
                .matches = has_cpuid_feature,
        },
        {
@@ -2372,9 +2450,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                .sys_reg = SYS_ID_AA64ISAR2_EL1,
                .sign = FTR_UNSIGNED,
-               .field_pos = ID_AA64ISAR2_GPA3_SHIFT,
+               .field_pos = ID_AA64ISAR2_EL1_GPA3_SHIFT,
                .field_width = 4,
-               .min_field_value = ID_AA64ISAR2_GPA3_ARCHITECTED,
+               .min_field_value = ID_AA64ISAR2_EL1_GPA3_IMP,
                .matches = has_cpuid_feature,
        },
        {
@@ -2383,9 +2461,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                .sys_reg = SYS_ID_AA64ISAR1_EL1,
                .sign = FTR_UNSIGNED,
-               .field_pos = ID_AA64ISAR1_GPI_SHIFT,
+               .field_pos = ID_AA64ISAR1_EL1_GPI_SHIFT,
                .field_width = 4,
-               .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
+               .min_field_value = ID_AA64ISAR1_EL1_GPI_IMP,
                .matches = has_cpuid_feature,
        },
        {
@@ -2486,7 +2564,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                .sys_reg = SYS_ID_AA64ISAR1_EL1,
                .sign = FTR_UNSIGNED,
-               .field_pos = ID_AA64ISAR1_LRCPC_SHIFT,
+               .field_pos = ID_AA64ISAR1_EL1_LRCPC_SHIFT,
                .field_width = 4,
                .matches = has_cpuid_feature,
                .min_field_value = 1,
@@ -2511,9 +2589,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .capability = ARM64_SME_FA64,
                .sys_reg = SYS_ID_AA64SMFR0_EL1,
                .sign = FTR_UNSIGNED,
-               .field_pos = ID_AA64SMFR0_FA64_SHIFT,
+               .field_pos = ID_AA64SMFR0_EL1_FA64_SHIFT,
                .field_width = 1,
-               .min_field_value = ID_AA64SMFR0_FA64,
+               .min_field_value = ID_AA64SMFR0_EL1_FA64_IMP,
                .matches = has_cpuid_feature,
                .cpu_enable = fa64_kernel_enable,
        },
@@ -2524,10 +2602,22 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                .sys_reg = SYS_ID_AA64ISAR2_EL1,
                .sign = FTR_UNSIGNED,
-               .field_pos = ID_AA64ISAR2_WFXT_SHIFT,
+               .field_pos = ID_AA64ISAR2_EL1_WFxT_SHIFT,
                .field_width = 4,
                .matches = has_cpuid_feature,
-               .min_field_value = ID_AA64ISAR2_WFXT_SUPPORTED,
+               .min_field_value = ID_AA64ISAR2_EL1_WFxT_IMP,
+       },
+       {
+               .desc = "Trap EL0 IMPLEMENTATION DEFINED functionality",
+               .capability = ARM64_HAS_TIDCP1,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .sys_reg = SYS_ID_AA64MMFR1_EL1,
+               .sign = FTR_UNSIGNED,
+               .field_pos = ID_AA64MMFR1_TIDCP1_SHIFT,
+               .field_width = 4,
+               .min_field_value = ID_AA64MMFR1_TIDCP1_IMP,
+               .matches = has_cpuid_feature,
+               .cpu_enable = cpu_trap_el0_impdef,
        },
        {},
 };
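
[ For entries like ARM64_HAS_TIDCP1 above, the matches callback boils
  down to an unsigned field comparison against min_field_value; a
  simplified sketch of the has_cpuid_feature() path: ]

    u64 reg = read_sanitised_ftr_reg(entry->sys_reg);
    u64 val = cpuid_feature_extract_unsigned_field_width(reg,
                            entry->field_pos, entry->field_width);

    return val >= entry->min_field_value;
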
@@ -2568,33 +2658,33 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 #ifdef CONFIG_ARM64_PTR_AUTH
 static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
        {
-               HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT,
+               HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_APA_SHIFT,
                                  4, FTR_UNSIGNED,
-                                 ID_AA64ISAR1_APA_ARCHITECTED)
+                                 ID_AA64ISAR1_EL1_APA_PAuth)
        },
        {
-               HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_SHIFT,
-                                 4, FTR_UNSIGNED, ID_AA64ISAR2_APA3_ARCHITECTED)
+               HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_APA3_SHIFT,
+                                 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_APA3_PAuth)
        },
        {
-               HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT,
-                                 4, FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF)
+               HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_API_SHIFT,
+                                 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_API_PAuth)
        },
        {},
 };
 
 static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
        {
-               HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT,
-                                 4, FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED)
+               HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_GPA_SHIFT,
+                                 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_GPA_IMP)
        },
        {
-               HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_GPA3_SHIFT,
-                                 4, FTR_UNSIGNED, ID_AA64ISAR2_GPA3_ARCHITECTED)
+               HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_GPA3_SHIFT,
+                                 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_GPA3_IMP)
        },
        {
-               HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT,
-                                 4, FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF)
+               HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_GPI_SHIFT,
+                                 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_GPI_IMP)
        },
        {},
 };
@@ -2622,30 +2712,31 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
-       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
-       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
-       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
-       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA),
-       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
-       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
-       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
-       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
-       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16),
-       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DGH_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
-       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_FCMA_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_FRINTTS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_SB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_EBF16),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
        HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
 #ifdef CONFIG_ARM64_SVE
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
-       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SVEVER_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_SVEVER_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
-       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
-       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
-       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
-       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_BF16, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
-       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
-       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
-       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_I8MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_I8MM, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
-       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F32MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_F32MM, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
-       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F64MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_F64MM, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BitPerm_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BF16_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SHA3_IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SM4_IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_I8MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F32MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F64MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
 #endif
        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
 #ifdef CONFIG_ARM64_BTI
@@ -2661,17 +2752,17 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 #endif /* CONFIG_ARM64_MTE */
        HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
        HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
-       HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
-       HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_WFXT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_WFXT_SUPPORTED, CAP_HWCAP, KERNEL_HWCAP_WFXT),
+       HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
+       HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
 #ifdef CONFIG_ARM64_SME
        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SME, CAP_HWCAP, KERNEL_HWCAP_SME),
-       HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_FA64, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
-       HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_I16I64, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
-       HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_F64F64, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
-       HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_I8I32_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_I8I32, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
-       HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_F16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_F16F32, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
-       HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_B16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_B16F32, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
-       HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_F32F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_F32F32, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
+       HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
+       HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
+       HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
+       HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I8I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
+       HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F16F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
+       HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_B16F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
+       HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F32F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
 #endif /* CONFIG_ARM64_SME */
        {},
 };
@@ -3106,15 +3197,12 @@ static bool __maybe_unused __system_matches_cap(unsigned int n)
 
 void cpu_set_feature(unsigned int num)
 {
-       WARN_ON(num >= MAX_CPU_FEATURES);
-       elf_hwcap |= BIT(num);
+       set_bit(num, elf_hwcap);
 }
-EXPORT_SYMBOL_GPL(cpu_set_feature);
 
 bool cpu_have_feature(unsigned int num)
 {
-       WARN_ON(num >= MAX_CPU_FEATURES);
-       return elf_hwcap & BIT(num);
+       return test_bit(num, elf_hwcap);
 }
 EXPORT_SYMBOL_GPL(cpu_have_feature);
 
@@ -3125,12 +3213,12 @@ unsigned long cpu_get_elf_hwcap(void)
         * note that for userspace compatibility we guarantee that bits 62
         * and 63 will always be returned as 0.
         */
-       return lower_32_bits(elf_hwcap);
+       return elf_hwcap[0];
 }
 
 unsigned long cpu_get_elf_hwcap2(void)
 {
-       return upper_32_bits(elf_hwcap);
+       return elf_hwcap[1];
 }
 
 static void __init setup_system_capabilities(void)
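
[ The accessors above imply that elf_hwcap is now a bitmap rather than
  a single 64-bit word, so HWCAP discovery is no longer capped at 64
  features; the declaration assumed by this sketch: ]

    DECLARE_BITMAP(elf_hwcap, MAX_CPU_FEATURES) __read_mostly;
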
@@ -3152,8 +3240,10 @@ void __init setup_cpu_features(void)
        setup_system_capabilities();
        setup_elf_hwcaps(arm64_elf_hwcaps);
 
-       if (system_supports_32bit_el0())
+       if (system_supports_32bit_el0()) {
                setup_elf_hwcaps(compat_elf_hwcaps);
+               elf_hwcap_fixup();
+       }
 
        if (system_uses_ttbr0_pan())
                pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
@@ -3206,6 +3296,7 @@ static int enable_mismatched_32bit_el0(unsigned int cpu)
                                                         cpu_active_mask);
        get_cpu_device(lucky_winner)->offline_disabled = true;
        setup_elf_hwcaps(compat_elf_hwcaps);
+       elf_hwcap_fixup();
        pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n",
                cpu, lucky_winner);
        return 0;
@@ -3227,7 +3318,7 @@ subsys_initcall_sync(init_32bit_el0_mask);
 
 static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
 {
-       cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+       cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
 }
 
 /*
index 3006f43..4150e30 100644 (file)
 #include <linux/of_device.h>
 #include <linux/psci.h>
 
-#include <asm/cpuidle.h>
-#include <asm/cpu_ops.h>
-
-int arm_cpuidle_init(unsigned int cpu)
-{
-       const struct cpu_operations *ops = get_cpu_ops(cpu);
-       int ret = -EOPNOTSUPP;
-
-       if (ops && ops->cpu_suspend && ops->cpu_init_idle)
-               ret = ops->cpu_init_idle(cpu);
-
-       return ret;
-}
-
-/**
- * arm_cpuidle_suspend() - function to enter a low-power idle state
- * @index: argument to pass to CPU suspend operations
- *
- * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
- * operations back-end error code otherwise.
- */
-int arm_cpuidle_suspend(int index)
-{
-       int cpu = smp_processor_id();
-       const struct cpu_operations *ops = get_cpu_ops(cpu);
-
-       return ops->cpu_suspend(index);
-}
-
 #ifdef CONFIG_ACPI
 
 #include <acpi/processor.h>
index 8eff0a3..d7702f3 100644 (file)
 DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
 static struct cpuinfo_arm64 boot_cpu_data;
 
-static const char *icache_policy_str[] = {
-       [ICACHE_POLICY_VPIPT]           = "VPIPT",
-       [ICACHE_POLICY_RESERVED]        = "RESERVED/UNKNOWN",
-       [ICACHE_POLICY_VIPT]            = "VIPT",
-       [ICACHE_POLICY_PIPT]            = "PIPT",
-};
+static inline const char *icache_policy_str(int l1ip)
+{
+       switch (l1ip) {
+       case CTR_EL0_L1Ip_VPIPT:
+               return "VPIPT";
+       case CTR_EL0_L1Ip_VIPT:
+               return "VIPT";
+       case CTR_EL0_L1Ip_PIPT:
+               return "PIPT";
+       default:
+               return "RESERVED/UNKNOWN";
+       }
+}
 
 unsigned long __icache_flags;
 
@@ -107,6 +114,7 @@ static const char *const hwcap_str[] = {
        [KERNEL_HWCAP_SME_F32F32]       = "smef32f32",
        [KERNEL_HWCAP_SME_FA64]         = "smefa64",
        [KERNEL_HWCAP_WFXT]             = "wfxt",
+       [KERNEL_HWCAP_EBF16]            = "ebf16",
 };
 
 #ifdef CONFIG_COMPAT
@@ -267,6 +275,7 @@ static struct kobj_type cpuregs_kobj_type = {
 
 CPUREGS_ATTR_RO(midr_el1, midr);
 CPUREGS_ATTR_RO(revidr_el1, revidr);
+CPUREGS_ATTR_RO(smidr_el1, smidr);
 
 static struct attribute *cpuregs_id_attrs[] = {
        &cpuregs_attr_midr_el1.attr,
@@ -279,6 +288,16 @@ static const struct attribute_group cpuregs_attr_group = {
        .name = "identification"
 };
 
+static struct attribute *sme_cpuregs_id_attrs[] = {
+       &cpuregs_attr_smidr_el1.attr,
+       NULL
+};
+
+static const struct attribute_group sme_cpuregs_attr_group = {
+       .attrs = sme_cpuregs_id_attrs,
+       .name = "identification"
+};
+
 static int cpuid_cpu_online(unsigned int cpu)
 {
        int rc;
@@ -296,6 +315,8 @@ static int cpuid_cpu_online(unsigned int cpu)
        rc = sysfs_create_group(&info->kobj, &cpuregs_attr_group);
        if (rc)
                kobject_del(&info->kobj);
+       if (system_supports_sme())
+               rc = sysfs_merge_group(&info->kobj, &sme_cpuregs_attr_group);
 out:
        return rc;
 }
@@ -342,19 +363,19 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
        u32 l1ip = CTR_L1IP(info->reg_ctr);
 
        switch (l1ip) {
-       case ICACHE_POLICY_PIPT:
+       case CTR_EL0_L1Ip_PIPT:
                break;
-       case ICACHE_POLICY_VPIPT:
+       case CTR_EL0_L1Ip_VPIPT:
                set_bit(ICACHEF_VPIPT, &__icache_flags);
                break;
-       case ICACHE_POLICY_RESERVED:
-       case ICACHE_POLICY_VIPT:
+       case CTR_EL0_L1Ip_VIPT:
+       default:
                /* Assume aliasing */
                set_bit(ICACHEF_ALIASING, &__icache_flags);
                break;
        }
 
-       pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
+       pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str(l1ip), cpu);
 }
 
 static void __cpuinfo_store_cpu_32bit(struct cpuinfo_32bit *info)
@@ -418,14 +439,6 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
        if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
                __cpuinfo_store_cpu_32bit(&info->aarch32);
 
-       if (IS_ENABLED(CONFIG_ARM64_SVE) &&
-           id_aa64pfr0_sve(info->reg_id_aa64pfr0))
-               info->reg_zcr = read_zcr_features();
-
-       if (IS_ENABLED(CONFIG_ARM64_SME) &&
-           id_aa64pfr1_sme(info->reg_id_aa64pfr1))
-               info->reg_smcr = read_smcr_features();
-
        cpuinfo_detect_icache_policy(info);
 }
 
index d42a205..bd5df50 100644 (file)
@@ -102,7 +102,6 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
  * x19-x29 per the AAPCS, and we created frame records upon entry, so we need
  * to restore x0-x8, x29, and x30.
  */
-ftrace_common_return:
        /* Restore function arguments */
        ldp     x0, x1, [sp]
        ldp     x2, x3, [sp, #S_X2]
index 5b82b92..254fe31 100644 (file)
@@ -636,18 +636,28 @@ alternative_else_nop_endif
         */
        .endm
 
-       .macro tramp_data_page  dst
-       adr_l   \dst, .entry.tramp.text
-       sub     \dst, \dst, PAGE_SIZE
-       .endm
-
-       .macro tramp_data_read_var      dst, var
-#ifdef CONFIG_RANDOMIZE_BASE
-       tramp_data_page         \dst
-       add     \dst, \dst, #:lo12:__entry_tramp_data_\var
-       ldr     \dst, [\dst]
+       .macro          tramp_data_read_var     dst, var
+#ifdef CONFIG_RELOCATABLE
+       ldr             \dst, .L__tramp_data_\var
+       .ifndef         .L__tramp_data_\var
+       .pushsection    ".entry.tramp.rodata", "a", %progbits
+       .align          3
+.L__tramp_data_\var:
+       .quad           \var
+       .popsection
+       .endif
 #else
-       ldr     \dst, =\var
+       /*
+        * As !RELOCATABLE implies !RANDOMIZE_BASE, the address is always a
+        * compile-time constant (and hence not secret and not worth hiding).
+        *
+        * As statically allocated kernel code and data always live in the top
+        * 47 bits of the address space, we can sign-extend bit 47 and avoid an
+        * instruction to load the upper 16 bits (which must be 0xFFFF).
+        */
+       movz            \dst, :abs_g2_s:\var
+       movk            \dst, :abs_g1_nc:\var
+       movk            \dst, :abs_g0_nc:\var
 #endif
        .endm
 
@@ -695,7 +705,7 @@ alternative_else_nop_endif
        msr     vbar_el1, x30
        isb
        .else
-       ldr     x30, =vectors
+       adr_l   x30, vectors
        .endif // \kpti == 1
 
        .if     \bhb == BHB_MITIGATION_FW
@@ -764,24 +774,7 @@ SYM_CODE_END(tramp_exit_native)
 SYM_CODE_START(tramp_exit_compat)
        tramp_exit      32
 SYM_CODE_END(tramp_exit_compat)
-
-       .ltorg
        .popsection                             // .entry.tramp.text
-#ifdef CONFIG_RANDOMIZE_BASE
-       .pushsection ".rodata", "a"
-       .align PAGE_SHIFT
-SYM_DATA_START(__entry_tramp_data_start)
-__entry_tramp_data_vectors:
-       .quad   vectors
-#ifdef CONFIG_ARM_SDE_INTERFACE
-__entry_tramp_data___sdei_asm_handler:
-       .quad   __sdei_asm_handler
-#endif /* CONFIG_ARM_SDE_INTERFACE */
-__entry_tramp_data_this_cpu_vector:
-       .quad   this_cpu_vector
-SYM_DATA_END(__entry_tramp_data_start)
-       .popsection                             // .rodata
-#endif /* CONFIG_RANDOMIZE_BASE */
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 /*
@@ -932,7 +925,6 @@ NOKPROBE(call_on_irq_stack)
  * This clobbers x4, __sdei_handler() will restore this from firmware's
  * copy.
  */
-.ltorg
 .pushsection ".entry.tramp.text", "ax"
 SYM_CODE_START(__sdei_asm_entry_trampoline)
        mrs     x4, ttbr1_el1
@@ -967,7 +959,6 @@ SYM_CODE_START(__sdei_asm_exit_trampoline)
 1:     sdei_handler_exit exit_mode=x2
 SYM_CODE_END(__sdei_asm_exit_trampoline)
 NOKPROBE(__sdei_asm_exit_trampoline)
-       .ltorg
 .popsection            // .entry.tramp.text
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
index aecf307..dd63ffc 100644 (file)
@@ -445,7 +445,6 @@ static void fpsimd_save(void)
 
        if (system_supports_sme()) {
                u64 *svcr = last->svcr;
-               *svcr = read_sysreg_s(SYS_SVCR);
 
                *svcr = read_sysreg_s(SYS_SVCR);
 
index f447c4a..ea5dc7c 100644 (file)
@@ -78,47 +78,76 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
 }
 
 /*
- * Turn on the call to ftrace_caller() in instrumented function
+ * Find the address the callsite must branch to in order to reach '*addr'.
+ *
+ * Due to the limited range of 'BL' instructions, modules may be placed too far
+ * away to branch directly and must use a PLT.
+ *
+ * Returns true when '*addr' contains a reachable target address, or has been
+ * modified to contain a PLT address. Returns false otherwise.
  */
-int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
+                                     struct module *mod,
+                                     unsigned long *addr)
 {
        unsigned long pc = rec->ip;
-       u32 old, new;
-       long offset = (long)pc - (long)addr;
+       long offset = (long)*addr - (long)pc;
+       struct plt_entry *plt;
 
-       if (offset < -SZ_128M || offset >= SZ_128M) {
-               struct module *mod;
-               struct plt_entry *plt;
+       /*
+        * When the target is within range of the 'BL' instruction, use 'addr'
+        * as-is and branch to that directly.
+        */
+       if (offset >= -SZ_128M && offset < SZ_128M)
+               return true;
 
-               if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
-                       return -EINVAL;
+       /*
+        * When the target is outside of the range of a 'BL' instruction, we
+        * must use a PLT to reach it. We can only place PLTs for modules, and
+        * only when module PLT support is built-in.
+        */
+       if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+               return false;
 
-               /*
-                * On kernels that support module PLTs, the offset between the
-                * branch instruction and its target may legally exceed the
-                * range of an ordinary relative 'bl' opcode. In this case, we
-                * need to branch via a trampoline in the module.
-                *
-                * NOTE: __module_text_address() must be called with preemption
-                * disabled, but we can rely on ftrace_lock to ensure that 'mod'
-                * retains its validity throughout the remainder of this code.
-                */
+       /*
+        * 'mod' is only set at module load time, but if we end up
+        * dealing with an out-of-range condition, we can assume it
+        * is due to a module being loaded far away from the kernel.
+        *
+        * NOTE: __module_text_address() must be called with preemption
+        * disabled, but we can rely on ftrace_lock to ensure that 'mod'
+        * retains its validity throughout the remainder of this code.
+        */
+       if (!mod) {
                preempt_disable();
                mod = __module_text_address(pc);
                preempt_enable();
+       }
 
-               if (WARN_ON(!mod))
-                       return -EINVAL;
+       if (WARN_ON(!mod))
+               return false;
 
-               plt = get_ftrace_plt(mod, addr);
-               if (!plt) {
-                       pr_err("ftrace: no module PLT for %ps\n", (void *)addr);
-                       return -EINVAL;
-               }
-
-               addr = (unsigned long)plt;
+       plt = get_ftrace_plt(mod, *addr);
+       if (!plt) {
+               pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
+               return false;
        }
 
+       *addr = (unsigned long)plt;
+       return true;
+}
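
[ The range test above, in isolation: an A64 'BL' encodes a signed
  26-bit word offset, i.e. +/-128MiB from the callsite. Sketch: ]

    static bool in_bl_range(unsigned long pc, unsigned long target)
    {
            long offset = (long)target - (long)pc;

            return offset >= -SZ_128M && offset < SZ_128M;
    }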
+
+/*
+ * Turn on the call to ftrace_caller() in instrumented function
+ */
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned long pc = rec->ip;
+       u32 old, new;
+
+       if (!ftrace_find_callable_addr(rec, NULL, &addr))
+               return -EINVAL;
+
        old = aarch64_insn_gen_nop();
        new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
 
@@ -132,6 +161,11 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
        unsigned long pc = rec->ip;
        u32 old, new;
 
+       if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
+               return -EINVAL;
+       if (!ftrace_find_callable_addr(rec, NULL, &addr))
+               return -EINVAL;
+
        old = aarch64_insn_gen_branch_imm(pc, old_addr,
                                          AARCH64_INSN_BRANCH_LINK);
        new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
@@ -181,54 +215,15 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
 {
        unsigned long pc = rec->ip;
-       bool validate = true;
        u32 old = 0, new;
-       long offset = (long)pc - (long)addr;
 
-       if (offset < -SZ_128M || offset >= SZ_128M) {
-               u32 replaced;
-
-               if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
-                       return -EINVAL;
-
-               /*
-                * 'mod' is only set at module load time, but if we end up
-                * dealing with an out-of-range condition, we can assume it
-                * is due to a module being loaded far away from the kernel.
-                */
-               if (!mod) {
-                       preempt_disable();
-                       mod = __module_text_address(pc);
-                       preempt_enable();
-
-                       if (WARN_ON(!mod))
-                               return -EINVAL;
-               }
-
-               /*
-                * The instruction we are about to patch may be a branch and
-                * link instruction that was redirected via a PLT entry. In
-                * this case, the normal validation will fail, but we can at
-                * least check that we are dealing with a branch and link
-                * instruction that points into the right module.
-                */
-               if (aarch64_insn_read((void *)pc, &replaced))
-                       return -EFAULT;
-
-               if (!aarch64_insn_is_bl(replaced) ||
-                   !within_module(pc + aarch64_get_branch_offset(replaced),
-                                  mod))
-                       return -EINVAL;
-
-               validate = false;
-       } else {
-               old = aarch64_insn_gen_branch_imm(pc, addr,
-                                                 AARCH64_INSN_BRANCH_LINK);
-       }
+       if (!ftrace_find_callable_addr(rec, mod, &addr))
+               return -EINVAL;
 
+       old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
        new = aarch64_insn_gen_nop();
 
-       return ftrace_modify_code(pc, old, new, validate);
+       return ftrace_modify_code(pc, old, new, true);
 }
 
 void arch_ftrace_update_code(int command)
index 6a98f1a..cefe6a7 100644 (file)
@@ -37,8 +37,6 @@
 
 #include "efi-header.S"
 
-#define __PHYS_OFFSET  KERNEL_START
-
 #if (PAGE_OFFSET & 0x1fffff) != 0
 #error PAGE_OFFSET must be at least 2MB aligned
 #endif
@@ -51,9 +49,6 @@
  *   MMU = off, D-cache = off, I-cache = on or off,
  *   x0 = physical address to the FDT blob.
  *
- * This code is mostly position independent so you call this at
- * __pa(PAGE_OFFSET).
- *
  * Note that the callee-saved registers are used for storing variables
  * that are useful before the MMU is enabled. The allocations are described
  * in the entry routines.
         * primary lowlevel boot path:
         *
         *  Register   Scope                      Purpose
+        *  x20        primary_entry() .. __primary_switch()    CPU boot mode
         *  x21        primary_entry() .. start_kernel()        FDT pointer passed at boot in x0
+        *  x22        create_idmap() .. start_kernel()         ID map VA of the DT blob
         *  x23        primary_entry() .. start_kernel()        physical misalignment/KASLR offset
-        *  x28        __create_page_tables()                   callee preserved temp register
-        *  x19/x20    __primary_switch()                       callee preserved temp registers
-        *  x24        __primary_switch() .. relocate_kernel()  current RELR displacement
+        *  x24        __primary_switch()                       linear map KASLR seed
+        *  x25        primary_entry() .. start_kernel()        supported VA size
+        *  x28        create_idmap()                           callee preserved temp register
         */
 SYM_CODE_START(primary_entry)
        bl      preserve_boot_args
        bl      init_kernel_el                  // w0=cpu_boot_mode
-       adrp    x23, __PHYS_OFFSET
-       and     x23, x23, MIN_KIMG_ALIGN - 1    // KASLR offset, defaults to 0
-       bl      set_cpu_boot_mode_flag
-       bl      __create_page_tables
+       mov     x20, x0
+       bl      create_idmap
+
        /*
         * The following calls CPU setup code, see arch/arm64/mm/proc.S for
         * details.
         * On return, the CPU will be ready for the MMU to be turned on and
         * the TCR will have been set.
         */
+#if VA_BITS > 48
+       mrs_s   x0, SYS_ID_AA64MMFR2_EL1
+       tst     x0, #0xf << ID_AA64MMFR2_LVA_SHIFT
+       mov     x0, #VA_BITS
+       mov     x25, #VA_BITS_MIN
+       csel    x25, x25, x0, eq
+       mov     x0, x25
+#endif
        bl      __cpu_setup                     // initialise processor
        b       __primary_switch
 SYM_CODE_END(primary_entry)
@@ -122,28 +126,16 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
        b       dcache_inval_poc                // tail call
 SYM_CODE_END(preserve_boot_args)
 
-/*
- * Macro to create a table entry to the next page.
- *
- *     tbl:    page table address
- *     virt:   virtual address
- *     shift:  #imm page table shift
- *     ptrs:   #imm pointers per table page
- *
- * Preserves:  virt
- * Corrupts:   ptrs, tmp1, tmp2
- * Returns:    tbl -> next level table page address
- */
-       .macro  create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
-       add     \tmp1, \tbl, #PAGE_SIZE
-       phys_to_pte \tmp2, \tmp1
-       orr     \tmp2, \tmp2, #PMD_TYPE_TABLE   // address of next table and entry type
-       lsr     \tmp1, \virt, #\shift
-       sub     \ptrs, \ptrs, #1
-       and     \tmp1, \tmp1, \ptrs             // table index
-       str     \tmp2, [\tbl, \tmp1, lsl #3]
-       add     \tbl, \tbl, #PAGE_SIZE          // next level table page
-       .endm
+SYM_FUNC_START_LOCAL(clear_page_tables)
+       /*
+        * Clear the init page tables.
+        */
+       adrp    x0, init_pg_dir
+       adrp    x1, init_pg_end
+       sub     x2, x1, x0
+       mov     x1, xzr
+       b       __pi_memset                     // tail call
+SYM_FUNC_END(clear_page_tables)
 
 /*
  * Macro to populate page table entries, these entries can be pointers to the next level
@@ -179,31 +171,20 @@ SYM_CODE_END(preserve_boot_args)
  *     vstart: virtual address of start of range
  *     vend:   virtual address of end of range - we map [vstart, vend]
  *     shift:  shift used to transform virtual address into index
- *     ptrs:   number of entries in page table
+ *     order:  #imm 2log(number of entries in page table)
  *     istart: index in table corresponding to vstart
  *     iend:   index in table corresponding to vend
  *     count:  On entry: how many extra entries were required in previous level, scales
  *                       our end index.
  *             On exit: returns how many extra entries required for next page table level
  *
- * Preserves:  vstart, vend, shift, ptrs
+ * Preserves:  vstart, vend
  * Returns:    istart, iend, count
  */
-       .macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
-       lsr     \iend, \vend, \shift
-       mov     \istart, \ptrs
-       sub     \istart, \istart, #1
-       and     \iend, \iend, \istart   // iend = (vend >> shift) & (ptrs - 1)
-       mov     \istart, \ptrs
-       mul     \istart, \istart, \count
-       add     \iend, \iend, \istart   // iend += count * ptrs
-                                       // our entries span multiple tables
-
-       lsr     \istart, \vstart, \shift
-       mov     \count, \ptrs
-       sub     \count, \count, #1
-       and     \istart, \istart, \count
-
+       .macro compute_indices, vstart, vend, shift, order, istart, iend, count
+       ubfx    \istart, \vstart, \shift, \order
+       ubfx    \iend, \vend, \shift, \order
+       add     \iend, \iend, \count, lsl \order
        sub     \count, \iend, \istart
        .endm
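
[ C equivalent of the reworked compute_indices (sketch): ubfx extracts
  the index as a bitfield in one instruction, and 'count' carries the
  entries spilled over from the previous level: ]

    istart = (vstart >> shift) & ((1UL << order) - 1);
    iend   = ((vend >> shift) & ((1UL << order) - 1)) + (count << order);
    count  = iend - istart;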
 
@@ -218,119 +199,116 @@ SYM_CODE_END(preserve_boot_args)
  *     vend:   virtual address of end of range - we map [vstart, vend - 1]
  *     flags:  flags to use to map last level entries
  *     phys:   physical address corresponding to vstart - physical memory is contiguous
- *     pgds:   the number of pgd entries
+ *     order:  #imm 2log(number of entries in PGD table)
+ *
+ * If extra_shift is set, an extra level will be populated if the end address does
+ * not fit in 'extra_shift' bits. This assumes vend is in the TTBR0 range.
  *
  * Temporaries:        istart, iend, tmp, count, sv - these need to be different registers
  * Preserves:  vstart, flags
  * Corrupts:   tbl, rtbl, vend, istart, iend, tmp, count, sv
  */
-       .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
+       .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, order, istart, iend, tmp, count, sv, extra_shift
        sub \vend, \vend, #1
        add \rtbl, \tbl, #PAGE_SIZE
-       mov \sv, \rtbl
        mov \count, #0
-       compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
+
+       .ifnb   \extra_shift
+       tst     \vend, #~((1 << (\extra_shift)) - 1)
+       b.eq    .L_\@
+       compute_indices \vstart, \vend, #\extra_shift, #(PAGE_SHIFT - 3), \istart, \iend, \count
+       mov \sv, \rtbl
        populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
        mov \tbl, \sv
+       .endif
+.L_\@:
+       compute_indices \vstart, \vend, #PGDIR_SHIFT, #\order, \istart, \iend, \count
        mov \sv, \rtbl
+       populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
+       mov \tbl, \sv
 
 #if SWAPPER_PGTABLE_LEVELS > 3
-       compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
+       compute_indices \vstart, \vend, #PUD_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
+       mov \sv, \rtbl
        populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
        mov \tbl, \sv
-       mov \sv, \rtbl
 #endif
 
 #if SWAPPER_PGTABLE_LEVELS > 2
-       compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
+       compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
+       mov \sv, \rtbl
        populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
        mov \tbl, \sv
 #endif
 
-       compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
-       bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
-       populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
+       compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
+       bic \rtbl, \phys, #SWAPPER_BLOCK_SIZE - 1
+       populate_entries \tbl, \rtbl, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
        .endm
 
 /*
- * Setup the initial page tables. We only setup the barest amount which is
- * required to get the kernel running. The following sections are required:
- *   - identity mapping to enable the MMU (low address, TTBR0)
- *   - first few MB of the kernel linear mapping to jump to once the MMU has
- *     been enabled
+ * Remap a subregion created with the map_memory macro with modified attributes
+ * or output address. The entire remapped region must have been covered in the
+ * invocation of map_memory.
+ *
+ * x0: last level table address (returned in first argument to map_memory)
+ * x1: start VA of the existing mapping
+ * x2: start VA of the region to update
+ * x3: end VA of the region to update (exclusive)
+ * x4: start PA associated with the region to update
+ * x5: attributes to set on the updated region
+ * x6: order of the last level mappings
  */
-SYM_FUNC_START_LOCAL(__create_page_tables)
-       mov     x28, lr
+SYM_FUNC_START_LOCAL(remap_region)
+       sub     x3, x3, #1              // make end inclusive
 
-       /*
-        * Invalidate the init page tables to avoid potential dirty cache lines
-        * being evicted. Other page tables are allocated in rodata as part of
-        * the kernel image, and thus are clean to the PoC per the boot
-        * protocol.
-        */
-       adrp    x0, init_pg_dir
-       adrp    x1, init_pg_end
-       bl      dcache_inval_poc
+       // Get the index offset for the start of the last level table
+       lsr     x1, x1, x6
+       bfi     x1, xzr, #0, #PAGE_SHIFT - 3
 
-       /*
-        * Clear the init page tables.
-        */
-       adrp    x0, init_pg_dir
-       adrp    x1, init_pg_end
-       sub     x1, x1, x0
-1:     stp     xzr, xzr, [x0], #16
-       stp     xzr, xzr, [x0], #16
-       stp     xzr, xzr, [x0], #16
-       stp     xzr, xzr, [x0], #16
-       subs    x1, x1, #64
-       b.ne    1b
+       // Derive the start and end indexes into the last level table
+       // associated with the provided region
+       lsr     x2, x2, x6
+       lsr     x3, x3, x6
+       sub     x2, x2, x1
+       sub     x3, x3, x1
 
-       mov     x7, SWAPPER_MM_MMUFLAGS
+       mov     x1, #1
+       lsl     x6, x1, x6              // block size at this level
 
-       /*
-        * Create the identity mapping.
-        */
-       adrp    x0, idmap_pg_dir
-       adrp    x3, __idmap_text_start          // __pa(__idmap_text_start)
-
-#ifdef CONFIG_ARM64_VA_BITS_52
-       mrs_s   x6, SYS_ID_AA64MMFR2_EL1
-       and     x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
-       mov     x5, #52
-       cbnz    x6, 1f
-#endif
-       mov     x5, #VA_BITS_MIN
-1:
-       adr_l   x6, vabits_actual
-       str     x5, [x6]
-       dmb     sy
-       dc      ivac, x6                // Invalidate potentially stale cache line
+       populate_entries x0, x4, x2, x3, x5, x6, x7
+       ret
+SYM_FUNC_END(remap_region)
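
A loose C rendering of remap_region may help when reading the asm above. This is a sketch under assumed types, not the kernel's code, and it folds populate_entries into the loop body.

#include <linux/types.h>
#include <asm/page.h>

/* Sketch: rewrite the last-level slots covering [start, end) with new
 * attributes and output address, indexed relative to the table that
 * maps the region starting at @map_start_va. */
static void remap_region_model(u64 *tbl, u64 map_start_va, u64 start,
                               u64 end, u64 pa, u64 attrs,
                               unsigned int shift)
{
        u64 blk = (u64)1 << shift;
        /* index of the table's first slot: clear the low PAGE_SHIFT - 3 bits */
        u64 base = (map_start_va >> shift) & ~(((u64)1 << (PAGE_SHIFT - 3)) - 1);
        u64 i = (start >> shift) - base;
        u64 last = ((end - 1) >> shift) - base;

        for (pa &= ~(blk - 1); i <= last; i++, pa += blk)
                tbl[i] = pa | attrs;
}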
 
+SYM_FUNC_START_LOCAL(create_idmap)
+       mov     x28, lr
        /*
-        * VA_BITS may be too small to allow for an ID mapping to be created
-        * that covers system RAM if that is located sufficiently high in the
-        * physical address space. So for the ID map, use an extended virtual
-        * range in that case, and configure an additional translation level
-        * if needed.
+        * The ID map carries a 1:1 mapping of the physical address range
+        * covered by the loaded image, which could be anywhere in DRAM. This
+        * means that the required size of the VA (== PA) space is decided at
+        * boot time, and could be more than the configured size of the VA
+        * space for ordinary kernel and user space mappings.
+        *
+        * There are three cases to consider here:
+        * - 39 <= VA_BITS < 48, and the ID map needs up to 48 VA bits to cover
+        *   the placement of the image. In this case, we configure one extra
+        *   level of translation on the fly for the ID map only. (This case
+        *   also covers 42-bit VA/52-bit PA on 64k pages).
         *
-        * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
-        * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
-        * this number conveniently equals the number of leading zeroes in
-        * the physical address of __idmap_text_end.
+        * - VA_BITS == 48, and the ID map needs more than 48 VA bits. This can
+        *   only happen when using 64k pages, in which case we need to extend
+        *   the root level table rather than add a level. Note that we can
+        *   treat this case as 'always extended' as long as we take care not
+        *   to program an unsupported T0SZ value into the TCR register.
+        *
+        * - Combinations that would require two additional levels of
+        *   translation are not supported, e.g., VA_BITS==36 on 16k pages, or
+        *   VA_BITS==39/4k pages with 5-level paging, where the input address
+        *   requires more than 47 or 48 bits, respectively.
         */
-       adrp    x5, __idmap_text_end
-       clz     x5, x5
-       cmp     x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
-       b.ge    1f                      // .. then skip VA range extension
-
-       adr_l   x6, idmap_t0sz
-       str     x5, [x6]
-       dmb     sy
-       dc      ivac, x6                // Invalidate potentially stale cache line
-
 #if (VA_BITS < 48)
+#define IDMAP_PGD_ORDER        (VA_BITS - PGDIR_SHIFT)
 #define EXTRA_SHIFT    (PGDIR_SHIFT + PAGE_SHIFT - 3)
-#define EXTRA_PTRS     (1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))
 
        /*
         * If VA_BITS < 48, we have to configure an additional table level.
@@ -342,36 +320,40 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 #if VA_BITS != EXTRA_SHIFT
 #error "Mismatch between VA_BITS and page size/number of translation levels"
 #endif
-
-       mov     x4, EXTRA_PTRS
-       create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
 #else
+#define IDMAP_PGD_ORDER        (PHYS_MASK_SHIFT - PGDIR_SHIFT)
+#define EXTRA_SHIFT
        /*
         * If VA_BITS == 48, we don't have to configure an additional
         * translation level, but the top-level table has more entries.
         */
-       mov     x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
-       str_l   x4, idmap_ptrs_per_pgd, x5
 #endif
-1:
-       ldr_l   x4, idmap_ptrs_per_pgd
-       adr_l   x6, __idmap_text_end            // __pa(__idmap_text_end)
-
-       map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
-
-       /*
-        * Map the kernel image (starting with PHYS_OFFSET).
-        */
-       adrp    x0, init_pg_dir
-       mov_q   x5, KIMAGE_VADDR                // compile time __va(_text)
-       add     x5, x5, x23                     // add KASLR displacement
-       mov     x4, PTRS_PER_PGD
-       adrp    x6, _end                        // runtime __pa(_end)
-       adrp    x3, _text                       // runtime __pa(_text)
-       sub     x6, x6, x3                      // _end - _text
-       add     x6, x6, x5                      // runtime __va(_end)
-
-       map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14
+       adrp    x0, init_idmap_pg_dir
+       adrp    x3, _text
+       adrp    x6, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE
+       mov     x7, SWAPPER_RX_MMUFLAGS
+
+       map_memory x0, x1, x3, x6, x7, x3, IDMAP_PGD_ORDER, x10, x11, x12, x13, x14, EXTRA_SHIFT
+
+       /* Remap the kernel page tables r/w in the ID map */
+       adrp    x1, _text
+       adrp    x2, init_pg_dir
+       adrp    x3, init_pg_end
+       bic     x4, x2, #SWAPPER_BLOCK_SIZE - 1
+       mov     x5, SWAPPER_RW_MMUFLAGS
+       mov     x6, #SWAPPER_BLOCK_SHIFT
+       bl      remap_region
+
+       /* Remap the FDT after the kernel image */
+       adrp    x1, _text
+       adrp    x22, _end + SWAPPER_BLOCK_SIZE
+       bic     x2, x22, #SWAPPER_BLOCK_SIZE - 1
+       bfi     x22, x21, #0, #SWAPPER_BLOCK_SHIFT              // remapped FDT address
+       add     x3, x2, #MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE
+       bic     x4, x21, #SWAPPER_BLOCK_SIZE - 1
+       mov     x5, SWAPPER_RW_MMUFLAGS
+       mov     x6, #SWAPPER_BLOCK_SHIFT
+       bl      remap_region
 
        /*
         * Since the page tables have been populated with non-cacheable
@@ -380,16 +362,27 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
         */
        dmb     sy
 
-       adrp    x0, idmap_pg_dir
-       adrp    x1, idmap_pg_end
+       adrp    x0, init_idmap_pg_dir
+       adrp    x1, init_idmap_pg_end
        bl      dcache_inval_poc
+       ret     x28
+SYM_FUNC_END(create_idmap)
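
The case analysis above reduces to one predicate; a hypothetical C helper (not in the patch, and assuming a sane non-zero image end address) that captures it:

#include <linux/types.h>

/* Sketch: does an image ending at @pa_end need more VA (== PA) bits
 * than the configured VA space provides? If so, the ID map grows
 * either an extra level (VA_BITS < 48) or a wider root table
 * (VA_BITS == 48 on 64k pages), per the comment above. */
static bool idmap_needs_extension(u64 pa_end)
{
        unsigned int bits = 64 - __builtin_clzll(pa_end - 1);

        return bits > VA_BITS;
}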
 
+SYM_FUNC_START_LOCAL(create_kernel_mapping)
        adrp    x0, init_pg_dir
-       adrp    x1, init_pg_end
-       bl      dcache_inval_poc
+       mov_q   x5, KIMAGE_VADDR                // compile time __va(_text)
+       add     x5, x5, x23                     // add KASLR displacement
+       adrp    x6, _end                        // runtime __pa(_end)
+       adrp    x3, _text                       // runtime __pa(_text)
+       sub     x6, x6, x3                      // _end - _text
+       add     x6, x6, x5                      // runtime __va(_end)
+       mov     x7, SWAPPER_RW_MMUFLAGS
 
-       ret     x28
-SYM_FUNC_END(__create_page_tables)
+       map_memory x0, x1, x5, x6, x7, x3, (VA_BITS - PGDIR_SHIFT), x10, x11, x12, x13, x14
+
+       dsb     ishst                           // sync with page table walker
+       ret
+SYM_FUNC_END(create_kernel_mapping)
 
        /*
         * Initialize CPU registers with task-specific and cpu-specific context.
@@ -420,7 +413,7 @@ SYM_FUNC_END(__create_page_tables)
 /*
  * The following fragment of code is executed with the MMU enabled.
  *
- *   x0 = __PHYS_OFFSET
+ *   x0 = __pa(KERNEL_START)
  */
 SYM_FUNC_START_LOCAL(__primary_switched)
        adr_l   x4, init_task
@@ -439,6 +432,9 @@ SYM_FUNC_START_LOCAL(__primary_switched)
        sub     x4, x4, x0                      // the kernel virtual and
        str_l   x4, kimage_voffset, x5          // physical mappings
 
+       mov     x0, x20
+       bl      set_cpu_boot_mode_flag
+
        // Clear BSS
        adr_l   x0, __bss_start
        mov     x1, xzr
@@ -447,35 +443,30 @@ SYM_FUNC_START_LOCAL(__primary_switched)
        bl      __pi_memset
        dsb     ishst                           // Make zero page visible to PTW
 
+#if VA_BITS > 48
+       adr_l   x8, vabits_actual               // Set this early so KASAN early init
+       str     x25, [x8]                       // ... observes the correct value
+       dc      civac, x8                       // Make visible to booting secondaries
+#endif
+
+#ifdef CONFIG_RANDOMIZE_BASE
+       adrp    x5, memstart_offset_seed        // Save KASLR linear map seed
+       strh    w24, [x5, :lo12:memstart_offset_seed]
+#endif
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
        bl      kasan_early_init
 #endif
        mov     x0, x21                         // pass FDT address in x0
        bl      early_fdt_map                   // Try mapping the FDT early
+       mov     x0, x20                         // pass the full boot status
        bl      init_feature_override           // Parse cpu feature overrides
-#ifdef CONFIG_RANDOMIZE_BASE
-       tst     x23, ~(MIN_KIMG_ALIGN - 1)      // already running randomized?
-       b.ne    0f
-       bl      kaslr_early_init                // parse FDT for KASLR options
-       cbz     x0, 0f                          // KASLR disabled? just proceed
-       orr     x23, x23, x0                    // record KASLR offset
-       ldp     x29, x30, [sp], #16             // we must enable KASLR, return
-       ret                                     // to __primary_switch()
-0:
-#endif
-       bl      switch_to_vhe                   // Prefer VHE if possible
+       mov     x0, x20
+       bl      finalise_el2                    // Prefer VHE if possible
        ldp     x29, x30, [sp], #16
        bl      start_kernel
        ASM_BUG()
 SYM_FUNC_END(__primary_switched)
 
-       .pushsection ".rodata", "a"
-SYM_DATA_START(kimage_vaddr)
-       .quad           _text
-SYM_DATA_END(kimage_vaddr)
-EXPORT_SYMBOL(kimage_vaddr)
-       .popsection
-
 /*
  * end early head section, begin head code that is also used for
  * hotplug and needs to have the same protections as the text region
@@ -490,8 +481,9 @@ EXPORT_SYMBOL(kimage_vaddr)
  * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
  * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
  *
- * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
- * booted in EL1 or EL2 respectively.
+ * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if
+ * booted in EL1 or EL2 respectively, with the top 32 bits containing
+ * potential context flags. These flags are *not* stored in __boot_cpu_mode.
  */
 SYM_FUNC_START(init_kernel_el)
        mrs     x0, CurrentEL
@@ -520,6 +512,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
        msr     vbar_el2, x0
        isb
 
+       mov_q   x1, INIT_SCTLR_EL1_MMU_OFF
+
        /*
         * Fruity CPUs seem to have HCR_EL2.E2H set to RES1,
         * making it impossible to start in nVHE mode. Is that
@@ -529,34 +523,19 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
        and     x0, x0, #HCR_E2H
        cbz     x0, 1f
 
-       /* Switching to VHE requires a sane SCTLR_EL1 as a start */
-       mov_q   x0, INIT_SCTLR_EL1_MMU_OFF
-       msr_s   SYS_SCTLR_EL12, x0
-
-       /*
-        * Force an eret into a helper "function", and let it return
-        * to our original caller... This makes sure that we have
-        * initialised the basic PSTATE state.
-        */
-       mov     x0, #INIT_PSTATE_EL2
-       msr     spsr_el1, x0
-       adr     x0, __cpu_stick_to_vhe
-       msr     elr_el1, x0
-       eret
+       /* Set a sane SCTLR_EL1, the VHE way */
+       msr_s   SYS_SCTLR_EL12, x1
+       mov     x2, #BOOT_CPU_FLAG_E2H
+       b       2f
 
 1:
-       mov_q   x0, INIT_SCTLR_EL1_MMU_OFF
-       msr     sctlr_el1, x0
-
+       msr     sctlr_el1, x1
+       mov     x2, xzr
+2:
        msr     elr_el2, lr
        mov     w0, #BOOT_CPU_MODE_EL2
+       orr     x0, x0, x2
        eret
-
-__cpu_stick_to_vhe:
-       mov     x0, #HVC_VHE_RESTART
-       hvc     #0
-       mov     x0, #BOOT_CPU_MODE_EL2
-       ret
 SYM_FUNC_END(init_kernel_el)
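
In C terms, the combined return value can be decomposed as below. This is a sketch; the helper names are made up, and nothing beyond "mode in the low word, flags such as BOOT_CPU_FLAG_E2H in the top 32 bits" should be read into it.

#include <linux/types.h>
#include <asm/virt.h>

static inline u32 boot_mode(u64 boot_status)
{
        return (u32)boot_status;                /* BOOT_CPU_MODE_EL1/EL2 */
}

static inline bool boot_used_e2h(u64 boot_status)
{
        return boot_status & BOOT_CPU_FLAG_E2H; /* context flag, upper half */
}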
 
 /*
@@ -569,52 +548,21 @@ SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
        b.ne    1f
        add     x1, x1, #4
 1:     str     w0, [x1]                        // Save CPU boot mode
-       dmb     sy
-       dc      ivac, x1                        // Invalidate potentially stale cache line
        ret
 SYM_FUNC_END(set_cpu_boot_mode_flag)
 
-/*
- * These values are written with the MMU off, but read with the MMU on.
- * Writers will invalidate the corresponding address, discarding up to a
- * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
- * sufficient alignment that the CWG doesn't overlap another section.
- */
-       .pushsection ".mmuoff.data.write", "aw"
-/*
- * We need to find out the CPU boot mode long after boot, so we need to
- * store it in a writable variable.
- *
- * This is not in .bss, because we set it sufficiently early that the boot-time
- * zeroing of .bss would clobber it.
- */
-SYM_DATA_START(__boot_cpu_mode)
-       .long   BOOT_CPU_MODE_EL2
-       .long   BOOT_CPU_MODE_EL1
-SYM_DATA_END(__boot_cpu_mode)
-/*
- * The booting CPU updates the failed status @__early_cpu_boot_status,
- * with MMU turned off.
- */
-SYM_DATA_START(__early_cpu_boot_status)
-       .quad   0
-SYM_DATA_END(__early_cpu_boot_status)
-
-       .popsection
-
        /*
         * This provides a "holding pen" for platforms to hold all secondary
         * cores are held until we're ready for them to initialise.
         */
 SYM_FUNC_START(secondary_holding_pen)
        bl      init_kernel_el                  // w0=cpu_boot_mode
-       bl      set_cpu_boot_mode_flag
-       mrs     x0, mpidr_el1
+       mrs     x2, mpidr_el1
        mov_q   x1, MPIDR_HWID_BITMASK
-       and     x0, x0, x1
+       and     x2, x2, x1
        adr_l   x3, secondary_holding_pen_release
 pen:   ldr     x4, [x3]
-       cmp     x4, x0
+       cmp     x4, x2
        b.eq    secondary_startup
        wfe
        b       pen
@@ -626,7 +574,6 @@ SYM_FUNC_END(secondary_holding_pen)
         */
 SYM_FUNC_START(secondary_entry)
        bl      init_kernel_el                  // w0=cpu_boot_mode
-       bl      set_cpu_boot_mode_flag
        b       secondary_startup
 SYM_FUNC_END(secondary_entry)
 
@@ -634,16 +581,24 @@ SYM_FUNC_START_LOCAL(secondary_startup)
        /*
         * Common entry point for secondary CPUs.
         */
-       bl      switch_to_vhe
+       mov     x20, x0                         // preserve boot mode
+       bl      finalise_el2
        bl      __cpu_secondary_check52bitva
+#if VA_BITS > 48
+       ldr_l   x0, vabits_actual
+#endif
        bl      __cpu_setup                     // initialise processor
        adrp    x1, swapper_pg_dir
+       adrp    x2, idmap_pg_dir
        bl      __enable_mmu
        ldr     x8, =__secondary_switched
        br      x8
 SYM_FUNC_END(secondary_startup)
 
 SYM_FUNC_START_LOCAL(__secondary_switched)
+       mov     x0, x20
+       bl      set_cpu_boot_mode_flag
+       str_l   xzr, __early_cpu_boot_status, x3
        adr_l   x5, vectors
        msr     vbar_el1, x5
        isb
@@ -691,6 +646,7 @@ SYM_FUNC_END(__secondary_too_slow)
  *
  *  x0  = SCTLR_EL1 value for turning on the MMU.
  *  x1  = TTBR1_EL1 value
+ *  x2  = ID map root table address
  *
  * Returns to the caller via x30/lr. This requires the caller to be covered
  * by the .idmap.text section.
@@ -699,20 +655,15 @@ SYM_FUNC_END(__secondary_too_slow)
  * If it isn't, park the CPU
  */
 SYM_FUNC_START(__enable_mmu)
-       mrs     x2, ID_AA64MMFR0_EL1
-       ubfx    x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
-       cmp     x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
+       mrs     x3, ID_AA64MMFR0_EL1
+       ubfx    x3, x3, #ID_AA64MMFR0_TGRAN_SHIFT, 4
+       cmp     x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
        b.lt    __no_granule_support
-       cmp     x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
+       cmp     x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
        b.gt    __no_granule_support
-       update_early_cpu_boot_status 0, x2, x3
-       adrp    x2, idmap_pg_dir
-       phys_to_ttbr x1, x1
        phys_to_ttbr x2, x2
        msr     ttbr0_el1, x2                   // load TTBR0
-       offset_ttbr1 x1, x3
-       msr     ttbr1_el1, x1                   // load TTBR1
-       isb
+       load_ttbr1 x1, x1, x3
 
        set_sctlr_el1   x0
 
@@ -720,7 +671,7 @@ SYM_FUNC_START(__enable_mmu)
 SYM_FUNC_END(__enable_mmu)
 
 SYM_FUNC_START(__cpu_secondary_check52bitva)
-#ifdef CONFIG_ARM64_VA_BITS_52
+#if VA_BITS > 48
        ldr_l   x0, vabits_actual
        cmp     x0, #52
        b.ne    2f
@@ -755,13 +706,10 @@ SYM_FUNC_START_LOCAL(__relocate_kernel)
         * Iterate over each entry in the relocation table, and apply the
         * relocations in place.
         */
-       ldr     w9, =__rela_offset              // offset to reloc table
-       ldr     w10, =__rela_size               // size of reloc table
-
+       adr_l   x9, __rela_start
+       adr_l   x10, __rela_end
        mov_q   x11, KIMAGE_VADDR               // default virtual offset
        add     x11, x11, x23                   // actual virtual offset
-       add     x9, x9, x11                     // __va(.rela)
-       add     x10, x9, x10                    // __va(.rela) + sizeof(.rela)
 
 0:     cmp     x9, x10
        b.hs    1f
@@ -804,21 +752,9 @@ SYM_FUNC_START_LOCAL(__relocate_kernel)
         * entry in x9, the address being relocated by the current address or
         * bitmap entry in x13 and the address being relocated by the current
         * bit in x14.
-        *
-        * Because addends are stored in place in the binary, RELR relocations
-        * cannot be applied idempotently. We use x24 to keep track of the
-        * currently applied displacement so that we can correctly relocate if
-        * __relocate_kernel is called twice with non-zero displacements (i.e.
-        * if there is both a physical misalignment and a KASLR displacement).
         */
-       ldr     w9, =__relr_offset              // offset to reloc table
-       ldr     w10, =__relr_size               // size of reloc table
-       add     x9, x9, x11                     // __va(.relr)
-       add     x10, x9, x10                    // __va(.relr) + sizeof(.relr)
-
-       sub     x15, x23, x24                   // delta from previous offset
-       cbz     x15, 7f                         // nothing to do if unchanged
-       mov     x24, x23                        // save new offset
+       adr_l   x9, __relr_start
+       adr_l   x10, __relr_end
 
 2:     cmp     x9, x10
        b.hs    7f
@@ -826,7 +762,7 @@ SYM_FUNC_START_LOCAL(__relocate_kernel)
        tbnz    x11, #0, 3f                     // branch to handle bitmaps
        add     x13, x11, x23
        ldr     x12, [x13]                      // relocate address entry
-       add     x12, x12, x15
+       add     x12, x12, x23
        str     x12, [x13], #8                  // adjust to start of bitmap
        b       2b
 
@@ -835,7 +771,7 @@ SYM_FUNC_START_LOCAL(__relocate_kernel)
        cbz     x11, 6f
        tbz     x11, #0, 5f                     // skip bit if not set
        ldr     x12, [x14]                      // relocate bit
-       add     x12, x12, x15
+       add     x12, x12, x23
        str     x12, [x14]
 
 5:     add     x14, x14, #8                    // move to next bit's address
@@ -856,43 +792,32 @@ SYM_FUNC_END(__relocate_kernel)
 #endif
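
For reference, the RELR encoding decoded by the loops above can be written out in C as follows. This is a sketch of the format, not the kernel's implementation; note that the patch makes __relocate_kernel run exactly once, which is why the displacement-delta bookkeeping could be dropped.

#include <stddef.h>
#include <stdint.h>

/* Sketch: even entries name a slot to relocate; odd entries are
 * bitmaps whose bits 1..63 cover the 63 slots that follow the most
 * recent address entry. A well-formed stream opens with an address
 * entry, so @place is set before any bitmap is seen. */
static void apply_relr(const uint64_t *start, const uint64_t *end,
                       uint64_t disp)
{
        uint64_t *place = NULL;

        for (const uint64_t *p = start; p < end; p++) {
                uint64_t entry = *p;

                if (!(entry & 1)) {                     /* address entry */
                        place = (uint64_t *)(entry + disp);
                        *place++ += disp;
                } else {                                /* bitmap entry */
                        for (int i = 0; (entry >>= 1) != 0; i++)
                                if (entry & 1)
                                        place[i] += disp;
                        place += 63;
                }
        }
}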
 
 SYM_FUNC_START_LOCAL(__primary_switch)
+       adrp    x1, reserved_pg_dir
+       adrp    x2, init_idmap_pg_dir
+       bl      __enable_mmu
+#ifdef CONFIG_RELOCATABLE
+       adrp    x23, KERNEL_START
+       and     x23, x23, MIN_KIMG_ALIGN - 1
 #ifdef CONFIG_RANDOMIZE_BASE
-       mov     x19, x0                         // preserve new SCTLR_EL1 value
-       mrs     x20, sctlr_el1                  // preserve old SCTLR_EL1 value
+       mov     x0, x22
+       adrp    x1, init_pg_end
+       mov     sp, x1
+       mov     x29, xzr
+       bl      __pi_kaslr_early_init
+       and     x24, x0, #SZ_2M - 1             // capture memstart offset seed
+       bic     x0, x0, #SZ_2M - 1
+       orr     x23, x23, x0                    // record kernel offset
+#endif
 #endif
+       bl      clear_page_tables
+       bl      create_kernel_mapping
 
        adrp    x1, init_pg_dir
-       bl      __enable_mmu
+       load_ttbr1 x1, x1, x2
 #ifdef CONFIG_RELOCATABLE
-#ifdef CONFIG_RELR
-       mov     x24, #0                         // no RELR displacement yet
-#endif
        bl      __relocate_kernel
-#ifdef CONFIG_RANDOMIZE_BASE
-       ldr     x8, =__primary_switched
-       adrp    x0, __PHYS_OFFSET
-       blr     x8
-
-       /*
-        * If we return here, we have a KASLR displacement in x23 which we need
-        * to take into account by discarding the current kernel mapping and
-        * creating a new one.
-        */
-       pre_disable_mmu_workaround
-       msr     sctlr_el1, x20                  // disable the MMU
-       isb
-       bl      __create_page_tables            // recreate kernel mapping
-
-       tlbi    vmalle1                         // Remove any stale TLB entries
-       dsb     nsh
-       isb
-
-       set_sctlr_el1   x19                     // re-enable the MMU
-
-       bl      __relocate_kernel
-#endif
 #endif
        ldr     x8, =__primary_switched
-       adrp    x0, __PHYS_OFFSET
+       adrp    x0, KERNEL_START                // __pa(KERNEL_START)
        br      x8
 SYM_FUNC_END(__primary_switch)
index 2e24834..af5df48 100644 (file)
@@ -300,11 +300,6 @@ static void swsusp_mte_restore_tags(void)
                unsigned long pfn = xa_state.xa_index;
                struct page *page = pfn_to_online_page(pfn);
 
-               /*
-                * It is not required to invoke page_kasan_tag_reset(page)
-                * at this point since the tags stored in page->flags are
-                * already restored.
-                */
                mte_restore_page_tags(page_address(page), tags);
 
                mte_free_tag_storage(tags);
index 43d2126..12c7fad 100644 (file)
 #include <asm/ptrace.h>
 #include <asm/virt.h>
 
+// Warning, hardcoded register allocation
+// This will clobber x1 and x2, and expect x1 to contain
+// the id register value as read from the HW
+.macro __check_override idreg, fld, width, pass, fail
+       ubfx    x1, x1, #\fld, #\width
+       cbz     x1, \fail
+
+       adr_l   x1, \idreg\()_override
+       ldr     x2, [x1, FTR_OVR_VAL_OFFSET]
+       ldr     x1, [x1, FTR_OVR_MASK_OFFSET]
+       ubfx    x2, x2, #\fld, #\width
+       ubfx    x1, x1, #\fld, #\width
+       cmp     x1, xzr
+       and     x2, x2, x1
+       csinv   x2, x2, xzr, ne
+       cbnz    x2, \pass
+       b       \fail
+.endm
+
+.macro check_override idreg, fld, pass, fail
+       mrs     x1, \idreg\()_el1
+       __check_override \idreg \fld 4 \pass \fail
+.endm
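
In C, the macro implements roughly the following predicate (a sketch; feature_enabled is a made-up name):

#include <linux/bits.h>
#include <linux/types.h>

/* Sketch: pass iff the HW advertises a non-zero field and no override
 * forces it to zero. An override only applies where its mask bits are
 * set; the csinv above yields all-ones (pass) when the field is not
 * overridden at all. */
static bool feature_enabled(u64 hw, u64 ovr_val, u64 ovr_mask,
                            unsigned int shift, unsigned int width)
{
        u64 fmask = GENMASK_ULL(shift + width - 1, shift);

        if (!(hw & fmask))
                return false;           /* not implemented */
        if (!(ovr_mask & fmask))
                return true;            /* no override in force */
        return (ovr_val & ovr_mask & fmask) != 0;
}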
+
        .text
        .pushsection    .hyp.text, "ax"
 
@@ -51,8 +75,8 @@ SYM_CODE_START_LOCAL(elx_sync)
        msr     vbar_el2, x1
        b       9f
 
-1:     cmp     x0, #HVC_VHE_RESTART
-       b.eq    mutate_to_vhe
+1:     cmp     x0, #HVC_FINALISE_EL2
+       b.eq    __finalise_el2
 
 2:     cmp     x0, #HVC_SOFT_RESTART
        b.ne    3f
@@ -73,27 +97,67 @@ SYM_CODE_START_LOCAL(elx_sync)
        eret
 SYM_CODE_END(elx_sync)
 
-// nVHE? No way! Give me the real thing!
-SYM_CODE_START_LOCAL(mutate_to_vhe)
+SYM_CODE_START_LOCAL(__finalise_el2)
+       check_override id_aa64pfr0 ID_AA64PFR0_SVE_SHIFT .Linit_sve .Lskip_sve
+
+.Linit_sve:    /* SVE register access */
+       mrs     x0, cptr_el2                    // Disable SVE traps
+       bic     x0, x0, #CPTR_EL2_TZ
+       msr     cptr_el2, x0
+       isb
+       mov     x1, #ZCR_ELx_LEN_MASK           // SVE: Enable full vector
+       msr_s   SYS_ZCR_EL2, x1                 // length for EL1.
+
+.Lskip_sve:
+       check_override id_aa64pfr1 ID_AA64PFR1_SME_SHIFT .Linit_sme .Lskip_sme
+
+.Linit_sme:    /* SME register access and priority mapping */
+       mrs     x0, cptr_el2                    // Disable SME traps
+       bic     x0, x0, #CPTR_EL2_TSM
+       msr     cptr_el2, x0
+       isb
+
+       mrs     x1, sctlr_el2
+       orr     x1, x1, #SCTLR_ELx_ENTP2        // Disable TPIDR2 traps
+       msr     sctlr_el2, x1
+       isb
+
+       mov     x0, #0                          // SMCR controls
+
+       // Full FP in SM?
+       mrs_s   x1, SYS_ID_AA64SMFR0_EL1
+       __check_override id_aa64smfr0 ID_AA64SMFR0_EL1_FA64_SHIFT 1 .Linit_sme_fa64 .Lskip_sme_fa64
+
+.Linit_sme_fa64:
+       orr     x0, x0, SMCR_ELx_FA64_MASK
+.Lskip_sme_fa64:
+
+       orr     x0, x0, #SMCR_ELx_LEN_MASK      // Enable full SME vector
+       msr_s   SYS_SMCR_EL2, x0                // length for EL1.
+
+       mrs_s   x1, SYS_SMIDR_EL1               // Priority mapping supported?
+       ubfx    x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
+       cbz     x1, .Lskip_sme
+
+       msr_s   SYS_SMPRIMAP_EL2, xzr           // Make all priorities equal
+
+       mrs     x1, id_aa64mmfr1_el1            // HCRX_EL2 present?
+       ubfx    x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4
+       cbz     x1, .Lskip_sme
+
+       mrs_s   x1, SYS_HCRX_EL2
+       orr     x1, x1, #HCRX_EL2_SMPME_MASK    // Enable priority mapping
+       msr_s   SYS_HCRX_EL2, x1
+
+.Lskip_sme:
+
+       // nVHE? No way! Give me the real thing!
        // Sanity check: MMU *must* be off
        mrs     x1, sctlr_el2
        tbnz    x1, #0, 1f
 
        // Needs to be VHE capable, obviously
-       mrs     x1, id_aa64mmfr1_el1
-       ubfx    x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
-       cbz     x1, 1f
-
-       // Check whether VHE is disabled from the command line
-       adr_l   x1, id_aa64mmfr1_override
-       ldr     x2, [x1, FTR_OVR_VAL_OFFSET]
-       ldr     x1, [x1, FTR_OVR_MASK_OFFSET]
-       ubfx    x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
-       ubfx    x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
-       cmp     x1, xzr
-       and     x2, x2, x1
-       csinv   x2, x2, xzr, ne
-       cbnz    x2, 2f
+       check_override id_aa64mmfr1 ID_AA64MMFR1_VHE_SHIFT 2f 1f
 
 1:     mov_q   x0, HVC_STUB_ERR
        eret
@@ -140,10 +204,10 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
        msr     spsr_el1, x0
 
        b       enter_vhe
-SYM_CODE_END(mutate_to_vhe)
+SYM_CODE_END(__finalise_el2)
 
        // At the point where we reach enter_vhe(), we run with
-       // the MMU off (which is enforced by mutate_to_vhe()).
+       // the MMU off (which is enforced by __finalise_el2()).
        // We thus need to be in the idmap, or everything will
        // explode when enabling the MMU.
 
@@ -222,12 +286,12 @@ SYM_FUNC_START(__hyp_reset_vectors)
 SYM_FUNC_END(__hyp_reset_vectors)
 
 /*
- * Entry point to switch to VHE if deemed capable
+ * Entry point to finalise EL2 and switch to VHE if deemed capable
+ *
+ * w0: boot mode, as returned by init_kernel_el()
  */
-SYM_FUNC_START(switch_to_vhe)
+SYM_FUNC_START(finalise_el2)
        // Need to have booted at EL2
-       adr_l   x1, __boot_cpu_mode
-       ldr     w0, [x1]
        cmp     w0, #BOOT_CPU_MODE_EL2
        b.ne    1f
 
@@ -236,9 +300,8 @@ SYM_FUNC_START(switch_to_vhe)
        cmp     x0, #CurrentEL_EL1
        b.ne    1f
 
-       // Turn the world upside down
-       mov     x0, #HVC_VHE_RESTART
+       mov     x0, #HVC_FINALISE_EL2
        hvc     #0
 1:
        ret
-SYM_FUNC_END(switch_to_vhe)
+SYM_FUNC_END(finalise_el2)
index 8a2ceb5..1b0542c 100644 (file)
 #define FTR_ALIAS_NAME_LEN     30
 #define FTR_ALIAS_OPTION_LEN   116
 
+static u64 __boot_status __initdata;
+
 struct ftr_set_desc {
        char                            name[FTR_DESC_NAME_LEN];
        struct arm64_ftr_override       *override;
        struct {
                char                    name[FTR_DESC_FIELD_LEN];
                u8                      shift;
+               u8                      width;
                bool                    (*filter)(u64 val);
        }                               fields[];
 };
 
+#define FIELD(n, s, f) { .name = n, .shift = s, .width = 4, .filter = f }
+
 static bool __init mmfr1_vh_filter(u64 val)
 {
        /*
@@ -37,24 +42,65 @@ static bool __init mmfr1_vh_filter(u64 val)
         * the user was trying to force nVHE on us, proceed with
         * attitude adjustment.
         */
-       return !(is_kernel_in_hyp_mode() && val == 0);
+       return !(__boot_status == (BOOT_CPU_FLAG_E2H | BOOT_CPU_MODE_EL2) &&
+                val == 0);
 }
 
 static const struct ftr_set_desc mmfr1 __initconst = {
        .name           = "id_aa64mmfr1",
        .override       = &id_aa64mmfr1_override,
        .fields         = {
-               { "vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter },
+               FIELD("vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter),
+               {}
+       },
+};
+
+static bool __init pfr0_sve_filter(u64 val)
+{
+       /*
+        * Disabling SVE also means disabling all the features that
+        * are associated with it. The easiest way to do it is just to
+        * override id_aa64zfr0_el1 to be 0.
+        */
+       if (!val) {
+               id_aa64zfr0_override.val = 0;
+               id_aa64zfr0_override.mask = GENMASK(63, 0);
+       }
+
+       return true;
+}
+
+static const struct ftr_set_desc pfr0 __initconst = {
+       .name           = "id_aa64pfr0",
+       .override       = &id_aa64pfr0_override,
+       .fields         = {
+               FIELD("sve", ID_AA64PFR0_SVE_SHIFT, pfr0_sve_filter),
                {}
        },
 };
 
+static bool __init pfr1_sme_filter(u64 val)
+{
+       /*
+        * Similarly to SVE, disabling SME also means disabling all
+        * the features that are associated with it. Just set
+        * id_aa64smfr0_el1 to 0 and don't look back.
+        */
+       if (!val) {
+               id_aa64smfr0_override.val = 0;
+               id_aa64smfr0_override.mask = GENMASK(63, 0);
+       }
+
+       return true;
+}
+
 static const struct ftr_set_desc pfr1 __initconst = {
        .name           = "id_aa64pfr1",
        .override       = &id_aa64pfr1_override,
        .fields         = {
-               { "bt", ID_AA64PFR1_BT_SHIFT },
-               { "mte", ID_AA64PFR1_MTE_SHIFT},
+               FIELD("bt", ID_AA64PFR1_BT_SHIFT, NULL ),
+               FIELD("mte", ID_AA64PFR1_MTE_SHIFT, NULL),
+               FIELD("sme", ID_AA64PFR1_SME_SHIFT, pfr1_sme_filter),
                {}
        },
 };
@@ -63,10 +109,10 @@ static const struct ftr_set_desc isar1 __initconst = {
        .name           = "id_aa64isar1",
        .override       = &id_aa64isar1_override,
        .fields         = {
-               { "gpi", ID_AA64ISAR1_GPI_SHIFT },
-               { "gpa", ID_AA64ISAR1_GPA_SHIFT },
-               { "api", ID_AA64ISAR1_API_SHIFT },
-               { "apa", ID_AA64ISAR1_APA_SHIFT },
+               FIELD("gpi", ID_AA64ISAR1_EL1_GPI_SHIFT, NULL),
+               FIELD("gpa", ID_AA64ISAR1_EL1_GPA_SHIFT, NULL),
+               FIELD("api", ID_AA64ISAR1_EL1_API_SHIFT, NULL),
+               FIELD("apa", ID_AA64ISAR1_EL1_APA_SHIFT, NULL),
                {}
        },
 };
@@ -75,8 +121,18 @@ static const struct ftr_set_desc isar2 __initconst = {
        .name           = "id_aa64isar2",
        .override       = &id_aa64isar2_override,
        .fields         = {
-               { "gpa3", ID_AA64ISAR2_GPA3_SHIFT },
-               { "apa3", ID_AA64ISAR2_APA3_SHIFT },
+               FIELD("gpa3", ID_AA64ISAR2_EL1_GPA3_SHIFT, NULL),
+               FIELD("apa3", ID_AA64ISAR2_EL1_APA3_SHIFT, NULL),
+               {}
+       },
+};
+
+static const struct ftr_set_desc smfr0 __initconst = {
+       .name           = "id_aa64smfr0",
+       .override       = &id_aa64smfr0_override,
+       .fields         = {
+               /* FA64 is a one bit field... :-/ */
+               { "fa64", ID_AA64SMFR0_EL1_FA64_SHIFT, 1, },
                {}
        },
 };
@@ -89,16 +145,18 @@ static const struct ftr_set_desc kaslr __initconst = {
        .override       = &kaslr_feature_override,
 #endif
        .fields         = {
-               { "disabled", 0 },
+               FIELD("disabled", 0, NULL),
                {}
        },
 };
 
 static const struct ftr_set_desc * const regs[] __initconst = {
        &mmfr1,
+       &pfr0,
        &pfr1,
        &isar1,
        &isar2,
+       &smfr0,
        &kaslr,
 };
 
@@ -108,6 +166,8 @@ static const struct {
 } aliases[] __initconst = {
        { "kvm-arm.mode=nvhe",          "id_aa64mmfr1.vh=0" },
        { "kvm-arm.mode=protected",     "id_aa64mmfr1.vh=0" },
+       { "arm64.nosve",                "id_aa64pfr0.sve=0 id_aa64pfr1.sme=0" },
+       { "arm64.nosme",                "id_aa64pfr1.sme=0" },
        { "arm64.nobti",                "id_aa64pfr1.bt=0" },
        { "arm64.nopauth",
          "id_aa64isar1.gpi=0 id_aa64isar1.gpa=0 "
@@ -144,7 +204,8 @@ static void __init match_options(const char *cmdline)
 
                for (f = 0; strlen(regs[i]->fields[f].name); f++) {
                        u64 shift = regs[i]->fields[f].shift;
-                       u64 mask = 0xfUL << shift;
+                       u64 width = regs[i]->fields[f].width ?: 4;
+                       u64 mask = GENMASK_ULL(shift + width - 1, shift);
                        u64 v;
 
                        if (find_field(cmdline, regs[i], f, &v))
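
The width-aware mask construction can be isolated as a one-liner, sketched below with a made-up helper name; with shift 63 and width 1 (the fa64 field) it yields BIT(63), where the old fixed 0xf mask would have been over-wide.

#include <linux/bits.h>
#include <linux/types.h>

/* Sketch: per-field mask, defaulting width to 4 as match_options()
 * does (?: without a middle operand is the GNU extension the kernel
 * uses throughout). */
static u64 field_mask(u8 shift, u8 width)
{
        return GENMASK_ULL(shift + (width ?: 4) - 1, shift);
}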
@@ -152,7 +213,7 @@ static void __init match_options(const char *cmdline)
 
                        /*
                         * If an override gets filtered out, advertise
-                        * it by setting the value to 0xf, but
+                        * it by setting the value to all ones while
                         * clearing the mask... Yes, this is fragile.
                         */
                        if (regs[i]->fields[f].filter &&
@@ -234,9 +295,9 @@ static __init void parse_cmdline(void)
 }
 
 /* Keep checkers quiet */
-void init_feature_override(void);
+void init_feature_override(u64 boot_status);
 
-asmlinkage void __init init_feature_override(void)
+asmlinkage void __init init_feature_override(u64 boot_status)
 {
        int i;
 
@@ -247,6 +308,8 @@ asmlinkage void __init init_feature_override(void)
                }
        }
 
+       __boot_status = boot_status;
+
        parse_cmdline();
 
        for (i = 0; i < ARRAY_SIZE(regs); i++) {
index 241c86b..afa69e0 100644 (file)
 #error This file should only be included in vmlinux.lds.S
 #endif
 
-#ifdef CONFIG_EFI
-
-__efistub_kernel_size          = _edata - _text;
-__efistub_primary_entry_offset = primary_entry - _text;
-
+PROVIDE(__efistub_kernel_size          = _edata - _text);
+PROVIDE(__efistub_primary_entry_offset = primary_entry - _text);
 
 /*
  * The EFI stub has its own symbol namespace prefixed by __efistub_, to
@@ -25,31 +22,37 @@ __efistub_primary_entry_offset      = primary_entry - _text;
  * linked at. The routines below are all implemented in assembler in a
  * position independent manner
  */
-__efistub_memcmp               = __pi_memcmp;
-__efistub_memchr               = __pi_memchr;
-__efistub_memcpy               = __pi_memcpy;
-__efistub_memmove              = __pi_memmove;
-__efistub_memset               = __pi_memset;
-__efistub_strlen               = __pi_strlen;
-__efistub_strnlen              = __pi_strnlen;
-__efistub_strcmp               = __pi_strcmp;
-__efistub_strncmp              = __pi_strncmp;
-__efistub_strrchr              = __pi_strrchr;
-__efistub_dcache_clean_poc = __pi_dcache_clean_poc;
-
-#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
-__efistub___memcpy             = __pi_memcpy;
-__efistub___memmove            = __pi_memmove;
-__efistub___memset             = __pi_memset;
-#endif
+PROVIDE(__efistub_memcmp               = __pi_memcmp);
+PROVIDE(__efistub_memchr               = __pi_memchr);
+PROVIDE(__efistub_memcpy               = __pi_memcpy);
+PROVIDE(__efistub_memmove              = __pi_memmove);
+PROVIDE(__efistub_memset               = __pi_memset);
+PROVIDE(__efistub_strlen               = __pi_strlen);
+PROVIDE(__efistub_strnlen              = __pi_strnlen);
+PROVIDE(__efistub_strcmp               = __pi_strcmp);
+PROVIDE(__efistub_strncmp              = __pi_strncmp);
+PROVIDE(__efistub_strrchr              = __pi_strrchr);
+PROVIDE(__efistub_dcache_clean_poc     = __pi_dcache_clean_poc);
+
+PROVIDE(__efistub__text                        = _text);
+PROVIDE(__efistub__end                 = _end);
+PROVIDE(__efistub__edata               = _edata);
+PROVIDE(__efistub_screen_info          = screen_info);
+PROVIDE(__efistub__ctype               = _ctype);
 
-__efistub__text                        = _text;
-__efistub__end                 = _end;
-__efistub__edata               = _edata;
-__efistub_screen_info          = screen_info;
-__efistub__ctype               = _ctype;
+/*
+ * The __ prefixed memcpy/memset/memmove symbols are provided by KASAN, which
+ * instruments the conventional ones. Therefore, any references from the EFI
+ * stub or other position independent, low level C code should be redirected to
+ * the non-instrumented versions as well.
+ */
+PROVIDE(__efistub___memcpy             = __pi_memcpy);
+PROVIDE(__efistub___memmove            = __pi_memmove);
+PROVIDE(__efistub___memset             = __pi_memset);
 
-#endif
+PROVIDE(__pi___memcpy                  = __pi_memcpy);
+PROVIDE(__pi___memmove                 = __pi_memmove);
+PROVIDE(__pi___memset                  = __pi_memset);
 
 #ifdef CONFIG_KVM
 
index 418b2bb..325455d 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/pgtable.h>
 #include <linux/random.h>
 
-#include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 
-enum kaslr_status {
-       KASLR_ENABLED,
-       KASLR_DISABLED_CMDLINE,
-       KASLR_DISABLED_NO_SEED,
-       KASLR_DISABLED_FDT_REMAP,
-};
-
-static enum kaslr_status __initdata kaslr_status;
 u64 __ro_after_init module_alloc_base;
 u16 __initdata memstart_offset_seed;
 
-static __init u64 get_kaslr_seed(void *fdt)
-{
-       int node, len;
-       fdt64_t *prop;
-       u64 ret;
-
-       node = fdt_path_offset(fdt, "/chosen");
-       if (node < 0)
-               return 0;
-
-       prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
-       if (!prop || len != sizeof(u64))
-               return 0;
-
-       ret = fdt64_to_cpu(*prop);
-       *prop = 0;
-       return ret;
-}
-
 struct arm64_ftr_override kaslr_feature_override __initdata;
 
-/*
- * This routine will be executed with the kernel mapped at its default virtual
- * address, and if it returns successfully, the kernel will be remapped, and
- * start_kernel() will be executed from a randomized virtual offset. The
- * relocation will result in all absolute references (e.g., static variables
- * containing function pointers) to be reinitialized, and zero-initialized
- * .bss variables will be reset to 0.
- */
-u64 __init kaslr_early_init(void)
+static int __init kaslr_init(void)
 {
-       void *fdt;
-       u64 seed, offset, mask, module_range;
-       unsigned long raw;
+       u64 module_range;
+       u32 seed;
 
        /*
         * Set a reasonable default for module_alloc_base in case
         * we end up running with module randomization disabled.
         */
        module_alloc_base = (u64)_etext - MODULES_VSIZE;
-       dcache_clean_inval_poc((unsigned long)&module_alloc_base,
-                           (unsigned long)&module_alloc_base +
-                                   sizeof(module_alloc_base));
-
-       /*
-        * Try to map the FDT early. If this fails, we simply bail,
-        * and proceed with KASLR disabled. We will make another
-        * attempt at mapping the FDT in setup_machine()
-        */
-       fdt = get_early_fdt_ptr();
-       if (!fdt) {
-               kaslr_status = KASLR_DISABLED_FDT_REMAP;
-               return 0;
-       }
 
-       /*
-        * Retrieve (and wipe) the seed from the FDT
-        */
-       seed = get_kaslr_seed(fdt);
-
-       /*
-        * Check if 'nokaslr' appears on the command line, and
-        * return 0 if that is the case.
-        */
        if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) {
-               kaslr_status = KASLR_DISABLED_CMDLINE;
+               pr_info("KASLR disabled on command line\n");
                return 0;
        }
 
-       /*
-        * Mix in any entropy obtainable architecturally if enabled
-        * and supported.
-        */
-
-       if (arch_get_random_seed_long_early(&raw))
-               seed ^= raw;
-
-       if (!seed) {
-               kaslr_status = KASLR_DISABLED_NO_SEED;
+       if (!kaslr_offset()) {
+               pr_warn("KASLR disabled due to lack of seed\n");
                return 0;
        }
 
+       pr_info("KASLR enabled\n");
+
        /*
-        * OK, so we are proceeding with KASLR enabled. Calculate a suitable
-        * kernel image offset from the seed. Let's place the kernel in the
-        * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
-        * the lower and upper quarters to avoid colliding with other
-        * allocations.
-        * Even if we could randomize at page granularity for 16k and 64k pages,
-        * let's always round to 2 MB so we don't interfere with the ability to
-        * map using contiguous PTEs
+        * KASAN without KASAN_VMALLOC does not expect the module region to
+        * intersect the vmalloc region, since shadow memory is allocated for
+        * each module at load time, whereas the vmalloc region will already be
+        * shadowed by KASAN zero pages.
         */
-       mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
-       offset = BIT(VA_BITS_MIN - 3) + (seed & mask);
+       BUILD_BUG_ON((IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+                     IS_ENABLED(CONFIG_KASAN_SW_TAGS)) &&
+                    !IS_ENABLED(CONFIG_KASAN_VMALLOC));
 
-       /* use the top 16 bits to randomize the linear region */
-       memstart_offset_seed = seed >> 48;
-
-       if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) &&
-           (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
-            IS_ENABLED(CONFIG_KASAN_SW_TAGS)))
-               /*
-                * KASAN without KASAN_VMALLOC does not expect the module region
-                * to intersect the vmalloc region, since shadow memory is
-                * allocated for each module at load time, whereas the vmalloc
-                * region is shadowed by KASAN zero pages. So keep modules
-                * out of the vmalloc region if KASAN is enabled without
-                * KASAN_VMALLOC, and put the kernel well within 4 GB of the
-                * module region.
-                */
-               return offset % SZ_2G;
+       seed = get_random_u32();
 
        if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
                /*
@@ -154,8 +70,7 @@ u64 __init kaslr_early_init(void)
                 * resolved normally.)
                 */
                module_range = SZ_2G - (u64)(_end - _stext);
-               module_alloc_base = max((u64)_end + offset - SZ_2G,
-                                       (u64)MODULES_VADDR);
+               module_alloc_base = max((u64)_end - SZ_2G, (u64)MODULES_VADDR);
        } else {
                /*
                 * Randomize the module region by setting module_alloc_base to
@@ -167,40 +82,12 @@ u64 __init kaslr_early_init(void)
                 * when ARM64_MODULE_PLTS is enabled.
                 */
                module_range = MODULES_VSIZE - (u64)(_etext - _stext);
-               module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
        }
 
        /* use the lower 21 bits to randomize the base of the module region */
        module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
        module_alloc_base &= PAGE_MASK;
 
-       dcache_clean_inval_poc((unsigned long)&module_alloc_base,
-                           (unsigned long)&module_alloc_base +
-                                   sizeof(module_alloc_base));
-       dcache_clean_inval_poc((unsigned long)&memstart_offset_seed,
-                           (unsigned long)&memstart_offset_seed +
-                                   sizeof(memstart_offset_seed));
-
-       return offset;
-}
-
-static int __init kaslr_init(void)
-{
-       switch (kaslr_status) {
-       case KASLR_ENABLED:
-               pr_info("KASLR enabled\n");
-               break;
-       case KASLR_DISABLED_CMDLINE:
-               pr_info("KASLR disabled on command line\n");
-               break;
-       case KASLR_DISABLED_NO_SEED:
-               pr_warn("KASLR disabled due to lack of seed\n");
-               break;
-       case KASLR_DISABLED_FDT_REMAP:
-               pr_warn("KASLR disabled due to FDT remapping failure\n");
-               break;
-       }
-
        return 0;
 }
-core_initcall(kaslr_init)
+subsys_initcall(kaslr_init)
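
The 21-bit scaling step above picks a uniform fraction of the module region's slack; a sketch with a made-up name:

#include <linux/types.h>
#include <asm/page.h>

/* Sketch: offset @base by (seed / 2^21) of @range, then page-align. */
static u64 randomize_module_base(u64 base, u64 range, u32 seed)
{
        u64 s = seed & ((1U << 21) - 1);        /* low 21 bits of entropy */

        return (base + ((range * s) >> 21)) & PAGE_MASK;
}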
index 42bd8c0..692e9d2 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <asm/unistd.h>
 
+       .section .rodata
        .align  5
        .globl  __kuser_helper_start
 __kuser_helper_start:
index f6b0074..b2b7302 100644 (file)
@@ -48,15 +48,6 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
        if (!pte_is_tagged)
                return;
 
-       page_kasan_tag_reset(page);
-       /*
-        * We need smp_wmb() in between setting the flags and clearing the
-        * tags because if another thread reads page->flags and builds a
-        * tagged address out of it, there is an actual dependency to the
-        * memory access, but on the current thread we do not guarantee that
-        * the new page->flags are visible before the tags were updated.
-        */
-       smp_wmb();
        mte_clear_page_tags(page_address(page));
 }
 
diff --git a/arch/arm64/kernel/pi/Makefile b/arch/arm64/kernel/pi/Makefile
new file mode 100644 (file)
index 0000000..8392914
--- /dev/null
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2022 Google LLC
+
+KBUILD_CFLAGS  := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \
+                  -Os -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN) \
+                  $(call cc-option,-mbranch-protection=none) \
+                  -I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \
+                  -include $(srctree)/include/linux/hidden.h \
+                  -D__DISABLE_EXPORTS -ffreestanding -D__NO_FORTIFY \
+                  $(call cc-option,-fno-addrsig)
+
+# remove SCS flags from all objects in this directory
+KBUILD_CFLAGS  := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
+# disable LTO
+KBUILD_CFLAGS  := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
+
+GCOV_PROFILE   := n
+KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
+UBSAN_SANITIZE := n
+KCOV_INSTRUMENT        := n
+
+$(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \
+                              --remove-section=.note.gnu.property \
+                              --prefix-alloc-sections=.init
+$(obj)/%.pi.o: $(obj)/%.o FORCE
+       $(call if_changed,objcopy)
+
+$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
+       $(call if_changed_rule,cc_o_c)
+
+obj-y          := kaslr_early.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o
+extra-y                := $(patsubst %.pi.o,%.o,$(obj-y))
diff --git a/arch/arm64/kernel/pi/kaslr_early.c b/arch/arm64/kernel/pi/kaslr_early.c
new file mode 100644 (file)
index 0000000..6c3855e
--- /dev/null
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2022 Google LLC
+// Author: Ard Biesheuvel <ardb@google.com>
+
+// NOTE: code in this file runs *very* early, and is not permitted to use
+// global variables or anything that relies on absolute addressing.
+
+#include <linux/libfdt.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+#include <linux/string.h>
+
+#include <asm/archrandom.h>
+#include <asm/memory.h>
+
+/* taken from lib/string.c */
+static char *__strstr(const char *s1, const char *s2)
+{
+       size_t l1, l2;
+
+       l2 = strlen(s2);
+       if (!l2)
+               return (char *)s1;
+       l1 = strlen(s1);
+       while (l1 >= l2) {
+               l1--;
+               if (!memcmp(s1, s2, l2))
+                       return (char *)s1;
+               s1++;
+       }
+       return NULL;
+}
+static bool cmdline_contains_nokaslr(const u8 *cmdline)
+{
+       const u8 *str;
+
+       str = __strstr(cmdline, "nokaslr");
+       return str == cmdline || (str > cmdline && *(str - 1) == ' ');
+}
+
+static bool is_kaslr_disabled_cmdline(void *fdt)
+{
+       if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
+               int node;
+               const u8 *prop;
+
+               node = fdt_path_offset(fdt, "/chosen");
+               if (node < 0)
+                       goto out;
+
+               prop = fdt_getprop(fdt, node, "bootargs", NULL);
+               if (!prop)
+                       goto out;
+
+               if (cmdline_contains_nokaslr(prop))
+                       return true;
+
+               if (IS_ENABLED(CONFIG_CMDLINE_EXTEND))
+                       goto out;
+
+               return false;
+       }
+out:
+       return cmdline_contains_nokaslr(CONFIG_CMDLINE);
+}
+
+static u64 get_kaslr_seed(void *fdt)
+{
+       int node, len;
+       fdt64_t *prop;
+       u64 ret;
+
+       node = fdt_path_offset(fdt, "/chosen");
+       if (node < 0)
+               return 0;
+
+       prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
+       if (!prop || len != sizeof(u64))
+               return 0;
+
+       ret = fdt64_to_cpu(*prop);
+       *prop = 0;
+       return ret;
+}
+
+asmlinkage u64 kaslr_early_init(void *fdt)
+{
+       u64 seed;
+
+       if (is_kaslr_disabled_cmdline(fdt))
+               return 0;
+
+       seed = get_kaslr_seed(fdt);
+       if (!seed) {
+#ifdef CONFIG_ARCH_RANDOM
+                if (!__early_cpu_has_rndr() ||
+                    !__arm64_rndr((unsigned long *)&seed))
+#endif
+               return 0;
+       }
+
+       /*
+        * OK, so we are proceeding with KASLR enabled. Calculate a suitable
+        * kernel image offset from the seed. Let's place the kernel in the
+        * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
+        * the lower and upper quarters to avoid colliding with other
+        * allocations.
+        */
+       return BIT(VA_BITS_MIN - 3) + (seed & GENMASK(VA_BITS_MIN - 3, 0));
+}
index cf3a759..fea3223 100644 (file)
@@ -303,14 +303,13 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
        early_fixmap_init();
        early_ioremap_init();
 
+       setup_machine_fdt(__fdt_pointer);
+
        /*
         * Initialise the static keys early as they may be enabled by the
-        * cpufeature code, early parameters, and DT setup.
+        * cpufeature code and early parameters.
         */
        jump_label_init();
-
-       setup_machine_fdt(__fdt_pointer);
-
        parse_early_param();
 
        /*
index b0980fb..3e6d035 100644 (file)
@@ -280,6 +280,9 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
 
                vl = task_get_sme_vl(current);
        } else {
+               if (!system_supports_sve())
+                       return -EINVAL;
+
                vl = task_get_sve_vl(current);
        }
 
@@ -342,9 +345,14 @@ fpsimd_only:
 
 #else /* ! CONFIG_ARM64_SVE */
 
-/* Turn any non-optimised out attempts to use these into a link error: */
+static int restore_sve_fpsimd_context(struct user_ctxs *user)
+{
+       WARN_ON_ONCE(1);
+       return -EINVAL;
+}
+
+/* Turn any non-optimised out attempts to use this into a link error: */
 extern int preserve_sve_context(void __user *ctx);
-extern int restore_sve_fpsimd_context(struct user_ctxs *user);
 
 #endif /* ! CONFIG_ARM64_SVE */
 
@@ -649,14 +657,10 @@ static int restore_sigframe(struct pt_regs *regs,
                if (!user.fpsimd)
                        return -EINVAL;
 
-               if (user.sve) {
-                       if (!system_supports_sve())
-                               return -EINVAL;
-
+               if (user.sve)
                        err = restore_sve_fpsimd_context(&user);
-               } else {
+               else
                        err = restore_fpsimd_context(user.fpsimd);
-               }
        }
 
        if (err == 0 && system_supports_sme() && user.za)
index 475d30d..ccbd4aa 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <asm/unistd.h>
 
+       .section .rodata
        .globl __aarch32_sigret_code_start
 __aarch32_sigret_code_start:
 
index 4ea9392..617f78a 100644 (file)
@@ -100,10 +100,11 @@ SYM_FUNC_END(__cpu_suspend_enter)
        .pushsection ".idmap.text", "awx"
 SYM_CODE_START(cpu_resume)
        bl      init_kernel_el
-       bl      switch_to_vhe
+       bl      finalise_el2
        bl      __cpu_setup
        /* enable the MMU early - so we can access sleep_save_stash by va */
        adrp    x1, swapper_pg_dir
+       adrp    x2, idmap_pg_dir
        bl      __enable_mmu
        ldr     x8, =_cpu_resume
        br      x8
index 0467cb7..fcaa151 100644 (file)
@@ -38,6 +38,8 @@
  * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
  *               associated with the most recently encountered replacement lr
  *               value.
+ *
+ * @task:        The task being unwound.
  */
 struct unwind_state {
        unsigned long fp;
@@ -48,13 +50,13 @@ struct unwind_state {
 #ifdef CONFIG_KRETPROBES
        struct llist_node *kr_cur;
 #endif
+       struct task_struct *task;
 };
 
-static notrace void unwind_init(struct unwind_state *state, unsigned long fp,
-                               unsigned long pc)
+static void unwind_init_common(struct unwind_state *state,
+                              struct task_struct *task)
 {
-       state->fp = fp;
-       state->pc = pc;
+       state->task = task;
 #ifdef CONFIG_KRETPROBES
        state->kr_cur = NULL;
 #endif
@@ -72,7 +74,57 @@ static notrace void unwind_init(struct unwind_state *state, unsigned long fp,
        state->prev_fp = 0;
        state->prev_type = STACK_TYPE_UNKNOWN;
 }
-NOKPROBE_SYMBOL(unwind_init);
+
+/*
+ * Start an unwind from a pt_regs.
+ *
+ * The unwind will begin at the PC within the regs.
+ *
+ * The regs must be on a stack currently owned by the calling task.
+ */
+static inline void unwind_init_from_regs(struct unwind_state *state,
+                                        struct pt_regs *regs)
+{
+       unwind_init_common(state, current);
+
+       state->fp = regs->regs[29];
+       state->pc = regs->pc;
+}
+
+/*
+ * Start an unwind from a caller.
+ *
+ * The unwind will begin at the caller of whichever function this is inlined
+ * into.
+ *
+ * The function which invokes this must be noinline.
+ */
+static __always_inline void unwind_init_from_caller(struct unwind_state *state)
+{
+       unwind_init_common(state, current);
+
+       state->fp = (unsigned long)__builtin_frame_address(1);
+       state->pc = (unsigned long)__builtin_return_address(0);
+}
+
+/*
+ * Start an unwind from a blocked task.
+ *
+ * The unwind will begin at the blocked task's saved PC (i.e. the caller of
+ * cpu_switch_to()).
+ *
+ * The caller should ensure the task is blocked in cpu_switch_to() for the
+ * duration of the unwind, or the unwind will be bogus. It is never valid to
+ * call this for the current task.
+ */
+static inline void unwind_init_from_task(struct unwind_state *state,
+                                        struct task_struct *task)
+{
+       unwind_init_common(state, task);
+
+       state->fp = thread_saved_fp(task);
+       state->pc = thread_saved_pc(task);
+}
 
 /*
  * Unwind from one frame record (A) to the next frame record (B).
@@ -81,9 +133,9 @@ NOKPROBE_SYMBOL(unwind_init);
  * records (e.g. a cycle), determined based on the location and fp value of A
  * and the location (but not the fp value) of B.
  */
-static int notrace unwind_next(struct task_struct *tsk,
-                              struct unwind_state *state)
+static int notrace unwind_next(struct unwind_state *state)
 {
+       struct task_struct *tsk = state->task;
        unsigned long fp = state->fp;
        struct stack_info info;
 
@@ -117,15 +169,15 @@ static int notrace unwind_next(struct task_struct *tsk,
                if (fp <= state->prev_fp)
                        return -EINVAL;
        } else {
-               set_bit(state->prev_type, state->stacks_done);
+               __set_bit(state->prev_type, state->stacks_done);
        }
 
        /*
         * Record this frame record's values and location. The prev_fp and
         * prev_type are only meaningful to the next unwind_next() invocation.
         */
-       state->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
-       state->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+       state->fp = READ_ONCE(*(unsigned long *)(fp));
+       state->pc = READ_ONCE(*(unsigned long *)(fp + 8));
        state->prev_fp = fp;
        state->prev_type = info.type;
 
@@ -157,8 +209,7 @@ static int notrace unwind_next(struct task_struct *tsk,
 }
 NOKPROBE_SYMBOL(unwind_next);
 
-static void notrace unwind(struct task_struct *tsk,
-                          struct unwind_state *state,
+static void notrace unwind(struct unwind_state *state,
                           stack_trace_consume_fn consume_entry, void *cookie)
 {
        while (1) {
@@ -166,7 +217,7 @@ static void notrace unwind(struct task_struct *tsk,
 
                if (!consume_entry(cookie, state->pc))
                        break;
-               ret = unwind_next(tsk, state);
+               ret = unwind_next(state);
                if (ret < 0)
                        break;
        }
@@ -212,15 +263,15 @@ noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
 {
        struct unwind_state state;
 
-       if (regs)
-               unwind_init(&state, regs->regs[29], regs->pc);
-       else if (task == current)
-               unwind_init(&state,
-                               (unsigned long)__builtin_frame_address(1),
-                               (unsigned long)__builtin_return_address(0));
-       else
-               unwind_init(&state, thread_saved_fp(task),
-                               thread_saved_pc(task));
-
-       unwind(task, &state, consume_entry, cookie);
+       if (regs) {
+               if (task != current)
+                       return;
+               unwind_init_from_regs(&state, regs);
+       } else if (task == current) {
+               unwind_init_from_caller(&state);
+       } else {
+               unwind_init_from_task(&state, task);
+       }
+
+       unwind(&state, consume_entry, cookie);
 }
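
As a usage sketch (not part of this diff), a caller drives the reworked unwinder through arch_stack_walk(); the callback and call below assume the unchanged stack_trace_consume_fn and arch_stack_walk() signatures:

	/* Hypothetical helper: print up to max_entries frames of current. */
	struct dump_cookie {
		unsigned int printed;
		unsigned int max_entries;
	};

	static bool dump_entry(void *cookie, unsigned long pc)
	{
		struct dump_cookie *c = cookie;

		pr_info(" %pS\n", (void *)pc);
		return ++c->printed < c->max_entries;	/* false stops the walk */
	}

	static void dump_current_stack(void)
	{
		struct dump_cookie c = { .printed = 0, .max_entries = 16 };

		/* regs == NULL, task == current: the unwind_init_from_caller() path */
		arch_stack_walk(dump_entry, &c, current, NULL);
	}
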
index 2b0887e..9135fe0 100644 (file)
@@ -52,7 +52,7 @@ void notrace __cpu_suspend_exit(void)
 
        /* Restore CnP bit in TTBR1_EL1 */
        if (system_supports_cnp())
-               cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+               cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
 
        /*
         * PSTATE was not saved over suspend/resume, re-enable any detected
index 9ac7a81..b7fed33 100644 (file)
@@ -579,11 +579,11 @@ static void ctr_read_handler(unsigned long esr, struct pt_regs *regs)
 
        if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
                /* Hide DIC so that we can trap the unnecessary maintenance...*/
-               val &= ~BIT(CTR_DIC_SHIFT);
+               val &= ~BIT(CTR_EL0_DIC_SHIFT);
 
                /* ... and fake IminLine to reduce the number of traps. */
-               val &= ~CTR_IMINLINE_MASK;
-               val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
+               val &= ~CTR_EL0_IminLine_MASK;
+               val |= (PAGE_SHIFT - 2) & CTR_EL0_IminLine_MASK;
        }
 
        pt_regs_write_reg(regs, rt, val);
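
For context, the CTR_EL0_* names come from the generated sysreg headers (the CTR_EL0 description appears later in this merge). A hedged sketch of decoding the minimum I-cache line size with them, assuming the generated CTR_EL0_IminLine_SHIFT/_MASK pair:

	u32 ctr = read_cpuid_cachetype();	/* reads CTR_EL0 */
	u32 iminline = (ctr & CTR_EL0_IminLine_MASK) >> CTR_EL0_IminLine_SHIFT;
	unsigned int icache_line_bytes = 4U << iminline;	/* IminLine is log2(words) */
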
index f6e25d7..bafbf78 100644 (file)
@@ -24,7 +24,13 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
 # routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
 # preparation in build-time C")).
 ldflags-y := -shared -soname=linux-vdso.so.1 --hash-style=sysv \
-            -Bsymbolic --build-id=sha1 -n $(btildflags-y) -T
+            -Bsymbolic --build-id=sha1 -n $(btildflags-y)
+
+ifdef CONFIG_LD_ORPHAN_WARN
+  ldflags-y += --orphan-handling=warn
+endif
+
+ldflags-y += -T
 
 ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
 ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
index a5e61e0..e69fb4a 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/const.h>
 #include <asm/page.h>
 #include <asm/vdso.h>
+#include <asm-generic/vmlinux.lds.h>
 
 OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64")
 OUTPUT_ARCH(aarch64)
@@ -49,11 +50,24 @@ SECTIONS
 
        .dynamic        : { *(.dynamic) }               :text   :dynamic
 
-       .rodata         : { *(.rodata*) }               :text
+       .rela.dyn       : ALIGN(8) { *(.rela .rela*) }
+
+       .rodata         : {
+               *(.rodata*)
+               *(.got)
+               *(.got.plt)
+               *(.plt)
+               *(.plt.*)
+               *(.iplt)
+               *(.igot .igot.plt)
+       }                                               :text
 
        _end = .;
        PROVIDE(end = .);
 
+       DWARF_DEBUG
+       ELF_DETAILS
+
        /DISCARD/       : {
                *(.data .data.* .gnu.linkonce.d.* .sdata*)
                *(.bss .sbss .dynbss .dynsbss)
index 05ba1aa..36c8f66 100644 (file)
@@ -104,6 +104,7 @@ VDSO_AFLAGS += -D__ASSEMBLY__
 VDSO_LDFLAGS += -Bsymbolic --no-undefined -soname=linux-vdso.so.1
 VDSO_LDFLAGS += -z max-page-size=4096 -z common-page-size=4096
 VDSO_LDFLAGS += -shared --hash-style=sysv --build-id=sha1
+VDSO_LDFLAGS += --orphan-handling=warn
 
 
 # Borrow vdsomunge.c from the arm vDSO
index 3348ce5..8d95d7d 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/const.h>
 #include <asm/page.h>
 #include <asm/vdso.h>
+#include <asm-generic/vmlinux.lds.h>
 
 OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", "elf32-littlearm")
 OUTPUT_ARCH(arm)
@@ -35,12 +36,30 @@ SECTIONS
 
        .dynamic        : { *(.dynamic) }               :text   :dynamic
 
-       .rodata         : { *(.rodata*) }               :text
+       .rodata         : {
+               *(.rodata*)
+               *(.got)
+               *(.got.plt)
+               *(.plt)
+               *(.rel.iplt)
+               *(.iplt)
+               *(.igot.plt)
+       }                                               :text
 
-       .text           : { *(.text*) }                 :text   =0xe7f001f2
+       .text           : {
+               *(.text*)
+               *(.glue_7)
+               *(.glue_7t)
+               *(.vfp11_veneer)
+               *(.v4_bx)
+       }                                               :text   =0xe7f001f2
 
-       .got            : { *(.got) }
-       .rel.plt        : { *(.rel.plt) }
+       .rel.dyn        : { *(.rel*) }
+
+       .ARM.exidx : { *(.ARM.exidx*) }
+       DWARF_DEBUG
+       ELF_DETAILS
+       .ARM.attributes 0 : { *(.ARM.attributes) }
 
        /DISCARD/       : {
                *(.note.GNU-stack)
index 2d4a8f9..45131e3 100644 (file)
@@ -115,7 +115,8 @@ jiffies = jiffies_64;
        __entry_tramp_text_start = .;                   \
        *(.entry.tramp.text)                            \
        . = ALIGN(PAGE_SIZE);                           \
-       __entry_tramp_text_end = .;
+       __entry_tramp_text_end = .;                     \
+       *(.entry.tramp.rodata)
 #else
 #define TRAMP_TEXT
 #endif
@@ -198,8 +199,7 @@ SECTIONS
        }
 
        idmap_pg_dir = .;
-       . += IDMAP_DIR_SIZE;
-       idmap_pg_end = .;
+       . += PAGE_SIZE;
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
        tramp_pg_dir = .;
@@ -235,6 +235,10 @@ SECTIONS
        __inittext_end = .;
        __initdata_begin = .;
 
+       init_idmap_pg_dir = .;
+       . += INIT_IDMAP_DIR_SIZE;
+       init_idmap_pg_end = .;
+
        .init.data : {
                INIT_DATA
                INIT_SETUP(16)
@@ -253,21 +257,17 @@ SECTIONS
        HYPERVISOR_RELOC_SECTION
 
        .rela.dyn : ALIGN(8) {
+               __rela_start = .;
                *(.rela .rela*)
+               __rela_end = .;
        }
 
-       __rela_offset   = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
-       __rela_size     = SIZEOF(.rela.dyn);
-
-#ifdef CONFIG_RELR
        .relr.dyn : ALIGN(8) {
+               __relr_start = .;
                *(.relr.dyn)
+               __relr_end = .;
        }
 
-       __relr_offset   = ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
-       __relr_size     = SIZEOF(.relr.dyn);
-#endif
-
        . = ALIGN(SEGMENT_ALIGN);
        __initdata_end = .;
        __init_end = .;
index 4e39ace..3b8d062 100644 (file)
@@ -1230,6 +1230,9 @@ bool kvm_arch_timer_get_input_level(int vintid)
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
        struct arch_timer_context *timer;
 
+       if (WARN(!vcpu, "No vcpu context!\n"))
+               return false;
+
        if (vintid == vcpu_vtimer(vcpu)->irq.irq)
                timer = vcpu_vtimer(vcpu);
        else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
index 400bb0f..83a7f61 100644 (file)
@@ -150,8 +150,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        if (ret)
                goto out_free_stage2_pgd;
 
-       if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL)) {
+               ret = -ENOMEM;
                goto out_free_stage2_pgd;
+       }
        cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);
 
        kvm_vgic_early_init(kvm);
@@ -2110,11 +2112,11 @@ static int finalize_hyp_mode(void)
                return 0;
 
        /*
-        * Exclude HYP BSS from kmemleak so that it doesn't get peeked
-        * at, which would end badly once the section is inaccessible.
-        * None of other sections should ever be introspected.
+        * Exclude HYP sections from kmemleak so that they don't get peeked
+        * at, which would end badly once inaccessible.
         */
        kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
+       kmemleak_free_part(__va(hyp_mem_base), hyp_mem_size);
        return pkvm_drop_host_privileges();
 }
 
@@ -2271,7 +2273,11 @@ static int __init early_kvm_mode_cfg(char *arg)
                return -EINVAL;
 
        if (strcmp(arg, "protected") == 0) {
-               kvm_mode = KVM_MODE_PROTECTED;
+               if (!is_kernel_in_hyp_mode())
+                       kvm_mode = KVM_MODE_PROTECTED;
+               else
+                       pr_warn_once("Protected KVM not available with VHE\n");
+
                return 0;
        }
 
index 3d251a4..6012b08 100644 (file)
@@ -80,6 +80,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
        vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED;
        vcpu->arch.flags |= KVM_ARM64_FP_HOST;
 
+       vcpu->arch.flags &= ~KVM_ARM64_HOST_SVE_ENABLED;
        if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
                vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
 
@@ -93,6 +94,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
         * operations. Do this for ZA as well for now for simplicity.
         */
        if (system_supports_sme()) {
+               vcpu->arch.flags &= ~KVM_ARM64_HOST_SME_ENABLED;
                if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
                        vcpu->arch.flags |= KVM_ARM64_HOST_SME_ENABLED;
 
index fd55014..fa6e466 100644 (file)
        )
 
 #define PVM_ID_AA64ISAR1_ALLOW (\
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_DPB) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_API) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_JSCVT) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_FCMA) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_LRCPC) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_FRINTTS) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_SB) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_SPECRES) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_BF16) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_DGH) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_I8MM) \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DPB) | \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) | \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) | \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_JSCVT) | \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_FCMA) | \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_LRCPC) | \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) | \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI) | \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_FRINTTS) | \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_SB) | \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_SPECRES) | \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_BF16) | \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DGH) | \
+       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_I8MM) \
        )
 
 #define PVM_ID_AA64ISAR2_ALLOW (\
-       ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) \
+       ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3) | \
+       ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) \
        )
 
 u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
index 78edf07..1e78acf 100644 (file)
@@ -314,15 +314,11 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
 int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
                             enum kvm_pgtable_prot prot)
 {
-       hyp_assert_lock_held(&host_kvm.lock);
-
        return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
 }
 
 int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
 {
-       hyp_assert_lock_held(&host_kvm.lock);
-
        return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
                               addr, size, &host_s2_pool, owner_id);
 }
index b6d86e4..6b94c3e 100644 (file)
@@ -173,10 +173,10 @@ static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu)
        u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW;
 
        if (!vcpu_has_ptrauth(vcpu))
-               allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
-                               ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
-                               ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
-                               ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
+               allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
+                               ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
+                               ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
+                               ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
 
        return id_aa64isar1_el1_sys_val & allow_mask;
 }
@@ -186,8 +186,8 @@ static u64 get_pvm_id_aa64isar2(const struct kvm_vcpu *vcpu)
        u64 allow_mask = PVM_ID_AA64ISAR2_ALLOW;
 
        if (!vcpu_has_ptrauth(vcpu))
-               allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) |
-                               ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3));
+               allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
+                               ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
 
        return id_aa64isar2_el1_sys_val & allow_mask;
 }
@@ -243,15 +243,9 @@ u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
        case SYS_ID_AA64MMFR2_EL1:
                return get_pvm_id_aa64mmfr2(vcpu);
        default:
-               /*
-                * Should never happen because all cases are covered in
-                * pvm_sys_reg_descs[].
-                */
-               WARN_ON(1);
-               break;
+               /* Unhandled ID register, RAZ */
+               return 0;
        }
-
-       return 0;
 }
 
 static u64 read_id_reg(const struct kvm_vcpu *vcpu,
@@ -332,6 +326,16 @@ static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu,
 /* Mark the specified system register as an AArch64 feature id register. */
 #define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 }
 
+/*
+ * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
+ * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
+ * (1 <= crm < 8, 0 <= Op2 < 8).
+ */
+#define ID_UNALLOCATED(crm, op2) {                     \
+       Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),     \
+       .access = pvm_access_id_aarch64,                \
+}
+
 /* Mark the specified system register as Read-As-Zero/Write-Ignored */
 #define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi }
 
@@ -375,24 +379,46 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {
        AARCH32(SYS_MVFR0_EL1),
        AARCH32(SYS_MVFR1_EL1),
        AARCH32(SYS_MVFR2_EL1),
+       ID_UNALLOCATED(3,3),
        AARCH32(SYS_ID_PFR2_EL1),
        AARCH32(SYS_ID_DFR1_EL1),
        AARCH32(SYS_ID_MMFR5_EL1),
+       ID_UNALLOCATED(3,7),
 
        /* AArch64 ID registers */
        /* CRm=4 */
        AARCH64(SYS_ID_AA64PFR0_EL1),
        AARCH64(SYS_ID_AA64PFR1_EL1),
+       ID_UNALLOCATED(4,2),
+       ID_UNALLOCATED(4,3),
        AARCH64(SYS_ID_AA64ZFR0_EL1),
+       ID_UNALLOCATED(4,5),
+       ID_UNALLOCATED(4,6),
+       ID_UNALLOCATED(4,7),
        AARCH64(SYS_ID_AA64DFR0_EL1),
        AARCH64(SYS_ID_AA64DFR1_EL1),
+       ID_UNALLOCATED(5,2),
+       ID_UNALLOCATED(5,3),
        AARCH64(SYS_ID_AA64AFR0_EL1),
        AARCH64(SYS_ID_AA64AFR1_EL1),
+       ID_UNALLOCATED(5,6),
+       ID_UNALLOCATED(5,7),
        AARCH64(SYS_ID_AA64ISAR0_EL1),
        AARCH64(SYS_ID_AA64ISAR1_EL1),
+       AARCH64(SYS_ID_AA64ISAR2_EL1),
+       ID_UNALLOCATED(6,3),
+       ID_UNALLOCATED(6,4),
+       ID_UNALLOCATED(6,5),
+       ID_UNALLOCATED(6,6),
+       ID_UNALLOCATED(6,7),
        AARCH64(SYS_ID_AA64MMFR0_EL1),
        AARCH64(SYS_ID_AA64MMFR1_EL1),
        AARCH64(SYS_ID_AA64MMFR2_EL1),
+       ID_UNALLOCATED(7,3),
+       ID_UNALLOCATED(7,4),
+       ID_UNALLOCATED(7,5),
+       ID_UNALLOCATED(7,6),
+       ID_UNALLOCATED(7,7),
 
        /* Scalable Vector Registers are restricted. */
 
index c06c047..c4fb387 100644 (file)
@@ -1136,17 +1136,17 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
                break;
        case SYS_ID_AA64ISAR1_EL1:
                if (!vcpu_has_ptrauth(vcpu))
-                       val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
-                                ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
-                                ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
-                                ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
+                       val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
+                                ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
+                                ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
+                                ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
                break;
        case SYS_ID_AA64ISAR2_EL1:
                if (!vcpu_has_ptrauth(vcpu))
-                       val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) |
-                                ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3));
+                       val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
+                                ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
                if (!cpus_have_final_cap(ARM64_HAS_WFXT))
-                       val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_WFXT);
+                       val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
                break;
        case SYS_ID_AA64DFR0_EL1:
                /* Limit debug to ARMv8.0 */
index 77a67e9..e070cda 100644 (file)
@@ -429,11 +429,11 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
                vgic_mmio_read_pending, vgic_mmio_write_spending,
-               NULL, vgic_uaccess_write_spending, 1,
+               vgic_uaccess_read_pending, vgic_uaccess_write_spending, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
                vgic_mmio_read_pending, vgic_mmio_write_cpending,
-               NULL, vgic_uaccess_write_cpending, 1,
+               vgic_uaccess_read_pending, vgic_uaccess_write_cpending, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
index f7aa7bc..f15e29c 100644 (file)
@@ -353,42 +353,6 @@ static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
-                                                 gpa_t addr, unsigned int len)
-{
-       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
-       u32 value = 0;
-       int i;
-
-       /*
-        * pending state of interrupt is latched in pending_latch variable.
-        * Userspace will save and restore pending state and line_level
-        * separately.
-        * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst
-        * for handling of ISPENDR and ICPENDR.
-        */
-       for (i = 0; i < len * 8; i++) {
-               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-               bool state = irq->pending_latch;
-
-               if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
-                       int err;
-
-                       err = irq_get_irqchip_state(irq->host_irq,
-                                                   IRQCHIP_STATE_PENDING,
-                                                   &state);
-                       WARN_ON(err);
-               }
-
-               if (state)
-                       value |= (1U << i);
-
-               vgic_put_irq(vcpu->kvm, irq);
-       }
-
-       return value;
-}
-
 static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
                                         gpa_t addr, unsigned int len,
                                         unsigned long val)
@@ -666,7 +630,7 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
                vgic_mmio_read_pending, vgic_mmio_write_spending,
-               vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
+               vgic_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
                vgic_mmio_read_pending, vgic_mmio_write_cpending,
@@ -750,7 +714,7 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0,
                vgic_mmio_read_pending, vgic_mmio_write_spending,
-               vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
+               vgic_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICPENDR0,
                vgic_mmio_read_pending, vgic_mmio_write_cpending,
index 49837d3..997d0fc 100644 (file)
@@ -226,8 +226,9 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
-                                    gpa_t addr, unsigned int len)
+static unsigned long __read_pending(struct kvm_vcpu *vcpu,
+                                   gpa_t addr, unsigned int len,
+                                   bool is_user)
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
@@ -239,6 +240,15 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                unsigned long flags;
                bool val;
 
+               /*
+                * When used from userspace with a GICv3 model:
+                *
+                * Pending state of interrupt is latched in pending_latch
+                * variable.  Userspace will save and restore pending state
+                * and line_level separately.
+                * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst
+                * for handling of ISPENDR and ICPENDR.
+                */
                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
                        int err;
@@ -248,10 +258,20 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                                    IRQCHIP_STATE_PENDING,
                                                    &val);
                        WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
-               } else if (vgic_irq_is_mapped_level(irq)) {
+               } else if (!is_user && vgic_irq_is_mapped_level(irq)) {
                        val = vgic_get_phys_line_level(irq);
                } else {
-                       val = irq_is_pending(irq);
+                       switch (vcpu->kvm->arch.vgic.vgic_model) {
+                       case KVM_DEV_TYPE_ARM_VGIC_V3:
+                               if (is_user) {
+                                       val = irq->pending_latch;
+                                       break;
+                               }
+                               fallthrough;
+                       default:
+                               val = irq_is_pending(irq);
+                               break;
+                       }
                }
 
                value |= ((u32)val << i);
@@ -263,6 +283,18 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
        return value;
 }
 
+unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
+                                    gpa_t addr, unsigned int len)
+{
+       return __read_pending(vcpu, addr, len, false);
+}
+
+unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu,
+                                       gpa_t addr, unsigned int len)
+{
+       return __read_pending(vcpu, addr, len, true);
+}
+
 static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
 {
        return (vgic_irq_is_sgi(irq->intid) &&
index 3fa696f..6082d4b 100644 (file)
@@ -149,6 +149,9 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
 unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len);
 
+unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu,
+                                       gpa_t addr, unsigned int len);
+
 void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val);
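
A hedged userspace sketch of the path that now reaches vgic_uaccess_read_pending(): a VMM saving GICD_ISPENDR0 through the vgic-v3 device attribute interface. The 0x0200 offset is GICD_ISPENDR0; the MPIDR affinity bits in .attr are left zero here for brevity:

	/* VMM side; vgic_fd is a KVM_DEV_TYPE_ARM_VGIC_V3 device fd. */
	__u32 pending;
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
		.attr  = 0x0200,			/* GICD_ISPENDR0 */
		.addr  = (__u64)(unsigned long)&pending,
	};

	/* Returns the latched pending_latch state, per the comment above. */
	ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
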
index 8d5f050..d78ae63 100644 (file)
@@ -66,7 +66,7 @@ static void flush_context(void)
         * the next context-switch, we broadcast TLB flush + I-cache
         * invalidation over the inner shareable domain on rollover.
         */
-        kvm_call_hyp(__kvm_flush_vm_context);
+       kvm_call_hyp(__kvm_flush_vm_context);
 }
 
 static bool check_update_reserved_vmid(u64 vmid, u64 newvmid)
index eeb9e45..1b7c93a 100644 (file)
@@ -18,7 +18,7 @@
  */
        .macro  multitag_transfer_size, reg, tmp
        mrs_s   \reg, SYS_GMID_EL1
-       ubfx    \reg, \reg, #SYS_GMID_EL1_BS_SHIFT, #SYS_GMID_EL1_BS_SIZE
+       ubfx    \reg, \reg, #GMID_EL1_BS_SHIFT, #GMID_EL1_BS_SIZE
        mov     \tmp, #4
        lsl     \reg, \tmp, \reg
        .endm
index 0ea6cc2..081058d 100644 (file)
@@ -194,46 +194,3 @@ SYM_FUNC_START(__pi_dcache_clean_pop)
        ret
 SYM_FUNC_END(__pi_dcache_clean_pop)
 SYM_FUNC_ALIAS(dcache_clean_pop, __pi_dcache_clean_pop)
-
-/*
- *     __dma_flush_area(start, size)
- *
- *     clean & invalidate D / U line
- *
- *     - start   - virtual start address of region
- *     - size    - size in question
- */
-SYM_FUNC_START(__pi___dma_flush_area)
-       add     x1, x0, x1
-       dcache_by_line_op civac, sy, x0, x1, x2, x3
-       ret
-SYM_FUNC_END(__pi___dma_flush_area)
-SYM_FUNC_ALIAS(__dma_flush_area, __pi___dma_flush_area)
-
-/*
- *     __dma_map_area(start, size, dir)
- *     - start - kernel virtual start address
- *     - size  - size of region
- *     - dir   - DMA direction
- */
-SYM_FUNC_START(__pi___dma_map_area)
-       add     x1, x0, x1
-       cmp     w2, #DMA_FROM_DEVICE
-       b.eq    __pi_dcache_inval_poc
-       b       __pi_dcache_clean_poc
-SYM_FUNC_END(__pi___dma_map_area)
-SYM_FUNC_ALIAS(__dma_map_area, __pi___dma_map_area)
-
-/*
- *     __dma_unmap_area(start, size, dir)
- *     - start - kernel virtual start address
- *     - size  - size of region
- *     - dir   - DMA direction
- */
-SYM_FUNC_START(__pi___dma_unmap_area)
-       add     x1, x0, x1
-       cmp     w2, #DMA_TO_DEVICE
-       b.ne    __pi_dcache_inval_poc
-       ret
-SYM_FUNC_END(__pi___dma_unmap_area)
-SYM_FUNC_ALIAS(__dma_unmap_area, __pi___dma_unmap_area)
index 0dea80b..2491327 100644 (file)
@@ -23,15 +23,6 @@ void copy_highpage(struct page *to, struct page *from)
 
        if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
                set_bit(PG_mte_tagged, &to->flags);
-               page_kasan_tag_reset(to);
-               /*
-                * We need smp_wmb() in between setting the flags and clearing the
-                * tags because if another thread reads page->flags and builds a
-                * tagged address out of it, there is an actual dependency to the
-                * memory access, but on the current thread we do not guarantee that
-                * the new page->flags are visible before the tags were updated.
-                */
-               smp_wmb();
                mte_copy_page_tags(kto, kfrom);
        }
 }
index 6099c81..599cf81 100644 (file)
 #include <asm/xen/xen-ops.h>
 
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
-               enum dma_data_direction dir)
+                             enum dma_data_direction dir)
 {
-       __dma_map_area(phys_to_virt(paddr), size, dir);
+       unsigned long start = (unsigned long)phys_to_virt(paddr);
+
+       dcache_clean_poc(start, start + size);
 }
 
 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
-               enum dma_data_direction dir)
+                          enum dma_data_direction dir)
 {
-       __dma_unmap_area(phys_to_virt(paddr), size, dir);
+       unsigned long start = (unsigned long)phys_to_virt(paddr);
+
+       if (dir == DMA_TO_DEVICE)
+               return;
+
+       dcache_inval_poc(start, start + size);
 }
 
 void arch_dma_prep_coherent(struct page *page, size_t size)
 {
-       __dma_flush_area(page_address(page), size);
+       unsigned long start = (unsigned long)page_address(page);
+
+       dcache_clean_inval_poc(start, start + size);
 }
 
 #ifdef CONFIG_IOMMU_DMA
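
A hedged sketch of what exercises these hooks: the streaming DMA API for a non-coherent device. The direction handling matches the removed assembly (clean to PoC before the device accesses the buffer, invalidate to PoC before the CPU reads it, nothing on the CPU side for DMA_TO_DEVICE):

	/* Hypothetical driver fragment; dev, buf and len are assumed. */
	dma_addr_t handle;

	/* map: arch_sync_dma_for_device() -> dcache_clean_poc() */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... the device DMAs into buf ... */

	/* unmap: arch_sync_dma_for_cpu() -> dcache_inval_poc() */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
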
index 4894553..228d681 100644 (file)
@@ -16,13 +16,6 @@ get_ex_fixup(const struct exception_table_entry *ex)
        return ((unsigned long)&ex->fixup + ex->fixup);
 }
 
-static bool ex_handler_fixup(const struct exception_table_entry *ex,
-                            struct pt_regs *regs)
-{
-       regs->pc = get_ex_fixup(ex);
-       return true;
-}
-
 static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
                                        struct pt_regs *regs)
 {
@@ -72,11 +65,10 @@ bool fixup_exception(struct pt_regs *regs)
                return false;
 
        switch (ex->type) {
-       case EX_TYPE_FIXUP:
-               return ex_handler_fixup(ex, regs);
        case EX_TYPE_BPF:
                return ex_handler_bpf(ex, regs);
        case EX_TYPE_UACCESS_ERR_ZERO:
+       case EX_TYPE_KACCESS_ERR_ZERO:
                return ex_handler_uaccess_err_zero(ex, regs);
        case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
                return ex_handler_load_unaligned_zeropad(ex, regs);
index c5e1176..cdf3ffa 100644 (file)
@@ -927,6 +927,5 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
 void tag_clear_highpage(struct page *page)
 {
        mte_zero_clear_page_tags(page_address(page));
-       page_kasan_tag_reset(page);
        set_bit(PG_mte_tagged, &page->flags);
 }
index e2a5ec9..5307ffd 100644 (file)
@@ -100,16 +100,6 @@ int pud_huge(pud_t pud)
 #endif
 }
 
-/*
- * Select all bits except the pfn
- */
-static inline pgprot_t pte_pgprot(pte_t pte)
-{
-       unsigned long pfn = pte_pfn(pte);
-
-       return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
-}
-
 static int find_num_contig(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, size_t *pgsize)
 {
@@ -214,6 +204,19 @@ static pte_t get_clear_contig(struct mm_struct *mm,
        return orig_pte;
 }
 
+static pte_t get_clear_contig_flush(struct mm_struct *mm,
+                                   unsigned long addr,
+                                   pte_t *ptep,
+                                   unsigned long pgsize,
+                                   unsigned long ncontig)
+{
+       pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
+       struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+
+       flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
+       return orig_pte;
+}
+
 /*
  * Changing some bits of contiguous entries requires us to follow a
  * Break-Before-Make approach, breaking the whole contiguous set
@@ -447,19 +450,20 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
        int ncontig, i;
        size_t pgsize = 0;
        unsigned long pfn = pte_pfn(pte), dpfn;
+       struct mm_struct *mm = vma->vm_mm;
        pgprot_t hugeprot;
        pte_t orig_pte;
 
        if (!pte_cont(pte))
                return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
 
-       ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
+       ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;
 
        if (!__cont_access_flags_changed(ptep, pte, ncontig))
                return 0;
 
-       orig_pte = get_clear_contig(vma->vm_mm, addr, ptep, pgsize, ncontig);
+       orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
 
        /* Make sure we don't lose the dirty or young state */
        if (pte_dirty(orig_pte))
@@ -470,7 +474,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 
        hugeprot = pte_pgprot(pte);
        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
-               set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
+               set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
 
        return 1;
 }
@@ -492,7 +496,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;
 
-       pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
+       pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
        pte = pte_wrprotect(pte);
 
        hugeprot = pte_pgprot(pte);
@@ -505,17 +509,15 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
 pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                            unsigned long addr, pte_t *ptep)
 {
+       struct mm_struct *mm = vma->vm_mm;
        size_t pgsize;
        int ncontig;
-       pte_t orig_pte;
 
        if (!pte_cont(READ_ONCE(*ptep)))
                return ptep_clear_flush(vma, addr, ptep);
 
-       ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
-       orig_pte = get_clear_contig(vma->vm_mm, addr, ptep, pgsize, ncontig);
-       flush_tlb_range(vma, addr, addr + pgsize * ncontig);
-       return orig_pte;
+       ncontig = find_num_contig(mm, addr, ptep, &pgsize);
+       return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
 }
 
 static int __init hugetlbpage_init(void)
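
Taken together, the helpers above implement Break-Before-Make for a contiguous set of entries; a condensed sketch using only names visible in this hunk:

	/* break: clear all ncontig entries, accumulating dirty/young bits */
	orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
	/* flush: now done unconditionally by get_clear_contig_flush() */
	flush_tlb_range(&vma, addr, addr + pgsize * ncontig);
	/* make: write the updated entries back */
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
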
index 339ee84..b6ef26f 100644 (file)
@@ -389,7 +389,7 @@ void __init arm64_memblock_init(void)
 
        early_init_fdt_scan_reserved_mem();
 
-       if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32))
+       if (!defer_reserve_crashkernel())
                reserve_crashkernel();
 
        high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
@@ -438,7 +438,7 @@ void __init bootmem_init(void)
         * request_standard_resources() depends on crashkernel's memory being
         * reserved, so do it here.
         */
-       if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32))
+       if (defer_reserve_crashkernel())
                reserve_crashkernel();
 
        memblock_dump_all();
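
The defer_reserve_crashkernel() helper is introduced elsewhere in this merge; judging by the IS_ENABLED() checks it replaces in these hunks, it is presumably equivalent to:

	static inline bool defer_reserve_crashkernel(void)
	{
		return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32);
	}
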
index b21f91c..c5af103 100644 (file)
@@ -1,96 +1,22 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- * Based on arch/arm/mm/ioremap.c
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- * Hacked for ARM by Phil Blundell <philb@gnu.org>
- * Hacked to allow all architectures to build, and various cleanups
- * by Russell King
- * Copyright (C) 2012 ARM Ltd.
- */
 
-#include <linux/export.h>
 #include <linux/mm.h>
-#include <linux/vmalloc.h>
 #include <linux/io.h>
 
-#include <asm/fixmap.h>
-#include <asm/tlbflush.h>
-
-static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
-                                     pgprot_t prot, void *caller)
+bool ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot)
 {
-       unsigned long last_addr;
-       unsigned long offset = phys_addr & ~PAGE_MASK;
-       int err;
-       unsigned long addr;
-       struct vm_struct *area;
+       unsigned long last_addr = phys_addr + size - 1;
 
-       /*
-        * Page align the mapping address and size, taking account of any
-        * offset.
-        */
-       phys_addr &= PAGE_MASK;
-       size = PAGE_ALIGN(size + offset);
+       /* Don't allow outside PHYS_MASK */
+       if (last_addr & ~PHYS_MASK)
+               return false;
 
-       /*
-        * Don't allow wraparound, zero size or outside PHYS_MASK.
-        */
-       last_addr = phys_addr + size - 1;
-       if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
-               return NULL;
-
-       /*
-        * Don't allow RAM to be mapped.
-        */
+       /* Don't allow RAM to be mapped. */
        if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
-               return NULL;
-
-       area = get_vm_area_caller(size, VM_IOREMAP, caller);
-       if (!area)
-               return NULL;
-       addr = (unsigned long)area->addr;
-       area->phys_addr = phys_addr;
-
-       err = ioremap_page_range(addr, addr + size, phys_addr, prot);
-       if (err) {
-               vunmap((void *)addr);
-               return NULL;
-       }
-
-       return (void __iomem *)(offset + addr);
-}
-
-void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
-{
-       return __ioremap_caller(phys_addr, size, prot,
-                               __builtin_return_address(0));
-}
-EXPORT_SYMBOL(__ioremap);
-
-void iounmap(volatile void __iomem *io_addr)
-{
-       unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
-
-       /*
-        * We could get an address outside vmalloc range in case
-        * of ioremap_cache() reusing a RAM mapping.
-        */
-       if (is_vmalloc_addr((void *)addr))
-               vunmap((void *)addr);
-}
-EXPORT_SYMBOL(iounmap);
-
-void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
-{
-       /* For normal memory we already have a cacheable mapping. */
-       if (pfn_is_map_memory(__phys_to_pfn(phys_addr)))
-               return (void __iomem *)__phys_to_virt(phys_addr);
+               return false;
 
-       return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
-                               __builtin_return_address(0));
+       return true;
 }
-EXPORT_SYMBOL(ioremap_cache);
 
 /*
  * Must be called after early_fixmap_init
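
With arm64 switched to the generic ioremap path, the arch hook above only vetoes invalid requests; call sites are unchanged. A hypothetical driver-style usage (the 0x09000000 base and 0x30 register offset are assumptions, not from this diff):

	void __iomem *base = ioremap(0x09000000, SZ_4K);

	if (!base)
		return -ENOMEM;	/* vetoed by ioremap_allowed() or mapping failure */
	writel(0x1, base + 0x30);
	iounmap(base);
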
index c12cd70..e969e68 100644 (file)
@@ -236,7 +236,7 @@ static void __init kasan_init_shadow(void)
         */
        memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
        dsb(ishst);
-       cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
+       cpu_replace_ttbr1(lm_alias(tmp_pg_dir), idmap_pg_dir);
 
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
@@ -280,7 +280,7 @@ static void __init kasan_init_shadow(void)
                                PAGE_KERNEL_RO));
 
        memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
-       cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+       cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
 }
 
 static void __init kasan_init_depth(void)
index 626ec32..db7c4e6 100644 (file)
 #define NO_CONT_MAPPINGS       BIT(1)
 #define NO_EXEC_MAPPINGS       BIT(2)  /* assumes FEAT_HPDS is not used */
 
-u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
-u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
+int idmap_t0sz __ro_after_init;
 
-u64 __section(".mmuoff.data.write") vabits_actual;
+#if VA_BITS > 48
+u64 vabits_actual __ro_after_init = VA_BITS_MIN;
 EXPORT_SYMBOL(vabits_actual);
+#endif
+
+u64 kimage_vaddr __ro_after_init = (u64)&_text;
+EXPORT_SYMBOL(kimage_vaddr);
 
 u64 kimage_voffset __ro_after_init;
 EXPORT_SYMBOL(kimage_voffset);
 
+u32 __boot_cpu_mode[] = { BOOT_CPU_MODE_EL2, BOOT_CPU_MODE_EL1 };
+
+/*
+ * The booting CPU updates the failed status @__early_cpu_boot_status,
+ * with MMU turned off.
+ */
+long __section(".mmuoff.data.write") __early_cpu_boot_status;
+
 /*
  * Empty_zero_page is a special page that is used for zero-initialized data
  * and COW.
@@ -388,6 +400,13 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
        } while (pgdp++, addr = next, addr != end);
 }
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+extern __alias(__create_pgd_mapping)
+void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
+                            phys_addr_t size, pgprot_t prot,
+                            phys_addr_t (*pgtable_alloc)(int), int flags);
+#endif
+
 static phys_addr_t __pgd_pgtable_alloc(int shift)
 {
        void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
@@ -529,8 +548,7 @@ static void __init map_mem(pgd_t *pgdp)
 
 #ifdef CONFIG_KEXEC_CORE
        if (crash_mem_map) {
-               if (IS_ENABLED(CONFIG_ZONE_DMA) ||
-                   IS_ENABLED(CONFIG_ZONE_DMA32))
+               if (defer_reserve_crashkernel())
                        flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
                else if (crashk_res.end)
                        memblock_mark_nomap(crashk_res.start,
@@ -571,8 +589,7 @@ static void __init map_mem(pgd_t *pgdp)
         * through /sys/kernel/kexec_crash_size interface.
         */
 #ifdef CONFIG_KEXEC_CORE
-       if (crash_mem_map &&
-           !IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32)) {
+       if (crash_mem_map && !defer_reserve_crashkernel()) {
                if (crashk_res.end) {
                        __map_memblock(pgdp, crashk_res.start,
                                       crashk_res.end + 1,
@@ -665,13 +682,9 @@ static int __init map_entry_trampoline(void)
                __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
                             pa_start + i * PAGE_SIZE, prot);
 
-       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
-               extern char __entry_tramp_data_start[];
-
-               __set_fixmap(FIX_ENTRY_TRAMP_DATA,
-                            __pa_symbol(__entry_tramp_data_start),
-                            PAGE_KERNEL_RO);
-       }
+       if (IS_ENABLED(CONFIG_RELOCATABLE))
+               __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
+                            pa_start + i * PAGE_SIZE, PAGE_KERNEL_RO);
 
        return 0;
 }
@@ -762,22 +775,57 @@ static void __init map_kernel(pgd_t *pgdp)
        kasan_copy_shadow(pgdp);
 }
 
+static void __init create_idmap(void)
+{
+       u64 start = __pa_symbol(__idmap_text_start);
+       u64 size = __pa_symbol(__idmap_text_end) - start;
+       pgd_t *pgd = idmap_pg_dir;
+       u64 pgd_phys;
+
+       /* check if we need an additional level of translation */
+       if (VA_BITS < 48 && idmap_t0sz < (64 - VA_BITS_MIN)) {
+               pgd_phys = early_pgtable_alloc(PAGE_SHIFT);
+               set_pgd(&idmap_pg_dir[start >> VA_BITS],
+                       __pgd(pgd_phys | P4D_TYPE_TABLE));
+               pgd = __va(pgd_phys);
+       }
+       __create_pgd_mapping(pgd, start, start, size, PAGE_KERNEL_ROX,
+                            early_pgtable_alloc, 0);
+
+       if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+               extern u32 __idmap_kpti_flag;
+               u64 pa = __pa_symbol(&__idmap_kpti_flag);
+
+               /*
+                * The KPTI G-to-nG conversion code needs a read-write mapping
+                * of its synchronization flag in the ID map.
+                */
+               __create_pgd_mapping(pgd, pa, pa, sizeof(u32), PAGE_KERNEL,
+                                    early_pgtable_alloc, 0);
+       }
+}
+
 void __init paging_init(void)
 {
        pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
+       extern pgd_t init_idmap_pg_dir[];
+
+       idmap_t0sz = 63UL - __fls(__pa_symbol(_end) | GENMASK(VA_BITS_MIN - 1, 0));
 
        map_kernel(pgdp);
        map_mem(pgdp);
 
        pgd_clear_fixmap();
 
-       cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+       cpu_replace_ttbr1(lm_alias(swapper_pg_dir), init_idmap_pg_dir);
        init_mm.pgd = swapper_pg_dir;
 
        memblock_phys_free(__pa_symbol(init_pg_dir),
                           __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
 
        memblock_allow_resize();
+
+       create_idmap();
 }
 
 /*
index a9e50e9..4334dec 100644 (file)
@@ -53,15 +53,6 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
        if (!tags)
                return false;
 
-       page_kasan_tag_reset(page);
-       /*
-        * We need smp_wmb() in between setting the flags and clearing the
-        * tags because if another thread reads page->flags and builds a
-        * tagged address out of it, there is an actual dependency to the
-        * memory access, but on the current thread we do not guarantee that
-        * the new page->flags are visible before the tags were updated.
-        */
-       smp_wmb();
        mte_restore_page_tags(page_address(page), tags);
 
        return true;
index 50bbed9..7837a69 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/asm_pointer_auth.h>
 #include <asm/hwcap.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
@@ -200,34 +201,64 @@ SYM_FUNC_END(idmap_cpu_replace_ttbr1)
        .popsection
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+
+#define KPTI_NG_PTE_FLAGS      (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
+
        .pushsection ".idmap.text", "awx"
 
-       .macro  __idmap_kpti_get_pgtable_ent, type
-       dc      cvac, cur_\()\type\()p          // Ensure any existing dirty
-       dmb     sy                              // lines are written back before
-       ldr     \type, [cur_\()\type\()p]       // loading the entry
-       tbz     \type, #0, skip_\()\type        // Skip invalid and
-       tbnz    \type, #11, skip_\()\type       // non-global entries
+       .macro  kpti_mk_tbl_ng, type, num_entries
+       add     end_\type\()p, cur_\type\()p, #\num_entries * 8
+.Ldo_\type:
+       ldr     \type, [cur_\type\()p]          // Load the entry
+       tbz     \type, #0, .Lnext_\type         // Skip invalid and
+       tbnz    \type, #11, .Lnext_\type        // non-global entries
+       orr     \type, \type, #PTE_NG           // Same bit for blocks and pages
+       str     \type, [cur_\type\()p]          // Update the entry
+       .ifnc   \type, pte
+       tbnz    \type, #1, .Lderef_\type
+       .endif
+.Lnext_\type:
+       add     cur_\type\()p, cur_\type\()p, #8
+       cmp     cur_\type\()p, end_\type\()p
+       b.ne    .Ldo_\type
        .endm
 
-       .macro __idmap_kpti_put_pgtable_ent_ng, type
-       orr     \type, \type, #PTE_NG           // Same bit for blocks and pages
-       str     \type, [cur_\()\type\()p]       // Update the entry and ensure
-       dmb     sy                              // that it is visible to all
-       dc      civac, cur_\()\type\()p         // CPUs.
+       /*
+        * Dereference the current table entry and map it into the temporary
+        * fixmap slot associated with the current level.
+        */
+       .macro  kpti_map_pgtbl, type, level
+       str     xzr, [temp_pte, #8 * (\level + 1)]      // break before make
+       dsb     nshst
+       add     pte, temp_pte, #PAGE_SIZE * (\level + 1)
+       lsr     pte, pte, #12
+       tlbi    vaae1, pte
+       dsb     nsh
+       isb
+
+       phys_to_pte pte, cur_\type\()p
+       add     cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1)
+       orr     pte, pte, pte_flags
+       str     pte, [temp_pte, #8 * (\level + 1)]
+       dsb     nshst
        .endm
 
 /*
- * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
+ * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
+ *                                unsigned long temp_pte_va)
  *
  * Called exactly once from stop_machine context by each CPU found during boot.
  */
-__idmap_kpti_flag:
-       .long   1
+       .pushsection    ".data", "aw", %progbits
+SYM_DATA(__idmap_kpti_flag, .long 1)
+       .popsection
+
 SYM_FUNC_START(idmap_kpti_install_ng_mappings)
        cpu             .req    w0
+       temp_pte        .req    x0
        num_cpus        .req    w1
-       swapper_pa      .req    x2
+       pte_flags       .req    x1
+       temp_pgd_phys   .req    x2
        swapper_ttb     .req    x3
        flag_ptr        .req    x4
        cur_pgdp        .req    x5
@@ -235,17 +266,16 @@ SYM_FUNC_START(idmap_kpti_install_ng_mappings)
        pgd             .req    x7
        cur_pudp        .req    x8
        end_pudp        .req    x9
-       pud             .req    x10
        cur_pmdp        .req    x11
        end_pmdp        .req    x12
-       pmd             .req    x13
        cur_ptep        .req    x14
        end_ptep        .req    x15
        pte             .req    x16
+       valid           .req    x17
 
+       mov     x5, x3                          // preserve temp_pte arg
        mrs     swapper_ttb, ttbr1_el1
-       restore_ttbr1   swapper_ttb
-       adr     flag_ptr, __idmap_kpti_flag
+       adr_l   flag_ptr, __idmap_kpti_flag
 
        cbnz    cpu, __idmap_kpti_secondary
 
@@ -256,98 +286,71 @@ SYM_FUNC_START(idmap_kpti_install_ng_mappings)
        eor     w17, w17, num_cpus
        cbnz    w17, 1b
 
-       /* We need to walk swapper, so turn off the MMU. */
-       pre_disable_mmu_workaround
-       mrs     x17, sctlr_el1
-       bic     x17, x17, #SCTLR_ELx_M
-       msr     sctlr_el1, x17
+       /* Switch to the temporary page tables on this CPU only */
+       __idmap_cpu_set_reserved_ttbr1 x8, x9
+       offset_ttbr1 temp_pgd_phys, x8
+       msr     ttbr1_el1, temp_pgd_phys
        isb
 
+       mov     temp_pte, x5
+       mov     pte_flags, #KPTI_NG_PTE_FLAGS
+
        /* Everybody is enjoying the idmap, so we can rewrite swapper. */
        /* PGD */
-       mov     cur_pgdp, swapper_pa
-       add     end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
-do_pgd:        __idmap_kpti_get_pgtable_ent    pgd
-       tbnz    pgd, #1, walk_puds
-next_pgd:
-       __idmap_kpti_put_pgtable_ent_ng pgd
-skip_pgd:
-       add     cur_pgdp, cur_pgdp, #8
-       cmp     cur_pgdp, end_pgdp
-       b.ne    do_pgd
-
-       /* Publish the updated tables and nuke all the TLBs */
-       dsb     sy
-       tlbi    vmalle1is
-       dsb     ish
-       isb
+       adrp            cur_pgdp, swapper_pg_dir
+       kpti_map_pgtbl  pgd, 0
+       kpti_mk_tbl_ng  pgd, PTRS_PER_PGD
 
-       /* We're done: fire up the MMU again */
-       mrs     x17, sctlr_el1
-       orr     x17, x17, #SCTLR_ELx_M
-       set_sctlr_el1   x17
+       /* Ensure all the updated entries are visible to secondary CPUs */
+       dsb     ishst
+
+       /* We're done: fire up swapper_pg_dir again */
+       __idmap_cpu_set_reserved_ttbr1 x8, x9
+       msr     ttbr1_el1, swapper_ttb
+       isb
 
        /* Set the flag to zero to indicate that we're all done */
        str     wzr, [flag_ptr]
        ret
 
+.Lderef_pgd:
        /* PUD */
-walk_puds:
-       .if CONFIG_PGTABLE_LEVELS > 3
+       .if             CONFIG_PGTABLE_LEVELS > 3
+       pud             .req    x10
        pte_to_phys     cur_pudp, pgd
-       add     end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
-do_pud:        __idmap_kpti_get_pgtable_ent    pud
-       tbnz    pud, #1, walk_pmds
-next_pud:
-       __idmap_kpti_put_pgtable_ent_ng pud
-skip_pud:
-       add     cur_pudp, cur_pudp, 8
-       cmp     cur_pudp, end_pudp
-       b.ne    do_pud
-       b       next_pgd
-       .else /* CONFIG_PGTABLE_LEVELS <= 3 */
-       mov     pud, pgd
-       b       walk_pmds
-next_pud:
-       b       next_pgd
+       kpti_map_pgtbl  pud, 1
+       kpti_mk_tbl_ng  pud, PTRS_PER_PUD
+       b               .Lnext_pgd
+       .else           /* CONFIG_PGTABLE_LEVELS <= 3 */
+       pud             .req    pgd
+       .set            .Lnext_pud, .Lnext_pgd
        .endif
 
+.Lderef_pud:
        /* PMD */
-walk_pmds:
-       .if CONFIG_PGTABLE_LEVELS > 2
+       .if             CONFIG_PGTABLE_LEVELS > 2
+       pmd             .req    x13
        pte_to_phys     cur_pmdp, pud
-       add     end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
-do_pmd:        __idmap_kpti_get_pgtable_ent    pmd
-       tbnz    pmd, #1, walk_ptes
-next_pmd:
-       __idmap_kpti_put_pgtable_ent_ng pmd
-skip_pmd:
-       add     cur_pmdp, cur_pmdp, #8
-       cmp     cur_pmdp, end_pmdp
-       b.ne    do_pmd
-       b       next_pud
-       .else /* CONFIG_PGTABLE_LEVELS <= 2 */
-       mov     pmd, pud
-       b       walk_ptes
-next_pmd:
-       b       next_pud
+       kpti_map_pgtbl  pmd, 2
+       kpti_mk_tbl_ng  pmd, PTRS_PER_PMD
+       b               .Lnext_pud
+       .else           /* CONFIG_PGTABLE_LEVELS <= 2 */
+       pmd             .req    pgd
+       .set            .Lnext_pmd, .Lnext_pgd
        .endif
 
+.Lderef_pmd:
        /* PTE */
-walk_ptes:
        pte_to_phys     cur_ptep, pmd
-       add     end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
-do_pte:        __idmap_kpti_get_pgtable_ent    pte
-       __idmap_kpti_put_pgtable_ent_ng pte
-skip_pte:
-       add     cur_ptep, cur_ptep, #8
-       cmp     cur_ptep, end_ptep
-       b.ne    do_pte
-       b       next_pmd
+       kpti_map_pgtbl  pte, 3
+       kpti_mk_tbl_ng  pte, PTRS_PER_PTE
+       b               .Lnext_pmd
 
        .unreq  cpu
+       .unreq  temp_pte
        .unreq  num_cpus
-       .unreq  swapper_pa
+       .unreq  pte_flags
+       .unreq  temp_pgd_phys
        .unreq  cur_pgdp
        .unreq  end_pgdp
        .unreq  pgd
@@ -360,6 +363,7 @@ skip_pte:
        .unreq  cur_ptep
        .unreq  end_ptep
        .unreq  pte
+       .unreq  valid
 
        /* Secondary CPUs end up here */
 __idmap_kpti_secondary:
@@ -379,7 +383,6 @@ __idmap_kpti_secondary:
        cbnz    w16, 1b
 
        /* All done, act like nothing happened */
-       offset_ttbr1 swapper_ttb, x16
        msr     ttbr1_el1, swapper_ttb
        isb
        ret
@@ -395,6 +398,8 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings)
  *
  *     Initialise the processor for turning the MMU on.
  *
+ * Input:
+ *     x0 - actual number of VA bits (ignored unless VA_BITS > 48)
  * Output:
  *     Return in x0 the value of the SCTLR_EL1 register.
  */
@@ -464,12 +469,11 @@ SYM_FUNC_START(__cpu_setup)
        tcr_clear_errata_bits tcr, x9, x5
 
 #ifdef CONFIG_ARM64_VA_BITS_52
-       ldr_l           x9, vabits_actual
-       sub             x9, xzr, x9
+       sub             x9, xzr, x0
        add             x9, x9, #64
        tcr_set_t1sz    tcr, x9
 #else
-       ldr_l           x9, idmap_t0sz
+       idmap_get_t0sz  x9
 #endif
        tcr_set_t0sz    tcr, x9
 
index 507b203..7796537 100644 (file)
@@ -36,6 +36,7 @@ HAS_RNG
 HAS_SB
 HAS_STAGE2_FWB
 HAS_SYSREG_GIC_CPUIF
+HAS_TIDCP1
 HAS_TLB_RANGE
 HAS_VIRT_HOST_EXTN
 HAS_WFXT
@@ -61,6 +62,7 @@ WORKAROUND_1418040
 WORKAROUND_1463225
 WORKAROUND_1508412
 WORKAROUND_1542419
+WORKAROUND_1742098
 WORKAROUND_1902691
 WORKAROUND_2038923
 WORKAROUND_2064142
index 5c55509..db46192 100755 (executable)
@@ -88,7 +88,7 @@ END {
 
 # skip blank lines and comment lines
 /^$/ { next }
-/^#/ { next }
+/^[\t ]*#/ { next }
 
 /^SysregFields/ {
        change_block("SysregFields", "None", "SysregFields")
index ff5e552..9ae483e 100644 (file)
 # feature that introduces them (eg, FEAT_LS64_ACCDATA introduces enumeration
# item ACCDATA) though it may be more tasteful to do something else.
 
+Sysreg ID_AA64ZFR0_EL1 3       0       0       4       4
+Res0   63:60
+Enum   59:56   F64MM
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   55:52   F32MM
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Res0   51:48
+Enum   47:44   I8MM
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   43:40   SM4
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Res0   39:36
+Enum   35:32   SHA3
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Res0   31:24
+Enum   23:20   BF16
+       0b0000  NI
+       0b0001  IMP
+       0b0010  EBF16
+EndEnum
+Enum   19:16   BitPerm
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Res0   15:8
+Enum   7:4     AES
+       0b0000  NI
+       0b0001  IMP
+       0b0010  PMULL128
+EndEnum
+Enum   3:0     SVEver
+       0b0000  IMP
+       0b0001  SVE2
+EndEnum
+EndSysreg
+
+Sysreg ID_AA64SMFR0_EL1        3       0       0       4       5
+Enum   63      FA64
+       0b0     NI
+       0b1     IMP
+EndEnum
+Res0   62:60
+Field  59:56   SMEver
+Enum   55:52   I16I64
+       0b0000  NI
+       0b1111  IMP
+EndEnum
+Res0   51:49
+Enum   48      F64F64
+       0b0     NI
+       0b1     IMP
+EndEnum
+Res0   47:40
+Enum   39:36   I8I32
+       0b0000  NI
+       0b1111  IMP
+EndEnum
+Enum   35      F16F32
+       0b0     NI
+       0b1     IMP
+EndEnum
+Enum   34      B16F32
+       0b0     NI
+       0b1     IMP
+EndEnum
+Res0   33
+Enum   32      F32F32
+       0b0     NI
+       0b1     IMP
+EndEnum
+Res0   31:0
+EndSysreg
+
 Sysreg ID_AA64ISAR0_EL1        3       0       0       6       0
 Enum   63:60   RNDR
        0b0000  NI
@@ -114,6 +197,122 @@ EndEnum
 Res0   3:0
 EndSysreg
 
+Sysreg ID_AA64ISAR1_EL1        3       0       0       6       1
+Enum   63:60   LS64
+       0b0000  NI
+       0b0001  LS64
+       0b0010  LS64_V
+       0b0011  LS64_ACCDATA
+EndEnum
+Enum   59:56   XS
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   55:52   I8MM
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   51:48   DGH
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   47:44   BF16
+       0b0000  NI
+       0b0001  IMP
+       0b0010  EBF16
+EndEnum
+Enum   43:40   SPECRES
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   39:36   SB
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   35:32   FRINTTS
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   31:28   GPI
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   27:24   GPA
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   23:20   LRCPC
+       0b0000  NI
+       0b0001  IMP
+       0b0010  LRCPC2
+EndEnum
+Enum   19:16   FCMA
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   15:12   JSCVT
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   11:8    API
+       0b0000  NI
+       0b0001  PAuth
+       0b0010  EPAC
+       0b0011  PAuth2
+       0b0100  FPAC
+       0b0101  FPACCOMBINE
+EndEnum
+Enum   7:4     APA
+       0b0000  NI
+       0b0001  PAuth
+       0b0010  EPAC
+       0b0011  PAuth2
+       0b0100  FPAC
+       0b0101  FPACCOMBINE
+EndEnum
+Enum   3:0     DPB
+       0b0000  NI
+       0b0001  IMP
+       0b0010  DPB2
+EndEnum
+EndSysreg
+
+Sysreg ID_AA64ISAR2_EL1        3       0       0       6       2
+Res0   63:28
+Enum   27:24   PAC_frac
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   23:20   BC
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   19:16   MOPS
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   15:12   APA3
+       0b0000  NI
+       0b0001  PAuth
+       0b0010  EPAC
+       0b0011  PAuth2
+       0b0100  FPAC
+       0b0101  FPACCOMBINE
+EndEnum
+Enum   11:8    GPA3
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   7:4     RPRES
+       0b0000  NI
+       0b0001  IMP
+EndEnum
+Enum   3:0     WFxT
+       0b0000  NI
+       0b0010  IMP
+EndEnum
+EndSysreg
+
 Sysreg SCTLR_EL1       3       0       1       0       0
 Field  63      TIDCP
 Field  62      SPINMASK
@@ -257,6 +456,11 @@ Field      5:3     Ctype2
 Field  2:0     Ctype1
 EndSysreg
 
+Sysreg GMID_EL1        3       1       0       0       4
+Res0   63:4
+Field  3:0     BS
+EndSysreg
+
 Sysreg SMIDR_EL1       3       1       0       0       6
 Res0   63:32
 Field  31:24   IMPLEMENTER
@@ -273,6 +477,33 @@ Field      3:1     Level
 Field  0       InD
 EndSysreg
 
+Sysreg CTR_EL0 3       3       0       0       1
+Res0   63:38
+Field  37:32   TminLine
+Res1   31
+Res0   30
+Field  29      DIC
+Field  28      IDC
+Field  27:24   CWG
+Field  23:20   ERG
+Field  19:16   DminLine
+Enum   15:14   L1Ip
+       0b00    VPIPT
+       # This is named AIVIVT in the Arm ARM but is documented as reserved
+       0b01    RESERVED
+       0b10    VIPT
+       0b11    PIPT
+EndEnum
+Res0   13:4
+Field  3:0     IminLine
+EndSysreg
+
+Sysreg DCZID_EL0       3       3       0       0       7
+Res0   63:5
+Field  4       DZP
+Field  3:0     BS
+EndSysreg
+
 Sysreg SVCR    3       3       4       2       2
 Res0   63:2
 Field  1       ZA
@@ -367,3 +598,36 @@ EndSysreg
 Sysreg TTBR1_EL1       3       0       2       0       1
 Fields TTBRx_EL1
 EndSysreg
+
+Sysreg LORSA_EL1       3       0       10      4       0
+Res0   63:52
+Field  51:16   SA
+Res0   15:1
+Field  0       Valid
+EndSysreg
+
+Sysreg LOREA_EL1       3       0       10      4       1
+Res0   63:52
+Field  51:48   EA_51_48
+Field  47:16   EA_47_16
+Res0   15:0
+EndSysreg
+
+Sysreg LORN_EL1        3       0       10      4       2
+Res0   63:8
+Field  7:0     Num
+EndSysreg
+
+Sysreg LORC_EL1        3       0       10      4       3
+Res0   63:10
+Field  9:2     DS
+Res0   1
+Field  0       EN
+EndSysreg
+
+Sysreg LORID_EL1       3       0       10      4       7
+Res0   63:24
+Field  23:16   LD
+Res0   15:8
+Field  7:0     LR
+EndSysreg
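
For readers new to this file's format: the five numbers after each register name are the op0/op1/CRn/CRm/op2 encoding, the same tuple packed by the kernel's sys_reg() macro in <asm/sysreg.h>. A minimal sketch for the first entry above (treat the exact generated macro name as an assumption):

	/* "Sysreg LORSA_EL1 3 0 10 4 0" encodes op0=3, op1=0, CRn=10,
	 * CRm=4, op2=0, i.e. S3_0_C10_C4_0; the generator effectively
	 * emits: */
	#define SYS_LORSA_EL1	sys_reg(3, 0, 10, 4, 0)
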
index 3498e65..702861c 100644 (file)
@@ -4,21 +4,6 @@
 #define __ASM_CSKY_TLB_H
 
 #include <asm/cacheflush.h>
-
-#define tlb_start_vma(tlb, vma) \
-       do { \
-               if (!(tlb)->fullmm) \
-                       flush_cache_range(vma, (vma)->vm_start, (vma)->vm_end); \
-       }  while (0)
-
-#define tlb_end_vma(tlb, vma) \
-       do { \
-               if (!(tlb)->fullmm) \
-                       flush_tlb_range(vma, (vma)->vm_start, (vma)->vm_end); \
-       }  while (0)
-
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
 #include <asm-generic/tlb.h>
 
 #endif /* __ASM_CSKY_TLB_H */
index 1920d52..62b5b07 100644 (file)
@@ -54,7 +54,6 @@ config LOONGARCH
        select GENERIC_CMOS_UPDATE
        select GENERIC_CPU_AUTOPROBE
        select GENERIC_ENTRY
-       select GENERIC_FIND_FIRST_BIT
        select GENERIC_GETTIMEOFDAY
        select GENERIC_IRQ_MULTI_HANDLER
        select GENERIC_IRQ_PROBE
@@ -70,14 +69,12 @@ config LOONGARCH
        select GENERIC_TIME_VSYSCALL
        select GPIOLIB
        select HAVE_ARCH_AUDITSYSCALL
-       select HAVE_ARCH_COMPILER_H
        select HAVE_ARCH_MMAP_RND_BITS if MMU
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select HAVE_ASM_MODVERSIONS
        select HAVE_CONTEXT_TRACKING
-       select HAVE_COPY_THREAD_TLS
        select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_DMA_CONTIGUOUS
        select HAVE_EXIT_THREAD
@@ -86,8 +83,6 @@ config LOONGARCH
        select HAVE_IOREMAP_PROT
        select HAVE_IRQ_EXIT_ON_IRQ_STACK
        select HAVE_IRQ_TIME_ACCOUNTING
-       select HAVE_MEMBLOCK
-       select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_MOD_ARCH_SPECIFIC
        select HAVE_NMI
        select HAVE_PERF_EVENTS
@@ -112,6 +107,7 @@ config LOONGARCH
        select TRACE_IRQFLAGS_SUPPORT
        select USE_PERCPU_NUMA_NODE_ID
        select ZONE_DMA32
+       select MMU_GATHER_MERGE_VMAS if MMU
 
 config 32BIT
        bool
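
The new MMU_GATHER_MERGE_VMAS select (like the csky tlb.h cleanup above) comes from this cycle's generic mmu_gather rework. A conceptual sketch, not the exact <asm-generic/tlb.h> text, of what the option changes:

	/* Assumption: with MMU_GATHER_MERGE_VMAS the gather merges
	 * ranges across VMAs; without it, it flushes at every VMA
	 * boundary, roughly: */
	static inline void tlb_end_vma(struct mmu_gather *tlb,
				       struct vm_area_struct *vma)
	{
	#ifndef CONFIG_MMU_GATHER_MERGE_VMAS
		if (!tlb->fullmm)
			tlb_flush_mmu_tlbonly(tlb);	/* per-VMA flush */
	#endif
	}
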
index a1a0408..be037a4 100644 (file)
        nor     \dst, \src, zero
 .endm
 
-.macro bgt r0 r1 label
-       blt     \r1, \r0, \label
-.endm
-
-.macro bltz r0 label
-       blt     \r0, zero, \label
-.endm
-
-.macro bgez r0 label
-       bge     \r0, zero, \label
-.endm
-
 #endif /* _ASM_ASMMACRO_H */
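
These ad-hoc macros can go because the assembler already accepts the standard pseudo-instructions; the conversions throughout the rest of this series rely on the following aliases (standard LoongArch assembly, stated here as an assumption about the supported binutils):

	/* Each alias assembles to identical machine code:
	 *   jr   rj       ==  jirl $zero, rj, 0   (indirect jump, no link)
	 *   move rd, rj   ==  or   rd, rj, $zero
	 *   beqz rj, off  ==  beq  rj, $zero, off (likewise bnez)
	 *   bltz/bgez/bgtz rj, off  ==  blt/bge forms against $zero
	 */
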
index 979367a..6b9aca9 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
-#include <asm/compiler.h>
 
 #if __SIZEOF_LONG__ == 4
 #define __LL           "ll.w   "
@@ -157,27 +156,25 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
                __asm__ __volatile__(
                "1:     ll.w    %1, %2          # atomic_sub_if_positive\n"
                "       addi.w  %0, %1, %3                              \n"
-               "       or      %1, %0, $zero                           \n"
-               "       blt     %0, $zero, 2f                           \n"
+               "       move    %1, %0                                  \n"
+               "       bltz    %0, 2f                                  \n"
                "       sc.w    %1, %2                                  \n"
-               "       beq     $zero, %1, 1b                           \n"
+               "       beqz    %1, 1b                                  \n"
                "2:                                                     \n"
                __WEAK_LLSC_MB
-               : "=&r" (result), "=&r" (temp),
-                 "+" GCC_OFF_SMALL_ASM() (v->counter)
+               : "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
                : "I" (-i));
        } else {
                __asm__ __volatile__(
                "1:     ll.w    %1, %2          # atomic_sub_if_positive\n"
                "       sub.w   %0, %1, %3                              \n"
-               "       or      %1, %0, $zero                           \n"
-               "       blt     %0, $zero, 2f                           \n"
+               "       move    %1, %0                                  \n"
+               "       bltz    %0, 2f                                  \n"
                "       sc.w    %1, %2                                  \n"
-               "       beq     $zero, %1, 1b                           \n"
+               "       beqz    %1, 1b                                  \n"
                "2:                                                     \n"
                __WEAK_LLSC_MB
-               : "=&r" (result), "=&r" (temp),
-                 "+" GCC_OFF_SMALL_ASM() (v->counter)
+               : "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
                : "r" (i));
        }
 
@@ -320,27 +317,25 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
                __asm__ __volatile__(
                "1:     ll.d    %1, %2  # atomic64_sub_if_positive      \n"
                "       addi.d  %0, %1, %3                              \n"
-               "       or      %1, %0, $zero                           \n"
-               "       blt     %0, $zero, 2f                           \n"
+               "       move    %1, %0                                  \n"
+               "       bltz    %0, 2f                                  \n"
                "       sc.d    %1, %2                                  \n"
-               "       beq     %1, $zero, 1b                           \n"
+               "       beqz    %1, 1b                                  \n"
                "2:                                                     \n"
                __WEAK_LLSC_MB
-               : "=&r" (result), "=&r" (temp),
-                 "+" GCC_OFF_SMALL_ASM() (v->counter)
+               : "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
                : "I" (-i));
        } else {
                __asm__ __volatile__(
                "1:     ll.d    %1, %2  # atomic64_sub_if_positive      \n"
                "       sub.d   %0, %1, %3                              \n"
-               "       or      %1, %0, $zero                           \n"
-               "       blt     %0, $zero, 2f                           \n"
+               "       move    %1, %0                                  \n"
+               "       bltz    %0, 2f                                  \n"
                "       sc.d    %1, %2                                  \n"
-               "       beq     %1, $zero, 1b                           \n"
+               "       beqz    %1, 1b                                  \n"
                "2:                                                     \n"
                __WEAK_LLSC_MB
-               : "=&r" (result), "=&r" (temp),
-                 "+" GCC_OFF_SMALL_ASM() (v->counter)
+               : "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
                : "r" (i));
        }
 
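The "+ZC" constraint spelled out here is LoongArch GCC's memory constraint for addresses usable by ll/sc, which is what lets the GCC_OFF_SMALL_ASM() wrapper (and <asm/compiler.h>) go away below. A self-contained sketch of the same ll.w/sc.w retry pattern, relaxed (no barriers), with a hypothetical helper name:

	static inline void atomic_or_relaxed(int mask, int *p)
	{
		int tmp;

		__asm__ __volatile__(
		"1:	ll.w	%0, %1		\n"	/* load-linked	      */
		"	or	%0, %0, %2	\n"
		"	sc.w	%0, %1		\n"	/* store, %0 = success */
		"	beqz	%0, 1b		\n"	/* retry on failure   */
		: "=&r" (tmp), "+ZC" (*p)
		: "r" (mask));
	}
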
index b6517ee..cda9776 100644 (file)
@@ -48,9 +48,9 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
        __asm__ __volatile__(
                "sltu   %0, %1, %2\n\t"
 #if (__SIZEOF_LONG__ == 4)
-               "sub.w  %0, $r0, %0\n\t"
+               "sub.w  %0, $zero, %0\n\t"
 #elif (__SIZEOF_LONG__ == 8)
-               "sub.d  %0, $r0, %0\n\t"
+               "sub.d  %0, $zero, %0\n\t"
 #endif
                : "=r" (mask)
                : "r" (index), "r" (size)
index 3f33c89..9a133e4 100644 (file)
@@ -12,10 +12,9 @@ static inline unsigned long exception_era(struct pt_regs *regs)
        return regs->csr_era;
 }
 
-static inline int compute_return_era(struct pt_regs *regs)
+static inline void compute_return_era(struct pt_regs *regs)
 {
        regs->csr_era += 4;
-       return 0;
 }
 
 #endif /* _ASM_BRANCH_H */
index 75b3a44..0a9b0fa 100644 (file)
@@ -55,9 +55,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
        __asm__ __volatile__(                                           \
        "1:     " ld "  %0, %2          # __cmpxchg_asm \n"             \
        "       bne     %0, %z3, 2f                     \n"             \
-       "       or      $t0, %z4, $zero                 \n"             \
+       "       move    $t0, %z4                        \n"             \
        "       " st "  $t0, %1                         \n"             \
-       "       beq     $zero, $t0, 1b                  \n"             \
+       "       beqz    $t0, 1b                         \n"             \
        "2:                                             \n"             \
        __WEAK_LLSC_MB                                                  \
        : "=&r" (__ret), "=ZB"(*m)                                      \
diff --git a/arch/loongarch/include/asm/compiler.h b/arch/loongarch/include/asm/compiler.h
deleted file mode 100644 (file)
index 657cebe..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _ASM_COMPILER_H
-#define _ASM_COMPILER_H
-
-#define GCC_OFF_SMALL_ASM() "ZC"
-
-#define LOONGARCH_ISA_LEVEL "loongarch"
-#define LOONGARCH_ISA_ARCH_LEVEL "arch=loongarch"
-#define LOONGARCH_ISA_LEVEL_RAW loongarch
-#define LOONGARCH_ISA_ARCH_LEVEL_RAW LOONGARCH_ISA_LEVEL_RAW
-
-#endif /* _ASM_COMPILER_H */
index f3960b1..5f3ff47 100644 (file)
@@ -288,8 +288,6 @@ struct arch_elf_state {
        .interp_fp_abi = LOONGARCH_ABI_FP_ANY,  \
 }
 
-#define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)
-
 extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
                            bool is_interp, struct arch_elf_state *state);
 
index adb16e4..b6be527 100644 (file)
@@ -48,6 +48,5 @@
 #define fcsr1  $r1
 #define fcsr2  $r2
 #define fcsr3  $r3
-#define vcsr16 $r16
 
 #endif /* _ASM_FPREGDEF_H */
index 9de8231..feb6658 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <asm/barrier.h>
-#include <asm/compiler.h>
 #include <asm/errno.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)             \
@@ -17,7 +16,7 @@
        "1:     ll.w    %1, %4 # __futex_atomic_op\n"           \
        "       " insn  "                               \n"     \
        "2:     sc.w    $t0, %2                         \n"     \
-       "       beq     $t0, $zero, 1b                  \n"     \
+       "       beqz    $t0, 1b                         \n"     \
        "3:                                             \n"     \
        "       .section .fixup,\"ax\"                  \n"     \
        "4:     li.w    %0, %6                          \n"     \
@@ -82,9 +81,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv
        "# futex_atomic_cmpxchg_inatomic                        \n"
        "1:     ll.w    %1, %3                                  \n"
        "       bne     %1, %z4, 3f                             \n"
-       "       or      $t0, %z5, $zero                         \n"
+       "       move    $t0, %z5                                \n"
        "2:     sc.w    $t0, %2                                 \n"
-       "       beq     $zero, $t0, 1b                          \n"
+       "       beqz    $t0, 1b                                 \n"
        "3:                                                     \n"
        __WEAK_LLSC_MB
        "       .section .fixup,\"ax\"                          \n"
@@ -95,8 +94,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv
        "       "__UA_ADDR "\t1b, 4b                            \n"
        "       "__UA_ADDR "\t2b, 4b                            \n"
        "       .previous                                       \n"
-       : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
-       : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+       : "+r" (ret), "=&r" (val), "=ZC" (*uaddr)
+       : "ZC" (*uaddr), "Jr" (oldval), "Jr" (newval),
          "i" (-EFAULT)
        : "memory", "t0");
 
index 52121cd..319a8c6 100644 (file)
@@ -9,7 +9,6 @@
 
 #include <linux/compiler.h>
 #include <linux/stringify.h>
-#include <asm/compiler.h>
 #include <asm/loongarch.h>
 
 static inline void arch_local_irq_enable(void)
index 2052a22..65fbbae 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/bitops.h>
 #include <linux/atomic.h>
 #include <asm/cmpxchg.h>
-#include <asm/compiler.h>
 
 typedef struct {
        atomic_long_t a;
index 6a80387..6e8f697 100644 (file)
@@ -39,18 +39,6 @@ extern const struct plat_smp_ops loongson3_smp_ops;
 
 #define MAX_PACKAGES 16
 
-/* Chip Config register of each physical cpu package */
-extern u64 loongson_chipcfg[MAX_PACKAGES];
-#define LOONGSON_CHIPCFG(id) (*(volatile u32 *)(loongson_chipcfg[id]))
-
-/* Chip Temperature register of each physical cpu package */
-extern u64 loongson_chiptemp[MAX_PACKAGES];
-#define LOONGSON_CHIPTEMP(id) (*(volatile u32 *)(loongson_chiptemp[id]))
-
-/* Freq Control register of each physical cpu package */
-extern u64 loongson_freqctrl[MAX_PACKAGES];
-#define LOONGSON_FREQCTRL(id) (*(volatile u32 *)(loongson_freqctrl[id]))
-
 #define xconf_readl(addr) readl(addr)
 #define xconf_readq(addr) readq(addr)
 
@@ -58,7 +46,7 @@ static inline void xconf_writel(u32 val, volatile void __iomem *addr)
 {
        asm volatile (
        "       st.w    %[v], %[hw], 0  \n"
-       "       ld.b    $r0, %[hw], 0   \n"
+       "       ld.b    $zero, %[hw], 0 \n"
        :
        : [hw] "r" (addr), [v] "r" (val)
        );
@@ -68,7 +56,7 @@ static inline void xconf_writeq(u64 val64, volatile void __iomem *addr)
 {
        asm volatile (
        "       st.d    %[v], %[hw], 0  \n"
-       "       ld.b    $r0, %[hw], 0   \n"
+       "       ld.b    $zero, %[hw], 0 \n"
        :
        : [hw] "r" (addr),  [v] "r" (val64)
        );
index 3dba498..dc47fc7 100644 (file)
@@ -6,6 +6,7 @@
 #define _ASM_PAGE_H
 
 #include <linux/const.h>
+#include <asm/addrspace.h>
 
 /*
  * PAGE_SHIFT determines the page size
index 5dc84d8..d9e86cf 100644 (file)
@@ -426,6 +426,11 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 
 #define kern_addr_valid(addr)  (1)
 
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+       return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
 /* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
@@ -497,11 +502,6 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
        return pmd;
 }
 
-static inline unsigned long pmd_pfn(pmd_t pmd)
-{
-       return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
-}
-
 static inline struct page *pmd_page(pmd_t pmd)
 {
        if (pmd_trans_huge(pmd))
index 1d63c93..57ec45a 100644 (file)
@@ -80,7 +80,6 @@ BUILD_FPR_ACCESS(64)
 
 struct loongarch_fpu {
        unsigned int    fcsr;
-       unsigned int    vcsr;
        uint64_t        fcc;    /* 8x8 */
        union fpureg    fpr[NUM_FPU_REGS];
 };
@@ -161,7 +160,6 @@ struct thread_struct {
         */                                                     \
        .fpu                    = {                             \
                .fcsr           = 0,                            \
-               .vcsr           = 0,                            \
                .fcc            = 0,                            \
                .fpr            = {{{0,},},},                   \
        },                                                      \
index 26483e3..6b5c2a7 100644 (file)
 static __always_inline void prepare_frametrace(struct pt_regs *regs)
 {
        __asm__ __volatile__(
-               /* Save $r1 */
+               /* Save $ra */
                STORE_ONE_REG(1)
-               /* Use $r1 to save PC */
-               "pcaddi $r1, 0\n\t"
-               STR_LONG_S " $r1, %0\n\t"
-               /* Restore $r1 */
-               STR_LONG_L " $r1, %1, "STR_LONGSIZE"\n\t"
+               /* Use $ra to save PC */
+               "pcaddi $ra, 0\n\t"
+               STR_LONG_S " $ra, %0\n\t"
+               /* Restore $ra */
+               STR_LONG_L " $ra, %1, "STR_LONGSIZE"\n\t"
                STORE_ONE_REG(2)
                STORE_ONE_REG(3)
                STORE_ONE_REG(4)
index 99beb11..b7dd9f1 100644 (file)
@@ -44,14 +44,14 @@ struct thread_info {
 }
 
 /* How to get the thread information struct from C. */
-register struct thread_info *__current_thread_info __asm__("$r2");
+register struct thread_info *__current_thread_info __asm__("$tp");
 
 static inline struct thread_info *current_thread_info(void)
 {
        return __current_thread_info;
 }
 
-register unsigned long current_stack_pointer __asm__("$r3");
+register unsigned long current_stack_pointer __asm__("$sp");
 
 #endif /* !__ASSEMBLY__ */
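
The renames here switch from raw register numbers to the ABI mnemonics; for reference, the aliases assumed by this series (per the LoongArch psABI):

	/* $r0  = $zero (hard-wired zero)	$r1 = $ra (return address)
	 * $r2  = $tp (thread pointer)		$r3 = $sp (stack pointer)
	 * $r21 = $u0 (per-CPU base in the kernel)
	 * so current_thread_info() and current_stack_pointer above bind
	 * to $tp and $sp by name rather than by number. */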
 
index 4f629ae..dd24f58 100644 (file)
@@ -137,16 +137,6 @@ static inline void invtlb_all(u32 op, u32 info, u64 addr)
                );
 }
 
-/*
- * LoongArch doesn't need any special per-pte or per-vma handling, except
- * we need to flush cache for area to be unmapped.
- */
-#define tlb_start_vma(tlb, vma)                                        \
-       do {                                                    \
-               if (!(tlb)->fullmm)                             \
-                       flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-       }  while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 
 static void tlb_flush(struct mmu_gather *tlb);
index 217c6a3..2b44edc 100644 (file)
@@ -162,7 +162,7 @@ do {                                                                        \
        "2:                                                     \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li.w    %0, %3                                  \n"     \
-       "       or      %1, $r0, $r0                            \n"     \
+       "       move    %1, $zero                               \n"     \
        "       b       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
index bfb65eb..20cd9e1 100644 (file)
@@ -166,7 +166,6 @@ void output_thread_fpu_defines(void)
 
        OFFSET(THREAD_FCSR, loongarch_fpu, fcsr);
        OFFSET(THREAD_FCC,  loongarch_fpu, fcc);
-       OFFSET(THREAD_VCSR, loongarch_fpu, vcsr);
        BLANK();
 }
 
index b38f548..4662b06 100644 (file)
@@ -4,8 +4,9 @@
  *
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  */
-#include <asm/cpu-info.h>
 #include <linux/cacheinfo.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu-info.h>
 
 /* Populates leaf and increments to next leaf */
 #define populate_cache(cache, leaf, c_level, c_type)           \
@@ -17,6 +18,8 @@ do {                                                          \
        leaf->ways_of_associativity = c->cache.ways;            \
        leaf->size = c->cache.linesz * c->cache.sets *          \
                c->cache.ways;                                  \
+       if (leaf->level > 2)                                    \
+               leaf->size *= nodes_per_package;                \
        leaf++;                                                 \
 } while (0)
 
@@ -95,11 +98,15 @@ static void cache_cpumap_setup(unsigned int cpu)
 
 int populate_cache_leaves(unsigned int cpu)
 {
-       int level = 1;
+       int level = 1, nodes_per_package = 1;
        struct cpuinfo_loongarch *c = &current_cpu_data;
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 
+       if (loongson_sysconf.nr_nodes > 1)
+               nodes_per_package = loongson_sysconf.cores_per_package
+                                       / loongson_sysconf.cores_per_node;
+
        if (c->icache.waysize) {
                populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
                populate_cache(icache, this_leaf, level++, CACHE_TYPE_INST);
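
Because the L3 is shared across a node while cacheinfo is populated per CPU, leaves above level 2 are scaled to the whole package. A worked example with assumed values (hypothetical helper, not kernel code):

	static unsigned int package_l3_size(unsigned int per_node_l3,
					    unsigned int cores_per_package,
					    unsigned int cores_per_node)
	{
		/* e.g. 16 cores/package, 4 cores/node -> 4 nodes, so a
		 * 16 MiB per-node L3 is reported as 64 MiB per package */
		return per_node_l3 * (cores_per_package / cores_per_node);
	}
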
index 6c87ea3..529ab8f 100644 (file)
@@ -263,7 +263,7 @@ void cpu_probe(void)
 
        c->cputype      = CPU_UNKNOWN;
        c->processor_id = read_cpucfg(LOONGARCH_CPUCFG0);
-       c->fpu_vers     = (read_cpucfg(LOONGARCH_CPUCFG2) >> 3) & 0x3;
+       c->fpu_vers     = (read_cpucfg(LOONGARCH_CPUCFG2) & CPUCFG2_FPVERS) >> 3;
 
        c->fpu_csr0     = FPU_CSR_RN;
        c->fpu_mask     = FPU_CSR_RSVD;
index d5b3dbc..d53b631 100644 (file)
@@ -27,7 +27,7 @@ SYM_FUNC_START(handle_syscall)
 
        addi.d  sp, sp, -PT_SIZE
        cfi_st  t2, PT_R3
-       cfi_rel_offset  sp, PT_R3
+       cfi_rel_offset  sp, PT_R3
        st.d    zero, sp, PT_R0
        csrrd   t2, LOONGARCH_CSR_PRMD
        st.d    t2, sp, PT_PRMD
@@ -50,7 +50,7 @@ SYM_FUNC_START(handle_syscall)
        cfi_st  a7, PT_R11
        csrrd   ra, LOONGARCH_CSR_ERA
        st.d    ra, sp, PT_ERA
-       cfi_rel_offset ra, PT_ERA
+       cfi_rel_offset  ra, PT_ERA
 
        cfi_st  tp, PT_R2
        cfi_st  u0, PT_R21
index 467946e..82b478a 100644 (file)
@@ -17,21 +17,6 @@ u64 efi_system_table;
 struct loongson_system_configuration loongson_sysconf;
 EXPORT_SYMBOL(loongson_sysconf);
 
-u64 loongson_chipcfg[MAX_PACKAGES];
-u64 loongson_chiptemp[MAX_PACKAGES];
-u64 loongson_freqctrl[MAX_PACKAGES];
-unsigned long long smp_group[MAX_PACKAGES];
-
-static void __init register_addrs_set(u64 *registers, const u64 addr, int num)
-{
-       u64 i;
-
-       for (i = 0; i < num; i++) {
-               *registers = (i << 44) | addr;
-               registers++;
-       }
-}
-
 void __init init_environ(void)
 {
        int efi_boot = fw_arg0;
@@ -50,11 +35,6 @@ void __init init_environ(void)
        efi_memmap_init_early(&data);
        memblock_reserve(data.phys_map & PAGE_MASK,
                         PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK)));
-
-       register_addrs_set(smp_group, TO_UNCACHE(0x1fe01000), 16);
-       register_addrs_set(loongson_chipcfg, TO_UNCACHE(0x1fe00180), 16);
-       register_addrs_set(loongson_chiptemp, TO_UNCACHE(0x1fe0019c), 16);
-       register_addrs_set(loongson_freqctrl, TO_UNCACHE(0x1fe001d0), 16);
 }
 
 static int __init init_cpu_fullname(void)
index 75c6ce0..576b337 100644 (file)
        .endm
 
        .macro sc_save_fp base
-       EX      fst.d $f0,  \base, (0 * FPU_REG_WIDTH)
-       EX      fst.d $f1,  \base, (1 * FPU_REG_WIDTH)
-       EX      fst.d $f2,  \base, (2 * FPU_REG_WIDTH)
-       EX      fst.d $f3,  \base, (3 * FPU_REG_WIDTH)
-       EX      fst.d $f4,  \base, (4 * FPU_REG_WIDTH)
-       EX      fst.d $f5,  \base, (5 * FPU_REG_WIDTH)
-       EX      fst.d $f6,  \base, (6 * FPU_REG_WIDTH)
-       EX      fst.d $f7,  \base, (7 * FPU_REG_WIDTH)
-       EX      fst.d $f8,  \base, (8 * FPU_REG_WIDTH)
-       EX      fst.d $f9,  \base, (9 * FPU_REG_WIDTH)
-       EX      fst.d $f10, \base, (10 * FPU_REG_WIDTH)
-       EX      fst.d $f11, \base, (11 * FPU_REG_WIDTH)
-       EX      fst.d $f12, \base, (12 * FPU_REG_WIDTH)
-       EX      fst.d $f13, \base, (13 * FPU_REG_WIDTH)
-       EX      fst.d $f14, \base, (14 * FPU_REG_WIDTH)
-       EX      fst.d $f15, \base, (15 * FPU_REG_WIDTH)
-       EX      fst.d $f16, \base, (16 * FPU_REG_WIDTH)
-       EX      fst.d $f17, \base, (17 * FPU_REG_WIDTH)
-       EX      fst.d $f18, \base, (18 * FPU_REG_WIDTH)
-       EX      fst.d $f19, \base, (19 * FPU_REG_WIDTH)
-       EX      fst.d $f20, \base, (20 * FPU_REG_WIDTH)
-       EX      fst.d $f21, \base, (21 * FPU_REG_WIDTH)
-       EX      fst.d $f22, \base, (22 * FPU_REG_WIDTH)
-       EX      fst.d $f23, \base, (23 * FPU_REG_WIDTH)
-       EX      fst.d $f24, \base, (24 * FPU_REG_WIDTH)
-       EX      fst.d $f25, \base, (25 * FPU_REG_WIDTH)
-       EX      fst.d $f26, \base, (26 * FPU_REG_WIDTH)
-       EX      fst.d $f27, \base, (27 * FPU_REG_WIDTH)
-       EX      fst.d $f28, \base, (28 * FPU_REG_WIDTH)
-       EX      fst.d $f29, \base, (29 * FPU_REG_WIDTH)
-       EX      fst.d $f30, \base, (30 * FPU_REG_WIDTH)
-       EX      fst.d $f31, \base, (31 * FPU_REG_WIDTH)
+       EX      fst.d   $f0,  \base, (0 * FPU_REG_WIDTH)
+       EX      fst.d   $f1,  \base, (1 * FPU_REG_WIDTH)
+       EX      fst.d   $f2,  \base, (2 * FPU_REG_WIDTH)
+       EX      fst.d   $f3,  \base, (3 * FPU_REG_WIDTH)
+       EX      fst.d   $f4,  \base, (4 * FPU_REG_WIDTH)
+       EX      fst.d   $f5,  \base, (5 * FPU_REG_WIDTH)
+       EX      fst.d   $f6,  \base, (6 * FPU_REG_WIDTH)
+       EX      fst.d   $f7,  \base, (7 * FPU_REG_WIDTH)
+       EX      fst.d   $f8,  \base, (8 * FPU_REG_WIDTH)
+       EX      fst.d   $f9,  \base, (9 * FPU_REG_WIDTH)
+       EX      fst.d   $f10, \base, (10 * FPU_REG_WIDTH)
+       EX      fst.d   $f11, \base, (11 * FPU_REG_WIDTH)
+       EX      fst.d   $f12, \base, (12 * FPU_REG_WIDTH)
+       EX      fst.d   $f13, \base, (13 * FPU_REG_WIDTH)
+       EX      fst.d   $f14, \base, (14 * FPU_REG_WIDTH)
+       EX      fst.d   $f15, \base, (15 * FPU_REG_WIDTH)
+       EX      fst.d   $f16, \base, (16 * FPU_REG_WIDTH)
+       EX      fst.d   $f17, \base, (17 * FPU_REG_WIDTH)
+       EX      fst.d   $f18, \base, (18 * FPU_REG_WIDTH)
+       EX      fst.d   $f19, \base, (19 * FPU_REG_WIDTH)
+       EX      fst.d   $f20, \base, (20 * FPU_REG_WIDTH)
+       EX      fst.d   $f21, \base, (21 * FPU_REG_WIDTH)
+       EX      fst.d   $f22, \base, (22 * FPU_REG_WIDTH)
+       EX      fst.d   $f23, \base, (23 * FPU_REG_WIDTH)
+       EX      fst.d   $f24, \base, (24 * FPU_REG_WIDTH)
+       EX      fst.d   $f25, \base, (25 * FPU_REG_WIDTH)
+       EX      fst.d   $f26, \base, (26 * FPU_REG_WIDTH)
+       EX      fst.d   $f27, \base, (27 * FPU_REG_WIDTH)
+       EX      fst.d   $f28, \base, (28 * FPU_REG_WIDTH)
+       EX      fst.d   $f29, \base, (29 * FPU_REG_WIDTH)
+       EX      fst.d   $f30, \base, (30 * FPU_REG_WIDTH)
+       EX      fst.d   $f31, \base, (31 * FPU_REG_WIDTH)
        .endm
 
        .macro sc_restore_fp base
-       EX      fld.d $f0,  \base, (0 * FPU_REG_WIDTH)
-       EX      fld.d $f1,  \base, (1 * FPU_REG_WIDTH)
-       EX      fld.d $f2,  \base, (2 * FPU_REG_WIDTH)
-       EX      fld.d $f3,  \base, (3 * FPU_REG_WIDTH)
-       EX      fld.d $f4,  \base, (4 * FPU_REG_WIDTH)
-       EX      fld.d $f5,  \base, (5 * FPU_REG_WIDTH)
-       EX      fld.d $f6,  \base, (6 * FPU_REG_WIDTH)
-       EX      fld.d $f7,  \base, (7 * FPU_REG_WIDTH)
-       EX      fld.d $f8,  \base, (8 * FPU_REG_WIDTH)
-       EX      fld.d $f9,  \base, (9 * FPU_REG_WIDTH)
-       EX      fld.d $f10, \base, (10 * FPU_REG_WIDTH)
-       EX      fld.d $f11, \base, (11 * FPU_REG_WIDTH)
-       EX      fld.d $f12, \base, (12 * FPU_REG_WIDTH)
-       EX      fld.d $f13, \base, (13 * FPU_REG_WIDTH)
-       EX      fld.d $f14, \base, (14 * FPU_REG_WIDTH)
-       EX      fld.d $f15, \base, (15 * FPU_REG_WIDTH)
-       EX      fld.d $f16, \base, (16 * FPU_REG_WIDTH)
-       EX      fld.d $f17, \base, (17 * FPU_REG_WIDTH)
-       EX      fld.d $f18, \base, (18 * FPU_REG_WIDTH)
-       EX      fld.d $f19, \base, (19 * FPU_REG_WIDTH)
-       EX      fld.d $f20, \base, (20 * FPU_REG_WIDTH)
-       EX      fld.d $f21, \base, (21 * FPU_REG_WIDTH)
-       EX      fld.d $f22, \base, (22 * FPU_REG_WIDTH)
-       EX      fld.d $f23, \base, (23 * FPU_REG_WIDTH)
-       EX      fld.d $f24, \base, (24 * FPU_REG_WIDTH)
-       EX      fld.d $f25, \base, (25 * FPU_REG_WIDTH)
-       EX      fld.d $f26, \base, (26 * FPU_REG_WIDTH)
-       EX      fld.d $f27, \base, (27 * FPU_REG_WIDTH)
-       EX      fld.d $f28, \base, (28 * FPU_REG_WIDTH)
-       EX      fld.d $f29, \base, (29 * FPU_REG_WIDTH)
-       EX      fld.d $f30, \base, (30 * FPU_REG_WIDTH)
-       EX      fld.d $f31, \base, (31 * FPU_REG_WIDTH)
+       EX      fld.d   $f0,  \base, (0 * FPU_REG_WIDTH)
+       EX      fld.d   $f1,  \base, (1 * FPU_REG_WIDTH)
+       EX      fld.d   $f2,  \base, (2 * FPU_REG_WIDTH)
+       EX      fld.d   $f3,  \base, (3 * FPU_REG_WIDTH)
+       EX      fld.d   $f4,  \base, (4 * FPU_REG_WIDTH)
+       EX      fld.d   $f5,  \base, (5 * FPU_REG_WIDTH)
+       EX      fld.d   $f6,  \base, (6 * FPU_REG_WIDTH)
+       EX      fld.d   $f7,  \base, (7 * FPU_REG_WIDTH)
+       EX      fld.d   $f8,  \base, (8 * FPU_REG_WIDTH)
+       EX      fld.d   $f9,  \base, (9 * FPU_REG_WIDTH)
+       EX      fld.d   $f10, \base, (10 * FPU_REG_WIDTH)
+       EX      fld.d   $f11, \base, (11 * FPU_REG_WIDTH)
+       EX      fld.d   $f12, \base, (12 * FPU_REG_WIDTH)
+       EX      fld.d   $f13, \base, (13 * FPU_REG_WIDTH)
+       EX      fld.d   $f14, \base, (14 * FPU_REG_WIDTH)
+       EX      fld.d   $f15, \base, (15 * FPU_REG_WIDTH)
+       EX      fld.d   $f16, \base, (16 * FPU_REG_WIDTH)
+       EX      fld.d   $f17, \base, (17 * FPU_REG_WIDTH)
+       EX      fld.d   $f18, \base, (18 * FPU_REG_WIDTH)
+       EX      fld.d   $f19, \base, (19 * FPU_REG_WIDTH)
+       EX      fld.d   $f20, \base, (20 * FPU_REG_WIDTH)
+       EX      fld.d   $f21, \base, (21 * FPU_REG_WIDTH)
+       EX      fld.d   $f22, \base, (22 * FPU_REG_WIDTH)
+       EX      fld.d   $f23, \base, (23 * FPU_REG_WIDTH)
+       EX      fld.d   $f24, \base, (24 * FPU_REG_WIDTH)
+       EX      fld.d   $f25, \base, (25 * FPU_REG_WIDTH)
+       EX      fld.d   $f26, \base, (26 * FPU_REG_WIDTH)
+       EX      fld.d   $f27, \base, (27 * FPU_REG_WIDTH)
+       EX      fld.d   $f28, \base, (28 * FPU_REG_WIDTH)
+       EX      fld.d   $f29, \base, (29 * FPU_REG_WIDTH)
+       EX      fld.d   $f30, \base, (30 * FPU_REG_WIDTH)
+       EX      fld.d   $f31, \base, (31 * FPU_REG_WIDTH)
        .endm
 
        .macro sc_save_fcc base, tmp0, tmp1
        movcf2gr        \tmp0, $fcc0
-       move    \tmp1, \tmp0
+       move            \tmp1, \tmp0
        movcf2gr        \tmp0, $fcc1
        bstrins.d       \tmp1, \tmp0, 15, 8
        movcf2gr        \tmp0, $fcc2
        bstrins.d       \tmp1, \tmp0, 55, 48
        movcf2gr        \tmp0, $fcc7
        bstrins.d       \tmp1, \tmp0, 63, 56
-       EX      st.d \tmp1, \base, 0
+       EX      st.d    \tmp1, \base, 0
        .endm
 
        .macro sc_restore_fcc base, tmp0, tmp1
-       EX      ld.d \tmp0, \base, 0
+       EX      ld.d    \tmp0, \base, 0
        bstrpick.d      \tmp1, \tmp0, 7, 0
        movgr2cf        $fcc0, \tmp1
        bstrpick.d      \tmp1, \tmp0, 15, 8
 
        .macro sc_save_fcsr base, tmp0
        movfcsr2gr      \tmp0, fcsr0
-       EX      st.w \tmp0, \base, 0
+       EX      st.w    \tmp0, \base, 0
        .endm
 
        .macro sc_restore_fcsr base, tmp0
-       EX      ld.w \tmp0, \base, 0
+       EX      ld.w    \tmp0, \base, 0
        movgr2fcsr      fcsr0, \tmp0
        .endm
 
-       .macro sc_save_vcsr base, tmp0
-       movfcsr2gr      \tmp0, vcsr16
-       EX      st.w \tmp0, \base, 0
-       .endm
-
-       .macro sc_restore_vcsr base, tmp0
-       EX      ld.w \tmp0, \base, 0
-       movgr2fcsr      vcsr16, \tmp0
-       .endm
-
 /*
  * Save a thread's fp context.
  */
 SYM_FUNC_START(_save_fp)
        fpu_save_csr    a0 t1
-       fpu_save_double a0 t1                   # clobbers t1
+       fpu_save_double a0 t1                   # clobbers t1
        fpu_save_cc     a0 t1 t2                # clobbers t1, t2
-       jirl zero, ra, 0
+       jr              ra
 SYM_FUNC_END(_save_fp)
 EXPORT_SYMBOL(_save_fp)
 
@@ -171,10 +161,10 @@ EXPORT_SYMBOL(_save_fp)
  * Restore a thread's fp context.
  */
 SYM_FUNC_START(_restore_fp)
-       fpu_restore_double a0 t1                # clobbers t1
-       fpu_restore_csr a0 t1
-       fpu_restore_cc  a0 t1 t2                # clobbers t1, t2
-       jirl zero, ra, 0
+       fpu_restore_double      a0 t1           # clobbers t1
+       fpu_restore_csr         a0 t1
+       fpu_restore_cc          a0 t1 t2        # clobbers t1, t2
+       jr                      ra
 SYM_FUNC_END(_restore_fp)
 
 /*
@@ -226,7 +216,7 @@ SYM_FUNC_START(_init_fpu)
        movgr2fr.d      $f30, t1
        movgr2fr.d      $f31, t1
 
-       jirl zero, ra, 0
+       jr      ra
 SYM_FUNC_END(_init_fpu)
 
 /*
@@ -235,11 +225,11 @@ SYM_FUNC_END(_init_fpu)
  * a2: fcsr
  */
 SYM_FUNC_START(_save_fp_context)
-       sc_save_fcc a1 t1 t2
-       sc_save_fcsr a2 t1
-       sc_save_fp a0
-       li.w    a0, 0                                   # success
-       jirl zero, ra, 0
+       sc_save_fcc     a1 t1 t2
+       sc_save_fcsr    a2 t1
+       sc_save_fp      a0
+       li.w            a0, 0                           # success
+       jr              ra
 SYM_FUNC_END(_save_fp_context)
 
 /*
@@ -248,14 +238,14 @@ SYM_FUNC_END(_save_fp_context)
  * a2: fcsr
  */
 SYM_FUNC_START(_restore_fp_context)
-       sc_restore_fp a0
-       sc_restore_fcc a1 t1 t2
-       sc_restore_fcsr a2 t1
-       li.w    a0, 0                                   # success
-       jirl zero, ra, 0
+       sc_restore_fp   a0
+       sc_restore_fcc  a1 t1 t2
+       sc_restore_fcsr a2 t1
+       li.w            a0, 0                           # success
+       jr              ra
 SYM_FUNC_END(_restore_fp_context)
 
 SYM_FUNC_START(fault)
        li.w    a0, -EFAULT                             # failure
-       jirl zero, ra, 0
+       jr      ra
 SYM_FUNC_END(fault)
index 9349685..75e5be8 100644 (file)
@@ -28,23 +28,23 @@ SYM_FUNC_START(__arch_cpu_idle)
        nop
        idle    0
        /* end of rollback region */
-1:     jirl    zero, ra, 0
+1:     jr      ra
 SYM_FUNC_END(__arch_cpu_idle)
 
 SYM_FUNC_START(handle_vint)
        BACKUP_T0T1
        SAVE_ALL
        la.abs  t1, __arch_cpu_idle
-       LONG_L  t0, sp, PT_ERA
+       LONG_L  t0, sp, PT_ERA
        /* 32 byte rollback region */
        ori     t0, t0, 0x1f
        xori    t0, t0, 0x1f
        bne     t0, t1, 1f
-       LONG_S  t0, sp, PT_ERA
+       LONG_S  t0, sp, PT_ERA
 1:     move    a0, sp
        move    a1, sp
        la.abs  t0, do_vint
-       jirl    ra, t0, 0
+       jirl    ra, t0, 0
        RESTORE_ALL_AND_RET
 SYM_FUNC_END(handle_vint)
 
@@ -72,7 +72,7 @@ SYM_FUNC_END(except_vec_cex)
        build_prep_\prep
        move    a0, sp
        la.abs  t0, do_\handler
-       jirl    ra, t0, 0
+       jirl    ra, t0, 0
        RESTORE_ALL_AND_RET
        SYM_FUNC_END(handle_\exception)
        .endm
@@ -91,5 +91,5 @@ SYM_FUNC_END(except_vec_cex)
 
 SYM_FUNC_START(handle_sys)
        la.abs  t0, handle_syscall
-       jirl    zero, t0, 0
+       jr      t0
 SYM_FUNC_END(handle_sys)
index e596dfc..7062cdf 100644 (file)
@@ -14,8 +14,6 @@
 
        __REF
 
-SYM_ENTRY(_stext, SYM_L_GLOBAL, SYM_A_NONE)
-
 SYM_CODE_START(kernel_entry)                   # kernel entry point
 
        /* Config direct window and set PG */
@@ -34,7 +32,7 @@ SYM_CODE_START(kernel_entry)                  # kernel entry point
        /* We might not get launched at the address the kernel is linked to,
           so we jump there.  */
        la.abs          t0, 0f
-       jirl            zero, t0, 0
+       jr              t0
 0:
        la              t0, __bss_start         # clear .bss
        st.d            zero, t0, 0
@@ -52,7 +50,7 @@ SYM_CODE_START(kernel_entry)                  # kernel entry point
        /* KSave3 used for percpu base, initialized as 0 */
        csrwr           zero, PERCPU_BASE_KS
        /* GPR21 used for percpu base (runtime), initialized as 0 */
-       or              u0, zero, zero
+       move            u0, zero
 
        la              tp, init_thread_union
        /* Set the SP after an empty pt_regs.  */
@@ -87,8 +85,8 @@ SYM_CODE_START(smpboot_entry)
        ld.d            sp, t0, CPU_BOOT_STACK
        ld.d            tp, t0, CPU_BOOT_TINFO
 
-       la.abs  t0, 0f
-       jirl    zero, t0, 0
+       la.abs          t0, 0f
+       jr              t0
 0:
        bl              start_secondary
 SYM_CODE_END(smpboot_entry)
index a76f547..a13f925 100644 (file)
@@ -429,7 +429,6 @@ int __init init_numa_memory(void)
        return 0;
 }
 
-EXPORT_SYMBOL(init_numa_memory);
 #endif
 
 void __init paging_init(void)
index e6ab879..dc2b82e 100644 (file)
@@ -193,7 +193,7 @@ static int fpr_set(struct task_struct *target,
                   const void *kbuf, const void __user *ubuf)
 {
        const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
-       const int fcc_end = fcc_start + sizeof(u64);
+       const int fcsr_start = fcc_start + sizeof(u64);
        int err;
 
        BUG_ON(count % sizeof(elf_fpreg_t));
@@ -209,10 +209,12 @@ static int fpr_set(struct task_struct *target,
        if (err)
                return err;
 
-       if (count > 0)
-               err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                         &target->thread.fpu.fcc,
-                                         fcc_start, fcc_end);
+       err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                 &target->thread.fpu.fcc, fcc_start,
+                                 fcc_start + sizeof(u64));
+       err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                 &target->thread.fpu.fcsr, fcsr_start,
+                                 fcsr_start + sizeof(u32));
 
        return err;
 }
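
The fix copies FCSR in addition to FCC. Offsets in the regset, worked out under the LoongArch core-dump layout (32 eight-byte FP registers, then an 8-byte fcc, then a 4-byte fcsr):

	const int fcc_start  = 32 * 8;		/* 256: after the FPRs	*/
	const int fcsr_start = fcc_start + 8;	/* 264: after fcc	*/
	/* the second copyin above covers [264, 268), which the old
	 * code never reached */
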
index 2b86469..800c965 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/console.h>
 
 #include <acpi/reboot.h>
-#include <asm/compiler.h>
 #include <asm/idle.h>
 #include <asm/loongarch.h>
 #include <asm/reboot.h>
index c74860b..8f5c2f9 100644 (file)
@@ -126,7 +126,7 @@ static void __init parse_bios_table(const struct dmi_header *dm)
        char *dmi_data = (char *)dm;
 
        bios_extern = *(dmi_data + SMBIOS_BIOSEXTERN_OFFSET);
-       b_info.bios_size = *(dmi_data + SMBIOS_BIOSSIZE_OFFSET);
+       b_info.bios_size = (*(dmi_data + SMBIOS_BIOSSIZE_OFFSET) + 1) << 6;
 
        if (bios_extern & LOONGSON_EFI_ENABLE)
                set_bit(EFI_BOOT, &efi.flags);
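
Per the SMBIOS specification, the type-0 "BIOS ROM Size" byte n encodes (n + 1) * 64 KiB, which is what "(raw + 1) << 6" computes in KiB; the old code stored the raw byte as-is. A worked example:

	unsigned int bios_size_kib = (0x0f + 1) << 6;	/* raw 0x0f -> 1024 KiB */
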
index 73cec62..0974310 100644 (file)
@@ -278,116 +278,29 @@ void loongson3_cpu_die(unsigned int cpu)
        mb();
 }
 
-/*
- * The target CPU should go to XKPRANGE (uncached area) and flush
- * ICache/DCache/VCache before the control CPU can safely disable its clock.
- */
-static void loongson3_play_dead(int *state_addr)
+void play_dead(void)
 {
-       register int val;
-       register void *addr;
+       register uint64_t addr;
        register void (*init_fn)(void);
 
-       __asm__ __volatile__(
-               "   li.d %[addr], 0x8000000000000000\n"
-               "1: cacop 0x8, %[addr], 0           \n" /* flush ICache */
-               "   cacop 0x8, %[addr], 1           \n"
-               "   cacop 0x8, %[addr], 2           \n"
-               "   cacop 0x8, %[addr], 3           \n"
-               "   cacop 0x9, %[addr], 0           \n" /* flush DCache */
-               "   cacop 0x9, %[addr], 1           \n"
-               "   cacop 0x9, %[addr], 2           \n"
-               "   cacop 0x9, %[addr], 3           \n"
-               "   addi.w %[sets], %[sets], -1     \n"
-               "   addi.d %[addr], %[addr], 0x40   \n"
-               "   bnez %[sets], 1b                \n"
-               "   li.d %[addr], 0x8000000000000000\n"
-               "2: cacop 0xa, %[addr], 0           \n" /* flush VCache */
-               "   cacop 0xa, %[addr], 1           \n"
-               "   cacop 0xa, %[addr], 2           \n"
-               "   cacop 0xa, %[addr], 3           \n"
-               "   cacop 0xa, %[addr], 4           \n"
-               "   cacop 0xa, %[addr], 5           \n"
-               "   cacop 0xa, %[addr], 6           \n"
-               "   cacop 0xa, %[addr], 7           \n"
-               "   cacop 0xa, %[addr], 8           \n"
-               "   cacop 0xa, %[addr], 9           \n"
-               "   cacop 0xa, %[addr], 10          \n"
-               "   cacop 0xa, %[addr], 11          \n"
-               "   cacop 0xa, %[addr], 12          \n"
-               "   cacop 0xa, %[addr], 13          \n"
-               "   cacop 0xa, %[addr], 14          \n"
-               "   cacop 0xa, %[addr], 15          \n"
-               "   addi.w %[vsets], %[vsets], -1   \n"
-               "   addi.d %[addr], %[addr], 0x40   \n"
-               "   bnez   %[vsets], 2b             \n"
-               "   li.w   %[val], 0x7              \n" /* *state_addr = CPU_DEAD; */
-               "   st.w   %[val], %[state_addr], 0 \n"
-               "   dbar 0                          \n"
-               "   cacop 0x11, %[state_addr], 0    \n" /* flush entry of *state_addr */
-               : [addr] "=&r" (addr), [val] "=&r" (val)
-               : [state_addr] "r" (state_addr),
-                 [sets] "r" (cpu_data[smp_processor_id()].dcache.sets),
-                 [vsets] "r" (cpu_data[smp_processor_id()].vcache.sets));
-
+       idle_task_exit();
        local_irq_enable();
-       change_csr_ecfg(ECFG0_IM, ECFGF_IPI);
+       set_csr_ecfg(ECFGF_IPI);
+       __this_cpu_write(cpu_state, CPU_DEAD);
+
+       __smp_mb();
+       do {
+               __asm__ __volatile__("idle 0\n\t");
+               addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
+       } while (addr == 0);
 
-       __asm__ __volatile__(
-               "   idle      0                     \n"
-               "   li.w      $t0, 0x1020           \n"
-               "   iocsrrd.d %[init_fn], $t0       \n" /* Get init PC */
-               : [init_fn] "=&r" (addr)
-               : /* No Input */
-               : "a0");
-       init_fn = __va(addr);
+       init_fn = (void *)TO_CACHE(addr);
+       iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
 
        init_fn();
        unreachable();
 }
 
-void play_dead(void)
-{
-       int *state_addr;
-       unsigned int cpu = smp_processor_id();
-       void (*play_dead_uncached)(int *s);
-
-       idle_task_exit();
-       play_dead_uncached = (void *)TO_UNCACHE(__pa((unsigned long)loongson3_play_dead));
-       state_addr = &per_cpu(cpu_state, cpu);
-       mb();
-       play_dead_uncached(state_addr);
-}
-
-static int loongson3_enable_clock(unsigned int cpu)
-{
-       uint64_t core_id = cpu_data[cpu].core;
-       uint64_t package_id = cpu_data[cpu].package;
-
-       LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
-
-       return 0;
-}
-
-static int loongson3_disable_clock(unsigned int cpu)
-{
-       uint64_t core_id = cpu_data[cpu].core;
-       uint64_t package_id = cpu_data[cpu].package;
-
-       LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
-
-       return 0;
-}
-
-static int register_loongson3_notifier(void)
-{
-       return cpuhp_setup_state_nocalls(CPUHP_LOONGARCH_SOC_PREPARE,
-                                        "loongarch/loongson:prepare",
-                                        loongson3_enable_clock,
-                                        loongson3_disable_clock);
-}
-early_initcall(register_loongson3_notifier);
-
 #endif
 
 /*
index 53e2fa8..37e84ac 100644 (file)
@@ -24,8 +24,8 @@ SYM_FUNC_START(__switch_to)
        move    tp, a2
        cpu_restore_nonscratch a1
 
-       li.w    t0, _THREAD_SIZE - 32
-       PTR_ADD t0, t0, tp
+       li.w            t0, _THREAD_SIZE - 32
+       PTR_ADD         t0, t0, tp
        set_saved_sp    t0, t1, t2
 
        ldptr.d t1, a1, THREAD_CSRPRMD
index e4060f8..1bf58c6 100644 (file)
@@ -475,8 +475,7 @@ asmlinkage void noinstr do_ri(struct pt_regs *regs)
 
        die_if_kernel("Reserved instruction in kernel code", regs);
 
-       if (unlikely(compute_return_era(regs) < 0))
-               goto out;
+       compute_return_era(regs);
 
        if (unlikely(get_user(opcode, era) < 0)) {
                status = SIGSEGV;
index 9d50815..69c76f2 100644 (file)
@@ -37,6 +37,7 @@ SECTIONS
        HEAD_TEXT_SECTION
 
        . = ALIGN(PECOFF_SEGMENT_ALIGN);
+       _stext = .;
        .text : {
                TEXT_TEXT
                SCHED_TEXT
@@ -101,6 +102,7 @@ SECTIONS
 
        STABS_DEBUG
        DWARF_DEBUG
+       ELF_DETAILS
 
        .gptab.sdata : {
                *(.gptab.data)
index 25d9be5..16ba2b8 100644 (file)
@@ -32,7 +32,7 @@ SYM_FUNC_START(__clear_user)
 1:     st.b    zero, a0, 0
        addi.d  a0, a0, 1
        addi.d  a1, a1, -1
-       bgt     a1, zero, 1b
+       bgtz    a1, 1b
 
 2:     move    a0, a1
        jr      ra
index 9ae507f..97d2032 100644 (file)
@@ -35,7 +35,7 @@ SYM_FUNC_START(__copy_user)
        addi.d  a0, a0, 1
        addi.d  a1, a1, 1
        addi.d  a2, a2, -1
-       bgt     a2, zero, 1b
+       bgtz    a2, 1b
 
 3:     move    a0, a2
        jr      ra
index 5d85669..831d476 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/smp.h>
 #include <linux/timex.h>
 
-#include <asm/compiler.h>
 #include <asm/processor.h>
 
 void __delay(unsigned long cycles)
index ddc78ab..4c874a7 100644 (file)
 
        .align 5
 SYM_FUNC_START(clear_page)
-       lu12i.w  t0, 1 << (PAGE_SHIFT - 12)
-       add.d    t0, t0, a0
+       lu12i.w t0, 1 << (PAGE_SHIFT - 12)
+       add.d   t0, t0, a0
 1:
-       st.d     zero, a0, 0
-       st.d     zero, a0, 8
-       st.d     zero, a0, 16
-       st.d     zero, a0, 24
-       st.d     zero, a0, 32
-       st.d     zero, a0, 40
-       st.d     zero, a0, 48
-       st.d     zero, a0, 56
-       addi.d   a0,   a0, 128
-       st.d     zero, a0, -64
-       st.d     zero, a0, -56
-       st.d     zero, a0, -48
-       st.d     zero, a0, -40
-       st.d     zero, a0, -32
-       st.d     zero, a0, -24
-       st.d     zero, a0, -16
-       st.d     zero, a0, -8
-       bne      t0,   a0, 1b
+       st.d    zero, a0, 0
+       st.d    zero, a0, 8
+       st.d    zero, a0, 16
+       st.d    zero, a0, 24
+       st.d    zero, a0, 32
+       st.d    zero, a0, 40
+       st.d    zero, a0, 48
+       st.d    zero, a0, 56
+       addi.d  a0,   a0, 128
+       st.d    zero, a0, -64
+       st.d    zero, a0, -56
+       st.d    zero, a0, -48
+       st.d    zero, a0, -40
+       st.d    zero, a0, -32
+       st.d    zero, a0, -24
+       st.d    zero, a0, -16
+       st.d    zero, a0, -8
+       bne     t0,   a0, 1b
 
-       jirl     $r0, ra, 0
+       jr      ra
 SYM_FUNC_END(clear_page)
 EXPORT_SYMBOL(clear_page)
 
 .align 5
 SYM_FUNC_START(copy_page)
-       lu12i.w  t8, 1 << (PAGE_SHIFT - 12)
-       add.d    t8, t8, a0
+       lu12i.w t8, 1 << (PAGE_SHIFT - 12)
+       add.d   t8, t8, a0
 1:
-       ld.d     t0, a1,  0
-       ld.d     t1, a1,  8
-       ld.d     t2, a1,  16
-       ld.d     t3, a1,  24
-       ld.d     t4, a1,  32
-       ld.d     t5, a1,  40
-       ld.d     t6, a1,  48
-       ld.d     t7, a1,  56
+       ld.d    t0, a1, 0
+       ld.d    t1, a1, 8
+       ld.d    t2, a1, 16
+       ld.d    t3, a1, 24
+       ld.d    t4, a1, 32
+       ld.d    t5, a1, 40
+       ld.d    t6, a1, 48
+       ld.d    t7, a1, 56
 
-       st.d     t0, a0,  0
-       st.d     t1, a0,  8
-       ld.d     t0, a1,  64
-       ld.d     t1, a1,  72
-       st.d     t2, a0,  16
-       st.d     t3, a0,  24
-       ld.d     t2, a1,  80
-       ld.d     t3, a1,  88
-       st.d     t4, a0,  32
-       st.d     t5, a0,  40
-       ld.d     t4, a1,  96
-       ld.d     t5, a1,  104
-       st.d     t6, a0,  48
-       st.d     t7, a0,  56
-       ld.d     t6, a1,  112
-       ld.d     t7, a1,  120
-       addi.d   a0, a0,  128
-       addi.d   a1, a1,  128
+       st.d    t0, a0, 0
+       st.d    t1, a0, 8
+       ld.d    t0, a1, 64
+       ld.d    t1, a1, 72
+       st.d    t2, a0, 16
+       st.d    t3, a0, 24
+       ld.d    t2, a1, 80
+       ld.d    t3, a1, 88
+       st.d    t4, a0, 32
+       st.d    t5, a0, 40
+       ld.d    t4, a1, 96
+       ld.d    t5, a1, 104
+       st.d    t6, a0, 48
+       st.d    t7, a0, 56
+       ld.d    t6, a1, 112
+       ld.d    t7, a1, 120
+       addi.d  a0, a0, 128
+       addi.d  a1, a1, 128
 
-       st.d     t0, a0,  -64
-       st.d     t1, a0,  -56
-       st.d     t2, a0,  -48
-       st.d     t3, a0,  -40
-       st.d     t4, a0,  -32
-       st.d     t5, a0,  -24
-       st.d     t6, a0,  -16
-       st.d     t7, a0,  -8
+       st.d    t0, a0, -64
+       st.d    t1, a0, -56
+       st.d    t2, a0, -48
+       st.d    t3, a0, -40
+       st.d    t4, a0, -32
+       st.d    t5, a0, -24
+       st.d    t6, a0, -16
+       st.d    t7, a0, -8
 
-       bne      t8, a0, 1b
-       jirl     $r0, ra, 0
+       bne     t8, a0, 1b
+       jr      ra
 SYM_FUNC_END(copy_page)
 EXPORT_SYMBOL(copy_page)
index e272f8a..9818ce1 100644 (file)
@@ -281,15 +281,16 @@ void setup_tlb_handler(int cpu)
                if (pcpu_handlers[cpu])
                        return;
 
-               page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, get_order(vec_sz));
+               page = alloc_pages_node(cpu_to_node(cpu), GFP_ATOMIC, get_order(vec_sz));
                if (!page)
                        return;
 
                addr = page_address(page);
-               pcpu_handlers[cpu] = virt_to_phys(addr);
+               pcpu_handlers[cpu] = (unsigned long)addr;
                memcpy((void *)addr, (void *)eentry, vec_sz);
                local_flush_icache_range((unsigned long)addr, (unsigned long)addr + vec_sz);
-               csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_TLBRENTRY);
+               csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
+               csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
                csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
        }
 #endif
index 7eee402..de19fa2 100644 (file)
@@ -18,7 +18,7 @@
        REG_S   a2, sp, PT_BVADDR
        li.w    a1, \write
        la.abs  t0, do_page_fault
-       jirl    ra, t0, 0
+       jirl    ra, t0, 0
        RESTORE_ALL_AND_RET
        SYM_FUNC_END(tlb_do_page_fault_\write)
        .endm
@@ -34,7 +34,7 @@ SYM_FUNC_START(handle_tlb_protect)
        csrrd   a2, LOONGARCH_CSR_BADV
        REG_S   a2, sp, PT_BVADDR
        la.abs  t0, do_page_fault
-       jirl    ra, t0, 0
+       jirl    ra, t0, 0
        RESTORE_ALL_AND_RET
 SYM_FUNC_END(handle_tlb_protect)
 
@@ -47,7 +47,7 @@ SYM_FUNC_START(handle_tlb_load)
         * The vmalloc handling is not in the hotpath.
         */
        csrrd   t0, LOONGARCH_CSR_BADV
-       blt     t0, $r0, vmalloc_load
+       bltz    t0, vmalloc_load
        csrrd   t1, LOONGARCH_CSR_PGDL
 
 vmalloc_done_load:
@@ -80,7 +80,7 @@ vmalloc_done_load:
         * see if we need to jump to huge tlb processing.
         */
        andi    t0, ra, _PAGE_HUGE
-       bne     t0, $r0, tlb_huge_update_load
+       bnez    t0, tlb_huge_update_load
 
        csrrd   t0, LOONGARCH_CSR_BADV
        srli.d  t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -100,12 +100,12 @@ smp_pgtable_change_load:
 
        srli.d  ra, t0, _PAGE_PRESENT_SHIFT
        andi    ra, ra, 1
-       beq     ra, $r0, nopage_tlb_load
+       beqz    ra, nopage_tlb_load
 
        ori     t0, t0, _PAGE_VALID
 #ifdef CONFIG_SMP
        sc.d    t0, t1, 0
-       beq     t0, $r0, smp_pgtable_change_load
+       beqz    t0, smp_pgtable_change_load
 #else
        st.d    t0, t1, 0
 #endif
@@ -139,23 +139,23 @@ tlb_huge_update_load:
 #endif
        srli.d  ra, t0, _PAGE_PRESENT_SHIFT
        andi    ra, ra, 1
-       beq     ra, $r0, nopage_tlb_load
+       beqz    ra, nopage_tlb_load
        tlbsrch
 
        ori     t0, t0, _PAGE_VALID
 #ifdef CONFIG_SMP
        sc.d    t0, t1, 0
-       beq     t0, $r0, tlb_huge_update_load
+       beqz    t0, tlb_huge_update_load
        ld.d    t0, t1, 0
 #else
        st.d    t0, t1, 0
 #endif
-       addu16i.d       t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
-       addi.d  ra, t1, 0
-       csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
+       addu16i.d       t1, zero, -(CSR_TLBIDX_EHINV >> 16)
+       addi.d          ra, t1, 0
+       csrxchg         ra, t1, LOONGARCH_CSR_TLBIDX
        tlbwr
 
-       csrxchg $r0, t1, LOONGARCH_CSR_TLBIDX
+       csrxchg zero, t1, LOONGARCH_CSR_TLBIDX
 
        /*
         * A huge PTE describes an area the size of the
@@ -178,27 +178,27 @@ tlb_huge_update_load:
        addi.d  t0, ra, 0
 
        /* Convert to entrylo1 */
-       addi.d  t1, $r0, 1
+       addi.d  t1, zero, 1
        slli.d  t1, t1, (HPAGE_SHIFT - 1)
        add.d   t0, t0, t1
        csrwr   t0, LOONGARCH_CSR_TLBELO1
 
        /* Set huge page tlb entry size */
-       addu16i.d       t0, $r0, (CSR_TLBIDX_PS >> 16)
-       addu16i.d       t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+       addu16i.d       t0, zero, (CSR_TLBIDX_PS >> 16)
+       addu16i.d       t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
        csrxchg         t1, t0, LOONGARCH_CSR_TLBIDX
 
        tlbfill
 
-       addu16i.d       t0, $r0, (CSR_TLBIDX_PS >> 16)
-       addu16i.d       t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+       addu16i.d       t0, zero, (CSR_TLBIDX_PS >> 16)
+       addu16i.d       t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
        csrxchg         t1, t0, LOONGARCH_CSR_TLBIDX
 
 nopage_tlb_load:
        dbar    0
        csrrd   ra, EXCEPTION_KS2
        la.abs  t0, tlb_do_page_fault_0
-       jirl    $r0, t0, 0
+       jr      t0
 SYM_FUNC_END(handle_tlb_load)
 
 SYM_FUNC_START(handle_tlb_store)
@@ -210,7 +210,7 @@ SYM_FUNC_START(handle_tlb_store)
         * The vmalloc handling is not in the hotpath.
         */
        csrrd   t0, LOONGARCH_CSR_BADV
-       blt     t0, $r0, vmalloc_store
+       bltz    t0, vmalloc_store
        csrrd   t1, LOONGARCH_CSR_PGDL
 
 vmalloc_done_store:
@@ -244,7 +244,7 @@ vmalloc_done_store:
         * see if we need to jump to huge tlb processing.
         */
        andi    t0, ra, _PAGE_HUGE
-       bne     t0, $r0, tlb_huge_update_store
+       bnez    t0, tlb_huge_update_store
 
        csrrd   t0, LOONGARCH_CSR_BADV
        srli.d  t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -265,12 +265,12 @@ smp_pgtable_change_store:
        srli.d  ra, t0, _PAGE_PRESENT_SHIFT
        andi    ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
        xori    ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
-       bne     ra, $r0, nopage_tlb_store
+       bnez    ra, nopage_tlb_store
 
        ori     t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 #ifdef CONFIG_SMP
        sc.d    t0, t1, 0
-       beq     t0, $r0, smp_pgtable_change_store
+       beqz    t0, smp_pgtable_change_store
 #else
        st.d    t0, t1, 0
 #endif
@@ -306,24 +306,24 @@ tlb_huge_update_store:
        srli.d  ra, t0, _PAGE_PRESENT_SHIFT
        andi    ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
        xori    ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
-       bne     ra, $r0, nopage_tlb_store
+       bnez    ra, nopage_tlb_store
 
        tlbsrch
        ori     t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 
 #ifdef CONFIG_SMP
        sc.d    t0, t1, 0
-       beq     t0, $r0, tlb_huge_update_store
+       beqz    t0, tlb_huge_update_store
        ld.d    t0, t1, 0
 #else
        st.d    t0, t1, 0
 #endif
-       addu16i.d       t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
-       addi.d  ra, t1, 0
-       csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
+       addu16i.d       t1, zero, -(CSR_TLBIDX_EHINV >> 16)
+       addi.d          ra, t1, 0
+       csrxchg         ra, t1, LOONGARCH_CSR_TLBIDX
        tlbwr
 
-       csrxchg $r0, t1, LOONGARCH_CSR_TLBIDX
+       csrxchg zero, t1, LOONGARCH_CSR_TLBIDX
        /*
         * A huge PTE describes an area the size of the
         * configured huge page size. This is twice the
@@ -345,28 +345,28 @@ tlb_huge_update_store:
        addi.d  t0, ra, 0
 
        /* Convert to entrylo1 */
-       addi.d  t1, $r0, 1
+       addi.d  t1, zero, 1
        slli.d  t1, t1, (HPAGE_SHIFT - 1)
        add.d   t0, t0, t1
        csrwr   t0, LOONGARCH_CSR_TLBELO1
 
        /* Set huge page tlb entry size */
-       addu16i.d       t0, $r0, (CSR_TLBIDX_PS >> 16)
-       addu16i.d       t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+       addu16i.d       t0, zero, (CSR_TLBIDX_PS >> 16)
+       addu16i.d       t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
        csrxchg         t1, t0, LOONGARCH_CSR_TLBIDX
 
        tlbfill
 
        /* Reset default page size */
-       addu16i.d       t0, $r0, (CSR_TLBIDX_PS >> 16)
-       addu16i.d       t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+       addu16i.d       t0, zero, (CSR_TLBIDX_PS >> 16)
+       addu16i.d       t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
        csrxchg         t1, t0, LOONGARCH_CSR_TLBIDX
 
 nopage_tlb_store:
        dbar    0
        csrrd   ra, EXCEPTION_KS2
        la.abs  t0, tlb_do_page_fault_1
-       jirl    $r0, t0, 0
+       jr      t0
 SYM_FUNC_END(handle_tlb_store)
 
 SYM_FUNC_START(handle_tlb_modify)
@@ -378,7 +378,7 @@ SYM_FUNC_START(handle_tlb_modify)
         * The vmalloc handling is not in the hotpath.
         */
        csrrd   t0, LOONGARCH_CSR_BADV
-       blt     t0, $r0, vmalloc_modify
+       bltz    t0, vmalloc_modify
        csrrd   t1, LOONGARCH_CSR_PGDL
 
 vmalloc_done_modify:
@@ -411,7 +411,7 @@ vmalloc_done_modify:
         * see if we need to jump to huge tlb processing.
         */
        andi    t0, ra, _PAGE_HUGE
-       bne     t0, $r0, tlb_huge_update_modify
+       bnez    t0, tlb_huge_update_modify
 
        csrrd   t0, LOONGARCH_CSR_BADV
        srli.d  t0, t0, (PAGE_SHIFT + PTE_ORDER)
@@ -431,12 +431,12 @@ smp_pgtable_change_modify:
 
        srli.d  ra, t0, _PAGE_WRITE_SHIFT
        andi    ra, ra, 1
-       beq     ra, $r0, nopage_tlb_modify
+       beqz    ra, nopage_tlb_modify
 
        ori     t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 #ifdef CONFIG_SMP
        sc.d    t0, t1, 0
-       beq     t0, $r0, smp_pgtable_change_modify
+       beqz    t0, smp_pgtable_change_modify
 #else
        st.d    t0, t1, 0
 #endif
@@ -454,7 +454,7 @@ leave_modify:
        ertn
 #ifdef CONFIG_64BIT
 vmalloc_modify:
-       la.abs  t1, swapper_pg_dir
+       la.abs  t1, swapper_pg_dir
        b       vmalloc_done_modify
 #endif
 
@@ -471,14 +471,14 @@ tlb_huge_update_modify:
 
        srli.d  ra, t0, _PAGE_WRITE_SHIFT
        andi    ra, ra, 1
-       beq     ra, $r0, nopage_tlb_modify
+       beqz    ra, nopage_tlb_modify
 
        tlbsrch
        ori     t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 
 #ifdef CONFIG_SMP
        sc.d    t0, t1, 0
-       beq     t0, $r0, tlb_huge_update_modify
+       beqz    t0, tlb_huge_update_modify
        ld.d    t0, t1, 0
 #else
        st.d    t0, t1, 0
@@ -504,28 +504,28 @@ tlb_huge_update_modify:
        addi.d  t0, ra, 0
 
        /* Convert to entrylo1 */
-       addi.d  t1, $r0, 1
+       addi.d  t1, zero, 1
        slli.d  t1, t1, (HPAGE_SHIFT - 1)
        add.d   t0, t0, t1
        csrwr   t0, LOONGARCH_CSR_TLBELO1
 
        /* Set huge page tlb entry size */
-       addu16i.d       t0, $r0, (CSR_TLBIDX_PS >> 16)
-       addu16i.d       t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
-       csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+       addu16i.d       t0, zero, (CSR_TLBIDX_PS >> 16)
+       addu16i.d       t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+       csrxchg         t1, t0, LOONGARCH_CSR_TLBIDX
 
        tlbwr
 
        /* Reset default page size */
-       addu16i.d       t0, $r0, (CSR_TLBIDX_PS >> 16)
-       addu16i.d       t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
-       csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+       addu16i.d       t0, zero, (CSR_TLBIDX_PS >> 16)
+       addu16i.d       t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+       csrxchg         t1, t0, LOONGARCH_CSR_TLBIDX
 
 nopage_tlb_modify:
        dbar    0
        csrrd   ra, EXCEPTION_KS2
        la.abs  t0, tlb_do_page_fault_1
-       jirl    $r0, t0, 0
+       jr      t0
 SYM_FUNC_END(handle_tlb_modify)
 
 SYM_FUNC_START(handle_tlb_refill)
index 6b6e167..92e4040 100644 (file)
@@ -21,6 +21,7 @@ ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
 endif
 
 cflags-vdso := $(ccflags-vdso) \
+       -isystem $(shell $(CC) -print-file-name=include) \
        $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
        -O2 -g -fno-strict-aliasing -fno-common -fno-builtin -G0 \
        -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
index f3aa441..e0e9e31 100644 (file)
@@ -155,7 +155,7 @@ config M520x
        select COLDFIRE_PIT_TIMER
        select HAVE_CACHE_SPLIT
        help
-          Freescale Coldfire 5207/5208 processor support.
+         Freescale Coldfire 5207/5208 processor support.
 
 config M523x
        bool "MCF523x"
@@ -322,7 +322,6 @@ config COLDFIRE_SLTIMERS
 
 endif # COLDFIRE
 
-
 comment "Processor Specific Options"
 
 config M68KFPU_EMU
@@ -522,7 +521,7 @@ config CACHE_BOTH
          Split the ColdFire CPU cache, and use half as an instruction cache
          and half as a data cache.
 endchoice
-endif
+endif # HAVE_CACHE_SPLIT
 
 if HAVE_CACHE_CB
 choice
@@ -539,4 +538,4 @@ config CACHE_COPYBACK
        help
          The ColdFire CPU cache is set into Copy-back mode.
 endchoice
-endif
+endif # HAVE_CACHE_CB
index 11b306b..465e28b 100644 (file)
@@ -1,11 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0
 
 config BOOTPARAM
-       bool 'Compiled-in Kernel Boot Parameter'
+       bool "Compiled-in Kernel Boot Parameter"
 
 config BOOTPARAM_STRING
-       string 'Kernel Boot Parameter'
-       default 'console=ttyS0,19200'
+       string "Kernel Boot Parameter"
+       default "console=ttyS0,19200"
        depends on BOOTPARAM
 
 config EARLY_PRINTK
index a104256..53c45cc 100644 (file)
@@ -161,10 +161,11 @@ config VIRT
        select RTC_CLASS
        select RTC_DRV_GOLDFISH
        select TTY
+       select VIRTIO_MENU
        select VIRTIO_MMIO
        help
          This options enable a pure virtual machine based on m68k,
-         VIRTIO MMIO devices and GOLDFISH interfaces (TTY, RTC, PIC)
+         VIRTIO MMIO devices and GOLDFISH interfaces (TTY, RTC, PIC).
 
 config PILOT
        bool
@@ -492,4 +493,4 @@ config ROMKERNEL
 
 endchoice
 
-endif
+endif # !MMU || COLDFIRE
index c181030..a6a886a 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_USERFAULTFD=y
-CONFIG_SLAB=y
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -43,8 +41,9 @@ CONFIG_MQ_IOSCHED_KYBER=m
 CONFIG_IOSCHED_BFQ=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_SLAB=y
 # CONFIG_COMPACTION is not set
-CONFIG_ZPOOL=m
+CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -310,6 +309,7 @@ CONFIG_PARPORT_MFC3=m
 CONFIG_PARPORT_1284=y
 CONFIG_AMIGA_FLOPPY=y
 CONFIG_AMIGA_Z2RAM=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
@@ -580,7 +580,7 @@ CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_SM3_GENERIC=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -595,7 +595,7 @@ CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
@@ -648,11 +648,7 @@ CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
-CONFIG_BITFIELD_KUNIT=m
-CONFIG_RESOURCE_KUNIT_TEST=m
 CONFIG_LINEAR_RANGES_TEST=m
-CONFIG_CMDLINE_KUNIT_TEST=m
-CONFIG_BITS_TEST=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
index 4075564..bffd24c 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_USERFAULTFD=y
-CONFIG_SLAB=y
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -39,8 +37,9 @@ CONFIG_MQ_IOSCHED_KYBER=m
 CONFIG_IOSCHED_BFQ=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_SLAB=y
 # CONFIG_COMPACTION is not set
-CONFIG_ZPOOL=m
+CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -300,6 +299,7 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
@@ -537,7 +537,7 @@ CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_SM3_GENERIC=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -552,7 +552,7 @@ CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
@@ -604,11 +604,7 @@ CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
-CONFIG_BITFIELD_KUNIT=m
-CONFIG_RESOURCE_KUNIT_TEST=m
 CONFIG_LINEAR_RANGES_TEST=m
-CONFIG_CMDLINE_KUNIT_TEST=m
-CONFIG_BITS_TEST=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
index be0d915..0013425 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_USERFAULTFD=y
-CONFIG_SLAB=y
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -46,8 +44,9 @@ CONFIG_MQ_IOSCHED_KYBER=m
 CONFIG_IOSCHED_BFQ=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_SLAB=y
 # CONFIG_COMPACTION is not set
-CONFIG_ZPOOL=m
+CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -311,6 +310,7 @@ CONFIG_PARPORT=m
 CONFIG_PARPORT_ATARI=m
 CONFIG_PARPORT_1284=y
 CONFIG_ATARI_FLOPPY=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
@@ -557,7 +557,7 @@ CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_SM3_GENERIC=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -572,7 +572,7 @@ CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
@@ -625,11 +625,7 @@ CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
-CONFIG_BITFIELD_KUNIT=m
-CONFIG_RESOURCE_KUNIT_TEST=m
 CONFIG_LINEAR_RANGES_TEST=m
-CONFIG_CMDLINE_KUNIT_TEST=m
-CONFIG_BITS_TEST=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
index 9af0e2d..42d9696 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_USERFAULTFD=y
-CONFIG_SLAB=y
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -36,8 +34,9 @@ CONFIG_MQ_IOSCHED_KYBER=m
 CONFIG_IOSCHED_BFQ=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_SLAB=y
 # CONFIG_COMPACTION is not set
-CONFIG_ZPOOL=m
+CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -297,6 +296,7 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
@@ -529,7 +529,7 @@ CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_SM3_GENERIC=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -544,7 +544,7 @@ CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
@@ -596,11 +596,7 @@ CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
-CONFIG_BITFIELD_KUNIT=m
-CONFIG_RESOURCE_KUNIT_TEST=m
 CONFIG_LINEAR_RANGES_TEST=m
-CONFIG_CMDLINE_KUNIT_TEST=m
-CONFIG_BITS_TEST=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
index 49341d6..97d6d9a 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_USERFAULTFD=y
-CONFIG_SLAB=y
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -38,8 +36,9 @@ CONFIG_MQ_IOSCHED_KYBER=m
 CONFIG_IOSCHED_BFQ=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_SLAB=y
 # CONFIG_COMPACTION is not set
-CONFIG_ZPOOL=m
+CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -299,6 +298,7 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
@@ -539,7 +539,7 @@ CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_SM3_GENERIC=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -554,7 +554,7 @@ CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
@@ -606,11 +606,7 @@ CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
-CONFIG_BITFIELD_KUNIT=m
-CONFIG_RESOURCE_KUNIT_TEST=m
 CONFIG_LINEAR_RANGES_TEST=m
-CONFIG_CMDLINE_KUNIT_TEST=m
-CONFIG_BITS_TEST=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
index 92b33d5..8cbfc1c 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_USERFAULTFD=y
-CONFIG_SLAB=y
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -37,8 +35,9 @@ CONFIG_MQ_IOSCHED_KYBER=m
 CONFIG_IOSCHED_BFQ=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_SLAB=y
 # CONFIG_COMPACTION is not set
-CONFIG_ZPOOL=m
+CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -302,6 +301,7 @@ CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_SWIM=m
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
@@ -559,7 +559,7 @@ CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_SM3_GENERIC=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -574,7 +574,7 @@ CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
@@ -627,11 +627,7 @@ CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
-CONFIG_BITFIELD_KUNIT=m
-CONFIG_RESOURCE_KUNIT_TEST=m
 CONFIG_LINEAR_RANGES_TEST=m
-CONFIG_CMDLINE_KUNIT_TEST=m
-CONFIG_BITS_TEST=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
index 6aaa947..9f45fe6 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_USERFAULTFD=y
-CONFIG_SLAB=y
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -57,8 +55,9 @@ CONFIG_MQ_IOSCHED_KYBER=m
 CONFIG_IOSCHED_BFQ=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_SLAB=y
 # CONFIG_COMPACTION is not set
-CONFIG_ZPOOL=m
+CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -331,6 +330,7 @@ CONFIG_AMIGA_FLOPPY=y
 CONFIG_ATARI_FLOPPY=y
 CONFIG_BLK_DEV_SWIM=m
 CONFIG_AMIGA_Z2RAM=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
@@ -645,7 +645,7 @@ CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_SM3_GENERIC=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -660,7 +660,7 @@ CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
@@ -713,11 +713,7 @@ CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
-CONFIG_BITFIELD_KUNIT=m
-CONFIG_RESOURCE_KUNIT_TEST=m
 CONFIG_LINEAR_RANGES_TEST=m
-CONFIG_CMDLINE_KUNIT_TEST=m
-CONFIG_BITS_TEST=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
index b62d65e..4736cfa 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_USERFAULTFD=y
-CONFIG_SLAB=y
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68030=y
@@ -35,8 +33,9 @@ CONFIG_MQ_IOSCHED_KYBER=m
 CONFIG_IOSCHED_BFQ=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_SLAB=y
 # CONFIG_COMPACTION is not set
-CONFIG_ZPOOL=m
+CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -296,6 +295,7 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
@@ -528,7 +528,7 @@ CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_SM3_GENERIC=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -543,7 +543,7 @@ CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
@@ -595,11 +595,7 @@ CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
-CONFIG_BITFIELD_KUNIT=m
-CONFIG_RESOURCE_KUNIT_TEST=m
 CONFIG_LINEAR_RANGES_TEST=m
-CONFIG_CMDLINE_KUNIT_TEST=m
-CONFIG_BITS_TEST=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
index 8ecf261..638cd38 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_USERFAULTFD=y
-CONFIG_SLAB=y
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -36,8 +34,9 @@ CONFIG_MQ_IOSCHED_KYBER=m
 CONFIG_IOSCHED_BFQ=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_SLAB=y
 # CONFIG_COMPACTION is not set
-CONFIG_ZPOOL=m
+CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -297,6 +296,7 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
@@ -529,7 +529,7 @@ CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_SM3_GENERIC=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -544,7 +544,7 @@ CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
@@ -596,11 +596,7 @@ CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
-CONFIG_BITFIELD_KUNIT=m
-CONFIG_RESOURCE_KUNIT_TEST=m
 CONFIG_LINEAR_RANGES_TEST=m
-CONFIG_CMDLINE_KUNIT_TEST=m
-CONFIG_BITS_TEST=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
index 7540d90..ec8b6bb 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_USERFAULTFD=y
-CONFIG_SLAB=y
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -37,8 +35,9 @@ CONFIG_MQ_IOSCHED_KYBER=m
 CONFIG_IOSCHED_BFQ=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_SLAB=y
 # CONFIG_COMPACTION is not set
-CONFIG_ZPOOL=m
+CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -301,6 +300,7 @@ CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_PC=m
 CONFIG_PARPORT_1284=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
@@ -546,7 +546,7 @@ CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_SM3_GENERIC=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -561,7 +561,7 @@ CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
@@ -614,11 +614,7 @@ CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
-CONFIG_BITFIELD_KUNIT=m
-CONFIG_RESOURCE_KUNIT_TEST=m
 CONFIG_LINEAR_RANGES_TEST=m
-CONFIG_CMDLINE_KUNIT_TEST=m
-CONFIG_BITS_TEST=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
index 832b459..7d8dc57 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_USERFAULTFD=y
-CONFIG_SLAB=y
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3=y
@@ -33,8 +31,9 @@ CONFIG_MQ_IOSCHED_KYBER=m
 CONFIG_IOSCHED_BFQ=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_SLAB=y
 # CONFIG_COMPACTION is not set
-CONFIG_ZPOOL=m
+CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -294,6 +293,7 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
@@ -528,7 +528,7 @@ CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_SM3_GENERIC=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -543,7 +543,7 @@ CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
@@ -594,11 +594,7 @@ CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
-CONFIG_BITFIELD_KUNIT=m
-CONFIG_RESOURCE_KUNIT_TEST=m
 CONFIG_LINEAR_RANGES_TEST=m
-CONFIG_CMDLINE_KUNIT_TEST=m
-CONFIG_BITS_TEST=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
index 9171b68..96290ae 100644 (file)
@@ -10,8 +10,6 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_USERFAULTFD=y
-CONFIG_SLAB=y
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3X=y
@@ -33,8 +31,9 @@ CONFIG_MQ_IOSCHED_KYBER=m
 CONFIG_IOSCHED_BFQ=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_SLAB=y
 # CONFIG_COMPACTION is not set
-CONFIG_ZPOOL=m
+CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -294,6 +293,7 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_TEST_ASYNC_DRIVER_PROBE=m
 CONFIG_CONNECTOR=m
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
@@ -527,7 +527,7 @@ CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_SM3_GENERIC=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -542,7 +542,7 @@ CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
@@ -594,11 +594,7 @@ CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
-CONFIG_BITFIELD_KUNIT=m
-CONFIG_RESOURCE_KUNIT_TEST=m
 CONFIG_LINEAR_RANGES_TEST=m
-CONFIG_CMDLINE_KUNIT_TEST=m
-CONFIG_BITS_TEST=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
index 51283db..87c2cd6 100644 (file)
@@ -510,7 +510,7 @@ static inline int fls(unsigned int x)
        return 32 - cnt;
 }
 
-static inline int __fls(int x)
+static inline unsigned long __fls(unsigned long x)
 {
        return fls(x) - 1;
 }
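The new prototype matches the asm-generic contract that callers of __fls() rely on: it takes and returns unsigned long and is undefined for a zero argument. A quick illustration of the expected semantics (a sketch, not kernel code):

        #include <linux/bitops.h>

        /* __fls(x) is the bit number of the most significant set bit. */
        unsigned long a = __fls(1UL);           /* 0 */
        unsigned long b = __fls(0x80000000UL);  /* 31 on 32-bit m68k */

Implementing it as fls(x) - 1 remains correct here, since unsigned long is 32 bits wide on m68k.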
index ffeda9a..d86b400 100644 (file)
@@ -151,6 +151,7 @@ static inline void release_thread(struct task_struct *dead_task)
 }
 
 unsigned long __get_wchan(struct task_struct *p);
+void show_registers(struct pt_regs *regs);
 
 #define        KSTK_EIP(tsk)   \
     ({                 \
index e4db7e2..b091ee9 100644 (file)
 #define BI_VIRT_VIRTIO_BASE    0x8004
 #define BI_VIRT_CTRL_BASE      0x8005
 
+/*
+ * A random seed used to initialize the RNG. Record format:
+ *
+ *   - length       [ 2 bytes, 16-bit big endian ]
+ *   - seed data    [ `length` bytes, padded to preserve 2-byte alignment ]
+ */
+#define BI_VIRT_RNG_SEED       0x8006
+
 #define VIRT_BOOTI_VERSION     MK_BI_VERSION(2, 0)
 
 #endif /* _UAPI_ASM_M68K_BOOTINFO_MAC_H */
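A minimal sketch of walking a record in this format (plain C with hypothetical names; the kernel's real consumer is the BI_VIRT_RNG_SEED case added to virt_parse_bootinfo() further below):

        #include <stdint.h>
        #include <stddef.h>

        /* Returns the seed length and points *seed at the payload. */
        static size_t parse_rng_seed_record(const uint8_t *data, const uint8_t **seed)
        {
                uint16_t len = ((uint16_t)data[0] << 8) | data[1]; /* 16-bit big endian */

                *seed = data + 2;   /* seed bytes follow the length field */
                return len;         /* stored padded to keep 2-byte alignment */
        }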
index 59fc63f..5c8cba0 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/uaccess.h>
 #include <asm/traps.h>
 #include <asm/machdep.h>
+#include <asm/processor.h>
 #include <asm/siginfo.h>
 #include <asm/tlbflush.h>
 
index de156a0..010b3b5 100644 (file)
@@ -25,7 +25,7 @@
  *               check this.)
  * 990605 (jmt) - Rearranged things a bit wrt IOP detection; iop_present is
  *               gone, IOP base addresses are now in an array and the
- *               globally-visible functions take an IOP number instead of an
+ *               globally-visible functions take an IOP number instead of
  *               an actual base address.
  * 990610 (jmt) - Finished the message passing framework and it seems to work.
  *               Sending _definitely_ works; my adb-bus.c mods can send
@@ -66,7 +66,7 @@
  * a shared memory area in the IOP RAM. Each IOP has seven "channels"; each
  * channel is connected to a specific software driver on the IOP. For example
  * on the SCC IOP there is one channel for each serial port. Each channel has
- * an incoming and and outgoing message queue with a depth of one.
+ * an incoming and an outgoing message queue with a depth of one.
  *
  * A message is 32 bytes plus a state byte for the channel (MSG_IDLE, MSG_NEW,
  * MSG_RCVD, MSG_COMPLETE). To send a message you copy the message into the
index e357538..5cbaf6e 100644 (file)
 #include <asm/mac_baboon.h>
 #include <asm/hwtest.h>
 #include <asm/irq_regs.h>
-
-extern void show_registers(struct pt_regs *);
-
-irqreturn_t mac_nmi_handler(int, void *);
+#include <asm/processor.h>
 
 static unsigned int mac_irq_startup(struct irq_data *);
 static void mac_irq_shutdown(struct irq_data *);
@@ -142,6 +139,21 @@ static struct irq_chip mac_irq_chip = {
        .irq_shutdown   = mac_irq_shutdown,
 };
 
+static irqreturn_t mac_nmi_handler(int irq, void *dev_id)
+{
+       static volatile int in_nmi;
+
+       if (in_nmi)
+               return IRQ_HANDLED;
+       in_nmi = 1;
+
+       pr_info("Non-Maskable Interrupt\n");
+       show_registers(get_irq_regs());
+
+       in_nmi = 0;
+       return IRQ_HANDLED;
+}
+
 void __init mac_init_IRQ(void)
 {
        m68k_setup_irq_controller(&mac_irq_chip, handle_simple_irq, IRQ_USER,
@@ -254,18 +266,3 @@ static void mac_irq_shutdown(struct irq_data *data)
        else
                mac_irq_disable(data);
 }
-
-static volatile int in_nmi;
-
-irqreturn_t mac_nmi_handler(int irq, void *dev_id)
-{
-       if (in_nmi)
-               return IRQ_HANDLED;
-       in_nmi = 1;
-
-       pr_info("Non-Maskable Interrupt\n");
-       show_registers(get_irq_regs());
-
-       in_nmi = 0;
-       return IRQ_HANDLED;
-}
index 6886a5d..d15057d 100644 (file)
@@ -32,7 +32,7 @@
  *            33   : frame int (50/200 Hz periodic timer)
  *            34   : sample int (10/20 KHz periodic timer)
  *
-*/
+ */
 
 static void q40_irq_handler(unsigned int, struct pt_regs *fp);
 static void q40_irq_enable(struct irq_data *data);
index 7ec2081..7321b3b 100644 (file)
@@ -211,7 +211,7 @@ void clear_context(unsigned long context)
 
      if(context) {
             if(!ctx_alloc[context])
-                    panic("clear_context: context not allocated\n");
+                    panic("%s: context not allocated\n", __func__);
 
             ctx_alloc[context]->context = SUN3_INVALID_CONTEXT;
             ctx_alloc[context] = (struct mm_struct *)0;
@@ -261,7 +261,7 @@ unsigned long get_free_context(struct mm_struct *mm)
                }
                // check to make sure one was really free...
                if(new == CONTEXTS_NUM)
-                       panic("get_free_context: failed to find free context");
+                       panic("%s: failed to find free context", __func__);
        }
 
        ctx_alloc[new] = mm;
@@ -369,16 +369,15 @@ int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
        }
 
 #ifdef DEBUG_MMU_EMU
-       pr_info("mmu_emu_handle_fault: vaddr=%lx type=%s crp=%p\n",
-               vaddr, read_flag ? "read" : "write", crp);
+       pr_info("%s: vaddr=%lx type=%s crp=%p\n", __func__, vaddr,
+               read_flag ? "read" : "write", crp);
 #endif
 
        segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF;
        offset  = (vaddr >> SUN3_PTE_SIZE_BITS) & 0xF;
 
 #ifdef DEBUG_MMU_EMU
-       pr_info("mmu_emu_handle_fault: segment=%lx offset=%lx\n", segment,
-               offset);
+       pr_info("%s: segment=%lx offset=%lx\n", __func__, segment, offset);
 #endif
 
        pte = (pte_t *) pgd_val (*(crp + segment));
index 632ba20..4ab2294 100644 (file)
@@ -2,6 +2,7 @@
 
 #include <linux/reboot.h>
 #include <linux/serial_core.h>
+#include <linux/random.h>
 #include <clocksource/timer-goldfish.h>
 
 #include <asm/bootinfo.h>
@@ -92,6 +93,16 @@ int __init virt_parse_bootinfo(const struct bi_record *record)
                data += 4;
                virt_bi_data.virtio.irq = be32_to_cpup(data);
                break;
+       case BI_VIRT_RNG_SEED: {
+               u16 len = be16_to_cpup(data);
+               add_bootloader_randomness(data + 2, len);
+               /*
+                * Zero the data to preserve forward secrecy, and zero the
+                * length to prevent kexec from using it.
+                */
+               memzero_explicit((void *)data, len + 2);
+               break;
+       }
        default:
                unknown = 1;
                break;
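The memzero_explicit() above is deliberate: a plain memset() of memory the compiler can prove is never read again may be optimized away, which would leave the seed (and its length, which a kexec'd kernel could otherwise trust) sitting in RAM. A sketch of why the explicit variant survives optimization — this mirrors the idea behind the kernel helper, not its exact implementation:

        #include <string.h>

        static void scrub(void *buf, size_t len)
        {
                memset(buf, 0, len);
                /* compiler barrier: "uses" buf, so the memset cannot be elided */
                __asm__ __volatile__("" : : "r" (buf) : "memory");
        }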
index 95818f9..896aa6e 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/hwtest.h>
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
+#include <asm/processor.h>
 #include <asm/virt.h>
 
 #define GFPIC_REG_IRQ_PENDING           0x04
@@ -19,8 +20,6 @@
 #define GFPIC_REG_IRQ_DISABLE           0x0c
 #define GFPIC_REG_IRQ_ENABLE            0x10
 
-extern void show_registers(struct pt_regs *regs);
-
 static struct resource picres[6];
 static const char *picname[6] = {
        "goldfish_pic.0",
index cb820f1..1560c41 100644 (file)
@@ -8,20 +8,15 @@
 
 #define VIRTIO_BUS_NB  128
 
-static int __init virt_virtio_init(unsigned int id)
+static struct platform_device * __init virt_virtio_init(unsigned int id)
 {
        const struct resource res[] = {
                DEFINE_RES_MEM(virt_bi_data.virtio.mmio + id * 0x200, 0x200),
                DEFINE_RES_IRQ(virt_bi_data.virtio.irq + id),
        };
-       struct platform_device *pdev;
 
-       pdev = platform_device_register_simple("virtio-mmio", id,
+       return platform_device_register_simple("virtio-mmio", id,
                                               res, ARRAY_SIZE(res));
-       if (IS_ERR(pdev))
-               return PTR_ERR(pdev);
-
-       return 0;
 }
 
 static int __init virt_platform_init(void)
@@ -35,8 +30,10 @@ static int __init virt_platform_init(void)
                DEFINE_RES_MEM(virt_bi_data.rtc.mmio + 0x1000, 0x1000),
                DEFINE_RES_IRQ(virt_bi_data.rtc.irq + 1),
        };
-       struct platform_device *pdev;
+       struct platform_device *pdev1, *pdev2;
+       struct platform_device *pdevs[VIRTIO_BUS_NB];
        unsigned int i;
+       int ret = 0;
 
        if (!MACH_IS_VIRT)
                return -ENODEV;
@@ -44,29 +41,40 @@ static int __init virt_platform_init(void)
        /* We need this to have DMA'able memory provided to goldfish-tty */
        min_low_pfn = 0;
 
-       pdev = platform_device_register_simple("goldfish_tty",
-                                              PLATFORM_DEVID_NONE,
-                                              goldfish_tty_res,
-                                              ARRAY_SIZE(goldfish_tty_res));
-       if (IS_ERR(pdev))
-               return PTR_ERR(pdev);
+       pdev1 = platform_device_register_simple("goldfish_tty",
+                                               PLATFORM_DEVID_NONE,
+                                               goldfish_tty_res,
+                                               ARRAY_SIZE(goldfish_tty_res));
+       if (IS_ERR(pdev1))
+               return PTR_ERR(pdev1);
 
-       pdev = platform_device_register_simple("goldfish_rtc",
-                                              PLATFORM_DEVID_NONE,
-                                              goldfish_rtc_res,
-                                              ARRAY_SIZE(goldfish_rtc_res));
-       if (IS_ERR(pdev))
-               return PTR_ERR(pdev);
+       pdev2 = platform_device_register_simple("goldfish_rtc",
+                                               PLATFORM_DEVID_NONE,
+                                               goldfish_rtc_res,
+                                               ARRAY_SIZE(goldfish_rtc_res));
+       if (IS_ERR(pdev2)) {
+               ret = PTR_ERR(pdev2);
+               goto err_unregister_tty;
+       }
 
        for (i = 0; i < VIRTIO_BUS_NB; i++) {
-               int err;
-
-               err = virt_virtio_init(i);
-               if (err)
-                       return err;
+               pdevs[i] = virt_virtio_init(i);
+               if (IS_ERR(pdevs[i])) {
+                       ret = PTR_ERR(pdevs[i]);
+                       goto err_unregister_rtc_virtio;
+               }
        }
 
        return 0;
+
+err_unregister_rtc_virtio:
+       while (i > 0)
+               platform_device_unregister(pdevs[--i]);
+       platform_device_unregister(pdev2);
+err_unregister_tty:
+       platform_device_unregister(pdev1);
+
+       return ret;
 }
 
 arch_initcall(virt_platform_init);
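The reworked registration path follows the standard kernel unwind idiom: on failure, everything registered so far is torn down in reverse order of creation before the error is propagated. In general form (a sketch; N, create() and destroy() are hypothetical stand-ins):

        #include <linux/err.h>

        static struct platform_device *objs[N];

        static int register_all(void)
        {
                int i, ret = 0;

                for (i = 0; i < N; i++) {
                        objs[i] = create(i);
                        if (IS_ERR(objs[i])) {
                                ret = PTR_ERR(objs[i]);
                                goto err_unwind;
                        }
                }
                return 0;

        err_unwind:
                while (i > 0)
                        destroy(objs[--i]);     /* reverse order of creation */
                return ret;
        }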
index b0a034b..42e6966 100644 (file)
 
                clocks = <&cgu X1000_CLK_RTCLK>,
                         <&cgu X1000_CLK_EXCLK>,
-                        <&cgu X1000_CLK_PCLK>;
-               clock-names = "rtc", "ext", "pclk";
+                        <&cgu X1000_CLK_PCLK>,
+                        <&cgu X1000_CLK_TCU>;
+               clock-names = "rtc", "ext", "pclk", "tcu";
 
                interrupt-controller;
                #interrupt-cells = <1>;
index dbf21af..65a5da7 100644 (file)
 
                clocks = <&cgu X1830_CLK_RTCLK>,
                         <&cgu X1830_CLK_EXCLK>,
-                        <&cgu X1830_CLK_PCLK>;
-               clock-names = "rtc", "ext", "pclk";
+                        <&cgu X1830_CLK_PCLK>,
+                        <&cgu X1830_CLK_TCU>;
+               clock-names = "rtc", "ext", "pclk", "tcu";
 
                interrupt-controller;
                #interrupt-cells = <1>;
index a89aaad..930c450 100644 (file)
@@ -44,6 +44,7 @@ static __init unsigned int ranchu_measure_hpt_freq(void)
                      __func__);
 
        rtc_base = of_iomap(np, 0);
+       of_node_put(np);
        if (!rtc_base)
                panic("%s(): Failed to ioremap Goldfish RTC base!", __func__);
 
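This hunk and several that follow fix the same class of leak: of_find_compatible_node() and related lookups return a device node with an elevated refcount, which the caller must drop with of_node_put() once the node — or a resource derived from it, such as an ioremap — has been obtained. The pattern, sketched with a hypothetical compatible string:

        #include <linux/of.h>
        #include <linux/of_address.h>

        static void __iomem *map_example_device(void)
        {
                struct device_node *np;
                void __iomem *base;

                np = of_find_compatible_node(NULL, NULL, "vendor,example-device");
                if (!np)
                        return NULL;

                base = of_iomap(np, 0); /* the mapping outlives the node reference */
                of_node_put(np);        /* balance the refcount taken by the find */
                return base;
        }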
index 5204fc6..1187729 100644 (file)
@@ -208,6 +208,12 @@ void __init ltq_soc_init(void)
                        of_address_to_resource(np_sysgpe, 0, &res_sys[2]))
                panic("Failed to get core resources");
 
+       of_node_put(np_status);
+       of_node_put(np_ebu);
+       of_node_put(np_sys1);
+       of_node_put(np_syseth);
+       of_node_put(np_sysgpe);
+
        if ((request_mem_region(res_status.start, resource_size(&res_status),
                                res_status.name) < 0) ||
                (request_mem_region(res_ebu.start, resource_size(&res_ebu),
index b732495..20622bf 100644 (file)
@@ -408,6 +408,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
                if (!ltq_eiu_membase)
                        panic("Failed to remap eiu memory");
        }
+       of_node_put(eiu_node);
 
        return 0;
 }
index 084f6ca..d444a1b 100644 (file)
@@ -441,6 +441,10 @@ void __init ltq_soc_init(void)
                        of_address_to_resource(np_ebu, 0, &res_ebu))
                panic("Failed to get core resources");
 
+       of_node_put(np_pmu);
+       of_node_put(np_cgu);
+       of_node_put(np_ebu);
+
        if (!request_mem_region(res_pmu.start, resource_size(&res_pmu),
                                res_pmu.name) ||
                !request_mem_region(res_cgu.start, resource_size(&res_cgu),
index bbf1e38..2cb708c 100644 (file)
@@ -214,6 +214,8 @@ static void update_gic_frequency_dt(void)
 
        if (of_update_property(node, &gic_frequency_prop) < 0)
                pr_err("error updating gic frequency property\n");
+
+       of_node_put(node);
 }
 
 #endif
index 1299156..d9c8c4e 100644 (file)
@@ -98,13 +98,18 @@ static int __init pic32_of_prepare_platform_data(struct of_dev_auxdata *lookup)
                np = of_find_compatible_node(NULL, NULL, lookup->compatible);
                if (np) {
                        lookup->name = (char *)np->name;
-                       if (lookup->phys_addr)
+                       if (lookup->phys_addr) {
+                               of_node_put(np);
                                continue;
+                       }
                        if (!of_address_to_resource(np, 0, &res))
                                lookup->phys_addr = res.start;
+                       of_node_put(np);
                }
        }
 
+       of_node_put(root);
+
        return 0;
 }
 
index 7174e9a..777b515 100644 (file)
@@ -32,6 +32,9 @@ static unsigned int pic32_xlate_core_timer_irq(void)
                goto default_map;
 
        irq = irq_of_parse_and_map(node, 0);
+
+       of_node_put(node);
+
        if (!irq)
                goto default_map;
 
index 587c7b9..ea8072a 100644 (file)
@@ -40,6 +40,8 @@ __iomem void *plat_of_remap_node(const char *node)
        if (of_address_to_resource(np, 0, &res))
                panic("Failed to get resource for %s", node);
 
+       of_node_put(np);
+
        if (!request_mem_region(res.start,
                                resource_size(&res),
                                res.name))
index 7b7f25b..9240bcd 100644 (file)
@@ -640,8 +640,6 @@ static int icu_get_irq(unsigned int irq)
 
        printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
 
-       atomic_inc(&irq_err_count);
-
        return -1;
 }
 
index 8ae15c2..c6ad6f8 100644 (file)
@@ -25,7 +25,7 @@ struct or1k_frameinfo {
 /*
  * Verify a frameinfo structure.  The return address should be a valid text
  * address.  The frame pointer may be null if its the last frame, otherwise
- * the frame pointer should point to a location in the stack after the the
+ * the frame pointer should point to a location in the stack after the
  * top of the next frame up.
  */
 static inline int or1k_frameinfo_valid(struct or1k_frameinfo *frameinfo)
index 5f2448d..fa40005 100644 (file)
@@ -10,6 +10,7 @@ config PARISC
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_STRICT_KERNEL_RWX
+       select ARCH_HAS_STRICT_MODULE_RWX
        select ARCH_HAS_UBSAN_SANITIZE_ALL
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_NO_SG_CHAIN
index d63a2ac..55d29c4 100644 (file)
@@ -12,7 +12,7 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
 }
 
-#if defined(CONFIG_STI_CONSOLE) || defined(CONFIG_FB_STI)
+#if defined(CONFIG_FB_STI)
 int fb_is_primary_device(struct fb_info *info);
 #else
 static inline int fb_is_primary_device(struct fb_info *info)
index 2673d57..94652e1 100644 (file)
@@ -224,8 +224,13 @@ int main(void)
        BLANK();
        DEFINE(ASM_SIGFRAME_SIZE, PARISC_RT_SIGFRAME_SIZE);
        DEFINE(SIGFRAME_CONTEXT_REGS, offsetof(struct rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE);
+#ifdef CONFIG_64BIT
        DEFINE(ASM_SIGFRAME_SIZE32, PARISC_RT_SIGFRAME_SIZE32);
        DEFINE(SIGFRAME_CONTEXT_REGS32, offsetof(struct compat_rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE32);
+#else
+       DEFINE(ASM_SIGFRAME_SIZE32, PARISC_RT_SIGFRAME_SIZE);
+       DEFINE(SIGFRAME_CONTEXT_REGS32, offsetof(struct rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE);
+#endif
        BLANK();
        DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
        DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
index c8a11fc..a9bc578 100644 (file)
@@ -722,7 +722,10 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon
                return;
 
        if (parisc_requires_coherency()) {
-               flush_user_cache_page(vma, vmaddr);
+               if (vma->vm_flags & VM_SHARED)
+                       flush_data_cache();
+               else
+                       flush_user_cache_page(vma, vmaddr);
                return;
        }
 
index ed1e88a..bac581b 100644 (file)
@@ -146,7 +146,7 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
 "      depw    %%r0,31,2,%4\n"
 "1:    ldw     0(%%sr1,%4),%0\n"
 "2:    ldw     4(%%sr1,%4),%3\n"
-"      subi    32,%4,%2\n"
+"      subi    32,%2,%2\n"
 "      mtctl   %2,11\n"
 "      vshd    %0,%3,%0\n"
 "3:    \n"
index 494ca41..d41ddb3 100644 (file)
@@ -102,7 +102,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
      * that happen.  Want to keep this overhead low, but still provide
      * some information to the customer.  All exits from this routine
      * need to restore Fpu_register[0]
-    */
+     */
 
     bflags=(Fpu_register[0] & 0xf8000000);
     Fpu_register[0] &= 0x07ffffff;
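Back in the emulate_ldw() fix above, the one-character change (%4 → %2) corrects the shift amount fed through mtctl to the vshd merge of the two aligned words. The underlying technique, sketched in C for a big-endian machine such as parisc (an illustration of the approach, not the kernel's exact register assignment):

        #include <stdint.h>

        static uint32_t load_unaligned_be32(uintptr_t addr)
        {
                const uint32_t *p = (const uint32_t *)(addr & ~(uintptr_t)3);
                unsigned int sh = (addr & 3) * 8;  /* bit offset into the first word */

                if (sh == 0)
                        return p[0];
                /* funnel shift: high bits from the first word, rest from the second */
                return (p[0] << sh) | (p[1] >> (32 - sh));
        }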
index c2ce2e6..4d8f26c 100644 (file)
@@ -256,6 +256,7 @@ config PPC
        select IRQ_FORCED_THREADING
        select MMU_GATHER_PAGE_SIZE
        select MMU_GATHER_RCU_TABLE_FREE
+       select MMU_GATHER_MERGE_VMAS
        select MODULES_USE_ELF_RELA
        select NEED_DMA_MAP_STATE               if PPC64 || NOT_COHERENT_CACHE
        select NEED_PER_CPU_EMBED_FIRST_CHUNK   if PPC64
@@ -281,6 +282,10 @@ config PPC
        # Please keep this list sorted alphabetically.
        #
 
+config PPC_LONG_DOUBLE_128
+       depends on PPC64
+       def_bool $(success,test "$(shell,echo __LONG_DOUBLE_128__ | $(CC) -E -P -)" = 1)
+
 config PPC_BARRIER_NOSPEC
        bool
        default y
@@ -358,6 +363,10 @@ config ARCH_SUSPEND_NONZERO_CPU
        def_bool y
        depends on PPC_POWERNV || PPC_PSERIES
 
+config ARCH_HAS_ADD_PAGES
+       def_bool y
+       depends on ARCH_ENABLE_MEMORY_HOTPLUG
+
 config PPC_DCR_NATIVE
        bool
 
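The new PPC_LONG_DOUBLE_128 symbol above probes the toolchain instead of hard-coding an assumption: it pipes the token __LONG_DOUBLE_128__ through the preprocessor and checks whether it expands to 1. What the probe detects, expressed as a compile-time check (sketch):

        /* Predefined by the compiler when long double uses the 128-bit ABI. */
        #ifdef __LONG_DOUBLE_128__
        _Static_assert(sizeof(long double) == 16, "128-bit long double ABI");
        #endif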
diff --git a/arch/powerpc/include/asm/bpf_perf_event.h b/arch/powerpc/include/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..e8a7b4f
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BPF_PERF_EVENT_H
+#define _ASM_POWERPC_BPF_PERF_EVENT_H
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _ASM_POWERPC_BPF_PERF_EVENT_H */
index 09a9ae5..b3de610 100644 (file)
@@ -19,8 +19,6 @@
 
 #include <linux/pagemap.h>
 
-#define tlb_start_vma(tlb, vma)        do { } while (0)
-#define tlb_end_vma(tlb, vma)  do { } while (0)
 #define __tlb_remove_tlb_entry __tlb_remove_tlb_entry
 
 #define tlb_flush tlb_flush
diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h
deleted file mode 100644 (file)
index 5e1e648..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
-#define _UAPI__ASM_BPF_PERF_EVENT_H__
-
-#include <asm/ptrace.h>
-
-typedef struct user_pt_regs bpf_user_pt_regs_t;
-
-#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
index f91f0f2..c8cf924 100644 (file)
@@ -20,6 +20,7 @@ CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom_init.o += -fno-stack-protector
 CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING
 CFLAGS_prom_init.o += -ffreestanding
+CFLAGS_prom_init.o += $(call cc-option, -ftrivial-auto-var-init=uninitialized)
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
index ee04338..0fbda89 100644 (file)
@@ -1855,7 +1855,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
                tm_reclaim_current(0);
 #endif
 
-       memset(regs->gpr, 0, sizeof(regs->gpr));
+       memset(&regs->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0]));
        regs->ctr = 0;
        regs->link = 0;
        regs->xer = 0;
index 04694ec..13d6cb1 100644 (file)
@@ -2302,7 +2302,7 @@ static void __init prom_init_stdout(void)
 
 static int __init prom_find_machine_type(void)
 {
-       char compat[256];
+       static char compat[256] __prombss;
        int len, i = 0;
 #ifdef CONFIG_PPC64
        phandle rtas;
index b183ab9..dfa5f72 100644 (file)
@@ -13,7 +13,7 @@
 # If you really need to reference something from prom_init.o add
 # it to the list below:
 
-grep "^CONFIG_KASAN=y$" .config >/dev/null
+grep "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} >/dev/null
 if [ $? -eq 0 ]
 then
        MEM_FUNCS="__memcpy __memset"
index a6fce31..6931339 100644 (file)
@@ -1071,7 +1071,7 @@ static struct rtas_filter rtas_filters[] __ro_after_init = {
        { "get-time-of-day", -1, -1, -1, -1, -1 },
        { "ibm,get-vpd", -1, 0, -1, 1, 2 },
        { "ibm,lpar-perftools", -1, 2, 3, -1, -1 },
-       { "ibm,platform-dump", -1, 4, 5, -1, -1 },
+       { "ibm,platform-dump", -1, 4, 5, -1, -1 },              /* Special cased */
        { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 },
        { "ibm,scan-log-dump", -1, 0, 1, -1, -1 },
        { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 },
@@ -1120,6 +1120,15 @@ static bool block_rtas_call(int token, int nargs,
                                size = 1;
 
                        end = base + size - 1;
+
+                       /*
+                        * Special case for ibm,platform-dump - NULL buffer
+                        * address is used to indicate end of dump processing
+                        */
+                       if (!strcmp(f->name, "ibm,platform-dump") &&
+                           base == 0)
+                               return false;
+
                        if (!in_rmo_buf(base, end))
                                goto err;
                }
index eb0077b..1a02629 100644 (file)
@@ -935,12 +935,6 @@ void __init setup_arch(char **cmdline_p)
        /* Print various info about the machine that has been gathered so far. */
        print_system_info();
 
-       /* Reserve large chunks of memory for use by CMA for KVM. */
-       kvm_cma_reserve();
-
-       /*  Reserve large chunks of memory for us by CMA for hugetlb */
-       gigantic_hugetlb_cma_reserve();
-
        klp_init_thread_info(&init_task);
 
        setup_initial_init_mm(_stext, _etext, _edata, _end);
@@ -955,6 +949,13 @@ void __init setup_arch(char **cmdline_p)
 
        initmem_init();
 
+       /*
+        * Reserve large chunks of memory for use by CMA for KVM and hugetlb. These must
+        * be called after initmem_init(), so that pageblock_order is initialised.
+        */
+       kvm_cma_reserve();
+       gigantic_hugetlb_cma_reserve();
+
        early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
 
        if (ppc_md.setup_arch)
index 52b7768..a97128a 100644 (file)
@@ -105,6 +105,37 @@ void __ref arch_remove_linear_mapping(u64 start, u64 size)
        vm_unmap_aliases();
 }
 
+/*
+ * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
+ * updating.
+ */
+static void update_end_of_memory_vars(u64 start, u64 size)
+{
+       unsigned long end_pfn = PFN_UP(start + size);
+
+       if (end_pfn > max_pfn) {
+               max_pfn = end_pfn;
+               max_low_pfn = end_pfn;
+               high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
+       }
+}
+
+int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+                   struct mhp_params *params)
+{
+       int ret;
+
+       ret = __add_pages(nid, start_pfn, nr_pages, params);
+       if (ret)
+               return ret;
+
+       /* update max_pfn, max_low_pfn and high_memory */
+       update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
+                                 nr_pages << PAGE_SHIFT);
+
+       return ret;
+}
+
 int __ref arch_add_memory(int nid, u64 start, u64 size,
                          struct mhp_params *params)
 {
@@ -115,7 +146,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
        rc = arch_create_linear_mapping(nid, start, size, params);
        if (rc)
                return rc;
-       rc = __add_pages(nid, start_pfn, nr_pages, params);
+       rc = add_pages(nid, start_pfn, nr_pages, params);
        if (rc)
                arch_remove_linear_mapping(start, size);
        return rc;
index 7d4368d..b80fc4a 100644 (file)
@@ -96,8 +96,8 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
                pgdp = pgd_offset_k(ea);
                p4dp = p4d_offset(pgdp, ea);
                if (p4d_none(*p4dp)) {
-                       pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
-                       p4d_populate(&init_mm, p4dp, pmdp);
+                       pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
+                       p4d_populate(&init_mm, p4dp, pudp);
                }
                pudp = pud_offset(p4dp, ea);
                if (pud_none(*pudp)) {
@@ -106,7 +106,7 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
                }
                pmdp = pmd_offset(pudp, ea);
                if (!pmd_present(*pmdp)) {
-                       ptep = early_alloc_pgtable(PAGE_SIZE);
+                       ptep = early_alloc_pgtable(PTE_TABLE_SIZE);
                        pmd_populate_kernel(&init_mm, pmdp, ptep);
                }
                ptep = pte_offset_kernel(pmdp, ea);
diff --git a/arch/powerpc/platforms/microwatt/microwatt.h b/arch/powerpc/platforms/microwatt/microwatt.h
new file mode 100644 (file)
index 0000000..335417e
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MICROWATT_H
+#define _MICROWATT_H
+
+void microwatt_rng_init(void);
+
+#endif /* _MICROWATT_H */
index 7bc4d1c..8ece87d 100644 (file)
@@ -11,6 +11,7 @@
 #include <asm/archrandom.h>
 #include <asm/cputable.h>
 #include <asm/machdep.h>
+#include "microwatt.h"
 
 #define DARN_ERR 0xFFFFFFFFFFFFFFFFul
 
@@ -29,7 +30,7 @@ static int microwatt_get_random_darn(unsigned long *v)
        return 1;
 }
 
-static __init int rng_init(void)
+void __init microwatt_rng_init(void)
 {
        unsigned long val;
        int i;
@@ -37,12 +38,7 @@ static __init int rng_init(void)
        for (i = 0; i < 10; i++) {
                if (microwatt_get_random_darn(&val)) {
                        ppc_md.get_random_seed = microwatt_get_random_darn;
-                       return 0;
+                       return;
                }
        }
-
-       pr_warn("Unable to use DARN for get_random_seed()\n");
-
-       return -EIO;
 }
-machine_subsys_initcall(, rng_init);
index 0b02603..6b32539 100644 (file)
@@ -16,6 +16,8 @@
 #include <asm/xics.h>
 #include <asm/udbg.h>
 
+#include "microwatt.h"
+
 static void __init microwatt_init_IRQ(void)
 {
        xics_init();
@@ -32,10 +34,16 @@ static int __init microwatt_populate(void)
 }
 machine_arch_initcall(microwatt, microwatt_populate);
 
+static void __init microwatt_setup_arch(void)
+{
+       microwatt_rng_init();
+}
+
 define_machine(microwatt) {
        .name                   = "microwatt",
        .probe                  = microwatt_probe,
        .init_IRQ               = microwatt_init_IRQ,
+       .setup_arch             = microwatt_setup_arch,
        .progress               = udbg_progress,
        .calibrate_decr         = generic_calibrate_decr,
 };
index e297bf4..866efdc 100644 (file)
@@ -42,4 +42,6 @@ ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count);
 u32 __init memcons_get_size(struct memcons *mc);
 struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name);
 
+void pnv_rng_init(void);
+
 #endif /* _POWERNV_H */
index e3d44b3..3805ad1 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/smp.h>
+#include "powernv.h"
 
 #define DARN_ERR 0xFFFFFFFFFFFFFFFFul
 
@@ -28,7 +29,6 @@ struct powernv_rng {
 
 static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng);
 
-
 int powernv_hwrng_present(void)
 {
        struct powernv_rng *rng;
@@ -98,9 +98,6 @@ static int __init initialise_darn(void)
                        return 0;
                }
        }
-
-       pr_warn("Unable to use DARN for get_random_seed()\n");
-
        return -EIO;
 }
 
@@ -163,32 +160,59 @@ static __init int rng_create(struct device_node *dn)
 
        rng_init_per_cpu(rng, dn);
 
-       pr_info_once("Registering arch random hook.\n");
-
        ppc_md.get_random_seed = powernv_get_random_long;
 
        return 0;
 }
 
-static __init int rng_init(void)
+static int __init pnv_get_random_long_early(unsigned long *v)
 {
        struct device_node *dn;
-       int rc;
-
-       for_each_compatible_node(dn, NULL, "ibm,power-rng") {
-               rc = rng_create(dn);
-               if (rc) {
-                       pr_err("Failed creating rng for %pOF (%d).\n",
-                               dn, rc);
-                       continue;
-               }
 
-               /* Create devices for hwrng driver */
-               of_platform_device_create(dn, NULL, NULL);
-       }
+       if (!slab_is_available())
+               return 0;
+
+       if (cmpxchg(&ppc_md.get_random_seed, pnv_get_random_long_early,
+                   NULL) != pnv_get_random_long_early)
+               return 0;
+
+       for_each_compatible_node(dn, NULL, "ibm,power-rng")
+               rng_create(dn);
+
+       if (!ppc_md.get_random_seed)
+               return 0;
+       return ppc_md.get_random_seed(v);
+}
 
-       initialise_darn();
+void __init pnv_rng_init(void)
+{
+       struct device_node *dn;
+
+       /* Prefer darn over the rest. */
+       if (!initialise_darn())
+               return;
+
+       dn = of_find_compatible_node(NULL, NULL, "ibm,power-rng");
+       if (dn)
+               ppc_md.get_random_seed = pnv_get_random_long_early;
+
+       of_node_put(dn);
+}
+
+static int __init pnv_rng_late_init(void)
+{
+       struct device_node *dn;
+       unsigned long v;
+
+       /* In case it wasn't called during init for some other reason. */
+       if (ppc_md.get_random_seed == pnv_get_random_long_early)
+               pnv_get_random_long_early(&v);
+
+       if (ppc_md.get_random_seed == powernv_get_random_long) {
+               for_each_compatible_node(dn, NULL, "ibm,power-rng")
+                       of_platform_device_create(dn, NULL, NULL);
+       }
 
        return 0;
 }
-machine_subsys_initcall(powernv, rng_init);
+machine_subsys_initcall(powernv, pnv_rng_late_init);
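
The early hook above uses cmpxchg() to unregister itself exactly once, so concurrent callers cannot race to probe the device tree twice. The idiom in isolation (a sketch; install_real_provider() is a hypothetical stand-in for the rng_create() loop):

static int __init early_seed(unsigned long *v)
{
	/* Only the caller that successfully swaps the hook out proceeds. */
	if (cmpxchg(&ppc_md.get_random_seed, early_seed, NULL) != early_seed)
		return 0;

	install_real_provider();	/* hypothetical: sets the real hook */

	if (!ppc_md.get_random_seed)
		return 0;
	return ppc_md.get_random_seed(v);
}
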
index 824c3ad..dac545a 100644 (file)
@@ -203,6 +203,8 @@ static void __init pnv_setup_arch(void)
        pnv_check_guarded_cores();
 
        /* XXX PMCS */
+
+       pnv_rng_init();
 }
 
 static void __init pnv_init(void)
index f5c916c..1d75b77 100644 (file)
@@ -122,4 +122,6 @@ void pseries_lpar_read_hblkrm_characteristics(void);
 static inline void pseries_lpar_read_hblkrm_characteristics(void) { }
 #endif
 
+void pseries_rng_init(void);
+
 #endif /* _PSERIES_PSERIES_H */
index 6268545..6ddfdea 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/archrandom.h>
 #include <asm/machdep.h>
 #include <asm/plpar_wrappers.h>
+#include "pseries.h"
 
 
 static int pseries_get_random_long(unsigned long *v)
@@ -24,19 +25,13 @@ static int pseries_get_random_long(unsigned long *v)
        return 0;
 }
 
-static __init int rng_init(void)
+void __init pseries_rng_init(void)
 {
        struct device_node *dn;
 
        dn = of_find_compatible_node(NULL, NULL, "ibm,random");
        if (!dn)
-               return -ENODEV;
-
-       pr_info("Registering arch random hook.\n");
-
+               return;
        ppc_md.get_random_seed = pseries_get_random_long;
-
        of_node_put(dn);
-       return 0;
 }
-machine_subsys_initcall(pseries, rng_init);
index afb0742..ee4f1db 100644 (file)
@@ -839,6 +839,7 @@ static void __init pSeries_setup_arch(void)
        }
 
        ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
+       pseries_rng_init();
 }
 
 static void pseries_panic(char *str)
index 7d51286..d02911e 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/of_fdt.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/bitmap.h>
 #include <linux/cpumask.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
@@ -57,7 +58,7 @@ static int __init xive_irq_bitmap_add(int base, int count)
        spin_lock_init(&xibm->lock);
        xibm->base = base;
        xibm->count = count;
-       xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
+       xibm->bitmap = bitmap_zalloc(xibm->count, GFP_KERNEL);
        if (!xibm->bitmap) {
                kfree(xibm);
                return -ENOMEM;
@@ -75,7 +76,7 @@ static void xive_irq_bitmap_remove_all(void)
 
        list_for_each_entry_safe(xibm, tmp, &xive_irq_bitmaps, list) {
                list_del(&xibm->list);
-               kfree(xibm->bitmap);
+               bitmap_free(xibm->bitmap);
                kfree(xibm);
        }
 }
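
The kzalloc() call allocated xibm->count bytes to back a bitmap of xibm->count bits, an eight-fold over-allocation; the bitmap API sizes the buffer from a bit count and makes the intent explicit. The general idiom:

	unsigned long *map;

	map = bitmap_zalloc(nbits, GFP_KERNEL);	/* sized in bits, not bytes */
	if (!map)
		return -ENOMEM;
	/* ... set_bit()/test_bit() and friends, up to nbits ... */
	bitmap_free(map);			/* pair with bitmap_zalloc() */
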
index c22f581..fcbb81f 100644 (file)
@@ -38,7 +38,7 @@ config RISCV
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
        select ARCH_SUPPORTS_HUGETLBFS if MMU
-       select ARCH_SUPPORTS_PAGE_TABLE_CHECK
+       select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
        select ARCH_USE_MEMTEST
        select ARCH_USE_QUEUED_RWLOCKS
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
@@ -364,8 +364,13 @@ config RISCV_ISA_SVPBMT
        select RISCV_ALTERNATIVE
        default y
        help
-          Adds support to dynamically detect the presence of the SVPBMT extension
-          (Supervisor-mode: page-based memory types) and enable its usage.
+          Adds support to dynamically detect the presence of the SVPBMT
+          ISA-extension (Supervisor-mode: page-based memory types) and
+          enable its usage.
+
+          The memory type for a page contains a combination of attributes
+          that indicate the cacheability, idempotency, and ordering
+          properties for access to that page.
 
           The SVPBMT extension is only available on 64-bit CPUs.
 
index ebfcd5c..457ac72 100644 (file)
@@ -35,6 +35,7 @@ config ERRATA_SIFIVE_CIP_1200
 
 config ERRATA_THEAD
        bool "T-HEAD errata"
+       depends on !XIP_KERNEL
        select RISCV_ALTERNATIVE
        help
          All T-HEAD errata Kconfig depend on this Kconfig. Disabling
index 34cf8a5..81029d4 100644 (file)
@@ -73,6 +73,7 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
 endif
 
 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
+KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
 
 # GCC versions that support the "-mstrict-align" option default to allowing
 # unaligned accesses.  While unaligned accesses are explicitly allowed in the
@@ -110,7 +111,7 @@ PHONY += vdso_install
 vdso_install:
        $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
        $(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
-               $(build)=arch/riscv/kernel/compat_vdso $@)
+               $(build)=arch/riscv/kernel/compat_vdso compat_$@)
 
 ifeq ($(KBUILD_EXTMOD),)
 ifeq ($(CONFIG_MMU),y)
index 039b92a..f72540b 100644 (file)
@@ -35,7 +35,7 @@
        gpio-keys {
                compatible = "gpio-keys";
 
-               key0 {
+               key {
                        label = "KEY0";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
index b9e30df..8abdbe2 100644 (file)
@@ -47,7 +47,7 @@
        gpio-keys {
                compatible = "gpio-keys";
 
-               boot {
+               key-boot {
                        label = "BOOT";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
index 8d23401..3c6df1e 100644 (file)
@@ -52,7 +52,7 @@
        gpio-keys {
                compatible = "gpio-keys";
 
-               boot {
+               key-boot {
                        label = "BOOT";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
index 24fd83b..03c9843 100644 (file)
        gpio-keys {
                compatible = "gpio-keys";
 
-               up {
+               key-up {
                        label = "UP";
                        linux,code = <BTN_1>;
                        gpios = <&gpio1_0 7 GPIO_ACTIVE_LOW>;
                };
 
-               press {
+               key-press {
                        label = "PRESS";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
                };
 
-               down {
+               key-down {
                        label = "DOWN";
                        linux,code = <BTN_2>;
                        gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
index 25341f3..7164ad0 100644 (file)
@@ -23,7 +23,7 @@
        gpio-keys {
                compatible = "gpio-keys";
 
-               boot {
+               key-boot {
                        label = "BOOT";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
index 8c32591..496d3b7 100644 (file)
@@ -50,6 +50,7 @@
                        riscv,isa = "rv64imafdc";
                        clocks = <&clkcfg CLK_CPU>;
                        tlb-split;
+                       next-level-cache = <&cctrllr>;
                        status = "okay";
 
                        cpu1_intc: interrupt-controller {
@@ -77,6 +78,7 @@
                        riscv,isa = "rv64imafdc";
                        clocks = <&clkcfg CLK_CPU>;
                        tlb-split;
+                       next-level-cache = <&cctrllr>;
                        status = "okay";
 
                        cpu2_intc: interrupt-controller {
                        riscv,isa = "rv64imafdc";
                        clocks = <&clkcfg CLK_CPU>;
                        tlb-split;
+                       next-level-cache = <&cctrllr>;
                        status = "okay";
 
                        cpu3_intc: interrupt-controller {
                        riscv,isa = "rv64imafdc";
                        clocks = <&clkcfg CLK_CPU>;
                        tlb-split;
+                       next-level-cache = <&cctrllr>;
                        status = "okay";
                        cpu4_intc: interrupt-controller {
                                #interrupt-cells = <1>;
                        riscv,ndev = <186>;
                };
 
+               pdma: dma-controller@3000000 {
+                       compatible = "sifive,fu540-c000-pdma", "sifive,pdma0";
+                       reg = <0x0 0x3000000 0x0 0x8000>;
+                       interrupt-parent = <&plic>;
+                       interrupts = <5 6>, <7 8>, <9 10>, <11 12>;
+                       dma-channels = <4>;
+                       #dma-cells = <1>;
+               };
+
                clkcfg: clkcfg@20002000 {
                        compatible = "microchip,mpfs-clkcfg";
                        reg = <0x0 0x20002000 0x0 0x1000>, <0x0 0x3E001000 0x0 0x1000>;
index 672f02b..1031038 100644 (file)
@@ -111,6 +111,7 @@ void __init_or_module sifive_errata_patch_func(struct alt_entry *begin,
                        cpu_apply_errata |= tmp;
                }
        }
-       if (cpu_apply_errata != cpu_req_errata)
+       if (stage != RISCV_ALTERNATIVES_MODULE &&
+           cpu_apply_errata != cpu_req_errata)
                warn_miss_errata(cpu_req_errata - cpu_apply_errata);
 }
index 9e2888d..416ead0 100644 (file)
@@ -75,20 +75,20 @@ asm volatile(ALTERNATIVE(                                           \
        "nop\n\t"                                                       \
        "nop\n\t"                                                       \
        "nop",                                                          \
-       "li      t3, %2\n\t"                                            \
-       "slli    t3, t3, %4\n\t"                                        \
+       "li      t3, %1\n\t"                                            \
+       "slli    t3, t3, %3\n\t"                                        \
        "and     t3, %0, t3\n\t"                                        \
        "bne     t3, zero, 2f\n\t"                                      \
-       "li      t3, %3\n\t"                                            \
-       "slli    t3, t3, %4\n\t"                                        \
+       "li      t3, %2\n\t"                                            \
+       "slli    t3, t3, %3\n\t"                                        \
        "or      %0, %0, t3\n\t"                                        \
        "2:",  THEAD_VENDOR_ID,                                         \
                ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT)            \
        : "+r"(_val)                                                    \
-       : "0"(_val),                                                    \
-         "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT),              \
+       : "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT),              \
          "I"(_PAGE_PMA_THEAD >> ALT_THEAD_PBMT_SHIFT),                 \
-         "I"(ALT_THEAD_PBMT_SHIFT))
+         "I"(ALT_THEAD_PBMT_SHIFT)                                     \
+       : "t3")
 #else
 #define ALT_THEAD_PMA(_val)
 #endif
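
The rewrite drops the matching-constraint input ("0"(_val)) and instead names t3 in the clobber list, which is what actually tells the compiler the register gets overwritten. Reduced to a minimal hedged example (SOME_IMM is a placeholder immediate):

	/* Any register the asm body scribbles on that is not an output must
	 * be listed as clobbered, or the compiler may keep a live value
	 * there across the statement. */
	asm volatile("li	t3, %1\n\t"
		     "or	%0, %0, t3"
		     : "+r" (val)	/* read-write output */
		     : "I" (SOME_IMM)	/* placeholder constant */
		     : "t3");		/* clobbered temporary */
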
index 5c2aba5..dc42375 100644 (file)
@@ -175,7 +175,7 @@ static inline pud_t pfn_pud(unsigned long pfn, pgprot_t prot)
 
 static inline unsigned long _pud_pfn(pud_t pud)
 {
-       return pud_val(pud) >> _PAGE_PFN_SHIFT;
+       return __page_val_to_pfn(pud_val(pud));
 }
 
 static inline pmd_t *pud_pgtable(pud_t pud)
@@ -278,13 +278,13 @@ static inline p4d_t pfn_p4d(unsigned long pfn, pgprot_t prot)
 
 static inline unsigned long _p4d_pfn(p4d_t p4d)
 {
-       return p4d_val(p4d) >> _PAGE_PFN_SHIFT;
+       return __page_val_to_pfn(p4d_val(p4d));
 }
 
 static inline pud_t *p4d_pgtable(p4d_t p4d)
 {
        if (pgtable_l4_enabled)
-               return (pud_t *)pfn_to_virt(p4d_val(p4d) >> _PAGE_PFN_SHIFT);
+               return (pud_t *)pfn_to_virt(__page_val_to_pfn(p4d_val(p4d)));
 
        return (pud_t *)pud_pgtable((pud_t) { p4d_val(p4d) });
 }
@@ -292,7 +292,7 @@ static inline pud_t *p4d_pgtable(p4d_t p4d)
 
 static inline struct page *p4d_page(p4d_t p4d)
 {
-       return pfn_to_page(p4d_val(p4d) >> _PAGE_PFN_SHIFT);
+       return pfn_to_page(__page_val_to_pfn(p4d_val(p4d)));
 }
 
 #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
@@ -347,7 +347,7 @@ static inline void pgd_clear(pgd_t *pgd)
 static inline p4d_t *pgd_pgtable(pgd_t pgd)
 {
        if (pgtable_l5_enabled)
-               return (p4d_t *)pfn_to_virt(pgd_val(pgd) >> _PAGE_PFN_SHIFT);
+               return (p4d_t *)pfn_to_virt(__page_val_to_pfn(pgd_val(pgd)));
 
        return (p4d_t *)p4d_pgtable((p4d_t) { pgd_val(pgd) });
 }
@@ -355,7 +355,7 @@ static inline p4d_t *pgd_pgtable(pgd_t pgd)
 
 static inline struct page *pgd_page(pgd_t pgd)
 {
-       return pfn_to_page(pgd_val(pgd) >> _PAGE_PFN_SHIFT);
+       return pfn_to_page(__page_val_to_pfn(pgd_val(pgd)));
 }
 #define pgd_page(pgd)  pgd_page(pgd)
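
A bare shift by _PAGE_PFN_SHIFT is no longer safe on RISC-V because bits above the PFN field, such as the Svpbmt memory-type bits, would be shifted into the result. The helper masks the field first; roughly (a sketch of its shape, not a verbatim copy):

/* Sketch: isolate the PFN field before shifting it down. */
#define example_page_val_to_pfn(v) \
	(((v) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
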
 
index 1d1be9d..5dbd661 100644 (file)
@@ -261,7 +261,7 @@ static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
 
 static inline unsigned long _pgd_pfn(pgd_t pgd)
 {
-       return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
+       return __page_val_to_pfn(pgd_val(pgd));
 }
 
 static inline struct page *pmd_page(pmd_t pmd)
@@ -590,14 +590,14 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd)
        return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
 }
 
-#define __pmd_to_phys(pmd)  (pmd_val(pmd) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)
+#define __pmd_to_phys(pmd)  (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)
 
 static inline unsigned long pmd_pfn(pmd_t pmd)
 {
        return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
 }
 
-#define __pud_to_phys(pud)  (pud_val(pud) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)
+#define __pud_to_phys(pud)  (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)
 
 static inline unsigned long pud_pfn(pud_t pud)
 {
index c71d659..33bb60a 100644 (file)
@@ -78,7 +78,7 @@ obj-$(CONFIG_SMP) += cpu_ops_sbi.o
 endif
 obj-$(CONFIG_HOTPLUG_CPU)      += cpu-hotplug.o
 obj-$(CONFIG_KGDB)             += kgdb.o
-obj-$(CONFIG_KEXEC)            += kexec_relocate.o crash_save_regs.o machine_kexec.o
+obj-$(CONFIG_KEXEC_CORE)       += kexec_relocate.o crash_save_regs.o machine_kexec.o
 obj-$(CONFIG_KEXEC_FILE)       += elf_kexec.o machine_kexec_file.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump.o
 
index a6f62a6..12b05ce 100644 (file)
@@ -293,7 +293,6 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
                                                  unsigned int stage)
 {
        u32 cpu_req_feature = cpufeature_probe(stage);
-       u32 cpu_apply_feature = 0;
        struct alt_entry *alt;
        u32 tmp;
 
@@ -307,10 +306,8 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
                }
 
                tmp = (1U << alt->errata_id);
-               if (cpu_req_feature & tmp) {
+               if (cpu_req_feature & tmp)
                        patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
-                       cpu_apply_feature |= tmp;
-               }
        }
 }
 #endif
index 9cb8509..0cb9499 100644 (file)
@@ -349,7 +349,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
 {
        const char *strtab, *name, *shstrtab;
        const Elf_Shdr *sechdrs;
-       Elf_Rela *relas;
+       Elf64_Rela *relas;
        int i, r_type;
 
        /* String & section header string table */
index 1c00695..9826073 100644 (file)
@@ -54,7 +54,7 @@ static inline unsigned long gstage_pte_index(gpa_t addr, u32 level)
 
 static inline unsigned long gstage_pte_page_vaddr(pte_t pte)
 {
-       return (unsigned long)pfn_to_virt(pte_val(pte) >> _PAGE_PFN_SHIFT);
+       return (unsigned long)pfn_to_virt(__page_val_to_pfn(pte_val(pte)));
 }
 
 static int gstage_page_size_to_level(unsigned long page_size, u32 *out_level)
index 7f4ad5e..f3455dc 100644 (file)
@@ -781,9 +781,11 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 
        if (kvm_request_pending(vcpu)) {
                if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
+                       kvm_vcpu_srcu_read_unlock(vcpu);
                        rcuwait_wait_event(wait,
                                (!vcpu->arch.power_off) && (!vcpu->arch.pause),
                                TASK_INTERRUPTIBLE);
+                       kvm_vcpu_srcu_read_lock(vcpu);
 
                        if (vcpu->arch.power_off || vcpu->arch.pause) {
                                /*
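
Sleeping while holding an SRCU read-side lock stalls synchronize_srcu() on the writer side, so the blocking wait is now bracketed by an unlock/lock pair. The pattern in isolation (should_block() is a hypothetical predicate):

	/* Never block inside an SRCU read-side critical section. */
	kvm_vcpu_srcu_read_unlock(vcpu);
	rcuwait_wait_event(wait, !should_block(vcpu), TASK_INTERRUPTIBLE);
	kvm_vcpu_srcu_read_lock(vcpu);
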
index 9f764df..6cd9399 100644 (file)
@@ -97,7 +97,7 @@ void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu)
                 * We ran out of VMIDs so we increment vmid_version and
                 * start assigning VMIDs from 1.
                 *
-                * This also means existing VMIDs assignement to all Guest
+                * This also means existing VMIDs assignment to all Guest
 * instances is invalid and we have to force VMID re-assignment
                 * for all Guest instances. The Guest instances that were not
                 * running will automatically pick-up new VMIDs because will
index 91c0b80..5a1a8df 100644 (file)
@@ -204,6 +204,7 @@ config S390
        select IOMMU_SUPPORT            if PCI
        select MMU_GATHER_NO_GATHER
        select MMU_GATHER_RCU_TABLE_FREE
+       select MMU_GATHER_MERGE_VMAS
        select MODULES_USE_ELF_RELA
        select NEED_DMA_MAP_STATE       if PCI
        select NEED_SG_DMA_LENGTH       if PCI
@@ -484,7 +485,6 @@ config KEXEC
 config KEXEC_FILE
        bool "kexec file based system call"
        select KEXEC_CORE
-       select BUILD_BIN2C
        depends on CRYPTO
        depends on CRYPTO_SHA256
        depends on CRYPTO_SHA256_S390
index 495c68a..4cb5d17 100644 (file)
@@ -82,7 +82,7 @@ endif
 
 ifdef CONFIG_EXPOLINE
   ifdef CONFIG_EXPOLINE_EXTERN
-    KBUILD_LDFLAGS_MODULE += arch/s390/lib/expoline.o
+    KBUILD_LDFLAGS_MODULE += arch/s390/lib/expoline/expoline.o
     CC_FLAGS_EXPOLINE := -mindirect-branch=thunk-extern
     CC_FLAGS_EXPOLINE += -mfunction-return=thunk-extern
   else
@@ -163,6 +163,12 @@ vdso_prepare: prepare0
        $(Q)$(MAKE) $(build)=arch/s390/kernel/vdso64 include/generated/vdso64-offsets.h
        $(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
                $(build)=arch/s390/kernel/vdso32 include/generated/vdso32-offsets.h)
+
+ifdef CONFIG_EXPOLINE_EXTERN
+modules_prepare: expoline_prepare
+expoline_prepare:
+       $(Q)$(MAKE) $(build)=arch/s390/lib/expoline arch/s390/lib/expoline/expoline.o
+endif
 endif
 
 # Don't use tabs in echo arguments
index 56007c7..1f2d409 100644 (file)
  *
  * Copyright IBM Corp. 2017, 2020
  * Author(s): Harald Freudenberger
- *
- * The s390_arch_random_generate() function may be called from random.c
- * in interrupt context. So this implementation does the best to be very
- * fast. There is a buffer of random data which is asynchronously checked
- * and filled by a workqueue thread.
- * If there are enough bytes in the buffer the s390_arch_random_generate()
- * just delivers these bytes. Otherwise false is returned until the
- * worker thread refills the buffer.
- * The worker fills the rng buffer by pulling fresh entropy from the
- * high quality (but slow) true hardware random generator. This entropy
- * is then spread over the buffer with an pseudo random generator PRNG.
- * As the arch_get_random_seed_long() fetches 8 bytes and the calling
- * function add_interrupt_randomness() counts this as 1 bit entropy the
- * distribution needs to make sure there is in fact 1 bit entropy contained
- * in 8 bytes of the buffer. The current values pull 32 byte entropy
- * and scatter this into a 2048 byte buffer. So 8 byte in the buffer
- * will contain 1 bit of entropy.
- * The worker thread is rescheduled based on the charge level of the
- * buffer but at least with 500 ms delay to avoid too much CPU consumption.
- * So the max. amount of rng data delivered via arch_get_random_seed is
- * limited to 4k bytes per second.
  */
 
 #include <linux/kernel.h>
 #include <linux/atomic.h>
 #include <linux/random.h>
-#include <linux/slab.h>
 #include <linux/static_key.h>
-#include <linux/workqueue.h>
-#include <linux/moduleparam.h>
 #include <asm/cpacf.h>
 
 DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
 
 atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
 EXPORT_SYMBOL(s390_arch_random_counter);
-
-#define ARCH_REFILL_TICKS (HZ/2)
-#define ARCH_PRNG_SEED_SIZE 32
-#define ARCH_RNG_BUF_SIZE 2048
-
-static DEFINE_SPINLOCK(arch_rng_lock);
-static u8 *arch_rng_buf;
-static unsigned int arch_rng_buf_idx;
-
-static void arch_rng_refill_buffer(struct work_struct *);
-static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
-
-bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
-{
-       /* max hunk is ARCH_RNG_BUF_SIZE */
-       if (nbytes > ARCH_RNG_BUF_SIZE)
-               return false;
-
-       /* lock rng buffer */
-       if (!spin_trylock(&arch_rng_lock))
-               return false;
-
-       /* try to resolve the requested amount of bytes from the buffer */
-       arch_rng_buf_idx -= nbytes;
-       if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) {
-               memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes);
-               atomic64_add(nbytes, &s390_arch_random_counter);
-               spin_unlock(&arch_rng_lock);
-               return true;
-       }
-
-       /* not enough bytes in rng buffer, refill is done asynchronously */
-       spin_unlock(&arch_rng_lock);
-
-       return false;
-}
-EXPORT_SYMBOL(s390_arch_random_generate);
-
-static void arch_rng_refill_buffer(struct work_struct *unused)
-{
-       unsigned int delay = ARCH_REFILL_TICKS;
-
-       spin_lock(&arch_rng_lock);
-       if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) {
-               /* buffer is exhausted and needs refill */
-               u8 seed[ARCH_PRNG_SEED_SIZE];
-               u8 prng_wa[240];
-               /* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */
-               cpacf_trng(NULL, 0, seed, sizeof(seed));
-               /* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */
-               memset(prng_wa, 0, sizeof(prng_wa));
-               cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
-                          &prng_wa, NULL, 0, seed, sizeof(seed));
-               cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
-                          &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0);
-               arch_rng_buf_idx = ARCH_RNG_BUF_SIZE;
-       }
-       delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE;
-       spin_unlock(&arch_rng_lock);
-
-       /* kick next check */
-       queue_delayed_work(system_long_wq, &arch_rng_work, delay);
-}
-
-/*
- * Here follows the implementation of s390_arch_get_random_long().
- *
- * The random longs to be pulled by arch_get_random_long() are
- * prepared in an 4K buffer which is filled from the NIST 800-90
- * compliant s390 drbg. By default the random long buffer is refilled
- * 256 times before the drbg itself needs a reseed. The reseed of the
- * drbg is done with 32 bytes fetched from the high quality (but slow)
- * trng which is assumed to deliver 100% entropy. So the 32 * 8 = 256
- * bits of entropy are spread over 256 * 4KB = 1MB serving 131072
- * arch_get_random_long() invocations before reseeded.
- *
- * How often the 4K random long buffer is refilled with the drbg
- * before the drbg is reseeded can be adjusted. There is a module
- * parameter 's390_arch_rnd_long_drbg_reseed' accessible via
- *   /sys/module/arch_random/parameters/rndlong_drbg_reseed
- * or as kernel command line parameter
- *   arch_random.rndlong_drbg_reseed=<value>
- * This parameter tells how often the drbg fills the 4K buffer before
- * it is re-seeded by fresh entropy from the trng.
- * A value of 16 results in reseeding the drbg at every 16 * 4 KB = 64
- * KB with 32 bytes of fresh entropy pulled from the trng. So a value
- * of 16 would result in 256 bits entropy per 64 KB.
- * A value of 256 results in 1MB of drbg output before a reseed of the
- * drbg is done. So this would spread the 256 bits of entropy among 1MB.
- * Setting this parameter to 0 forces the reseed to take place every
- * time the 4K buffer is depleted, so the entropy rises to 256 bits
- * entropy per 4K or 0.5 bit entropy per arch_get_random_long().  With
- * setting this parameter to negative values all this effort is
- * disabled, arch_get_random long() returns false and thus indicating
- * that the arch_get_random_long() feature is disabled at all.
- */
-
-static unsigned long rndlong_buf[512];
-static DEFINE_SPINLOCK(rndlong_lock);
-static int rndlong_buf_index;
-
-static int rndlong_drbg_reseed = 256;
-module_param_named(rndlong_drbg_reseed, rndlong_drbg_reseed, int, 0600);
-MODULE_PARM_DESC(rndlong_drbg_reseed, "s390 arch_get_random_long() drbg reseed");
-
-static inline void refill_rndlong_buf(void)
-{
-       static u8 prng_ws[240];
-       static int drbg_counter;
-
-       if (--drbg_counter < 0) {
-               /* need to re-seed the drbg */
-               u8 seed[32];
-
-               /* fetch seed from trng */
-               cpacf_trng(NULL, 0, seed, sizeof(seed));
-               /* seed drbg */
-               memset(prng_ws, 0, sizeof(prng_ws));
-               cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
-                          &prng_ws, NULL, 0, seed, sizeof(seed));
-               /* re-init counter for drbg */
-               drbg_counter = rndlong_drbg_reseed;
-       }
-
-       /* fill the arch_get_random_long buffer from drbg */
-       cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prng_ws,
-                  (u8 *) rndlong_buf, sizeof(rndlong_buf),
-                  NULL, 0);
-}
-
-bool s390_arch_get_random_long(unsigned long *v)
-{
-       bool rc = false;
-       unsigned long flags;
-
-       /* arch_get_random_long() disabled ? */
-       if (rndlong_drbg_reseed < 0)
-               return false;
-
-       /* try to lock the random long lock */
-       if (!spin_trylock_irqsave(&rndlong_lock, flags))
-               return false;
-
-       if (--rndlong_buf_index >= 0) {
-               /* deliver next long value from the buffer */
-               *v = rndlong_buf[rndlong_buf_index];
-               rc = true;
-               goto out;
-       }
-
-       /* buffer is depleted and needs refill */
-       if (in_interrupt()) {
-               /* delay refill in interrupt context to next caller */
-               rndlong_buf_index = 0;
-               goto out;
-       }
-
-       /* refill random long buffer */
-       refill_rndlong_buf();
-       rndlong_buf_index = ARRAY_SIZE(rndlong_buf);
-
-       /* and provide one random long */
-       *v = rndlong_buf[--rndlong_buf_index];
-       rc = true;
-
-out:
-       spin_unlock_irqrestore(&rndlong_lock, flags);
-       return rc;
-}
-EXPORT_SYMBOL(s390_arch_get_random_long);
-
-static int __init s390_arch_random_init(void)
-{
-       /* all the needed PRNO subfunctions available ? */
-       if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) &&
-           cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {
-
-               /* alloc arch random working buffer */
-               arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL);
-               if (!arch_rng_buf)
-                       return -ENOMEM;
-
-               /* kick worker queue job to fill the random buffer */
-               queue_delayed_work(system_long_wq,
-                                  &arch_rng_work, ARCH_REFILL_TICKS);
-
-               /* enable arch random to the outside world */
-               static_branch_enable(&s390_arch_random_available);
-       }
-
-       return 0;
-}
-arch_initcall(s390_arch_random_init);
index 5dc712f..4120c42 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Kernel interface for the s390 arch_random_* functions
  *
- * Copyright IBM Corp. 2017, 2020
+ * Copyright IBM Corp. 2017, 2022
  *
  * Author: Harald Freudenberger <freude@de.ibm.com>
  *
 #ifdef CONFIG_ARCH_RANDOM
 
 #include <linux/static_key.h>
+#include <linux/preempt.h>
 #include <linux/atomic.h>
+#include <asm/cpacf.h>
 
 DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
 extern atomic64_t s390_arch_random_counter;
 
-bool s390_arch_get_random_long(unsigned long *v);
-bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
-
 static inline bool __must_check arch_get_random_long(unsigned long *v)
 {
-       if (static_branch_likely(&s390_arch_random_available))
-               return s390_arch_get_random_long(v);
        return false;
 }
 
@@ -36,16 +33,22 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)
 
 static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
-       if (static_branch_likely(&s390_arch_random_available)) {
-               return s390_arch_random_generate((u8 *)v, sizeof(*v));
+       if (static_branch_likely(&s390_arch_random_available) &&
+           in_task()) {
+               cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
+               atomic64_add(sizeof(*v), &s390_arch_random_counter);
+               return true;
        }
        return false;
 }
 
 static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
-       if (static_branch_likely(&s390_arch_random_available)) {
-               return s390_arch_random_generate((u8 *)v, sizeof(*v));
+       if (static_branch_likely(&s390_arch_random_available) &&
+           in_task()) {
+               cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
+               atomic64_add(sizeof(*v), &s390_arch_random_counter);
+               return true;
        }
        return false;
 }
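
With the buffered generator gone, every seed request executes the TRNG instruction directly, and the in_task() test keeps the comparatively slow instruction out of interrupt context. An illustrative caller, not from this patch (consume_seed() is hypothetical):

	unsigned long v;

	/* May legitimately decline, e.g. in IRQ context or without TRNG. */
	if (arch_get_random_seed_long(&v))
		consume_seed(v);
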
index d910d71..7e9e995 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef _ASM_S390_NOSPEC_ASM_H
 #define _ASM_S390_NOSPEC_ASM_H
 
-#include <asm/alternative-asm.h>
-#include <asm/asm-offsets.h>
 #include <asm/dwarf.h>
 
 #ifdef __ASSEMBLY__
index 54ae2dc..2f983e0 100644 (file)
@@ -133,9 +133,9 @@ struct slibe {
  * @sb_count: number of storage blocks
  * @sba: storage block element addresses
  * @dcount: size of storage block elements
- * @user0: user defineable value
- * @res4: reserved paramater
- * @user1: user defineable value
+ * @user0: user definable value
+ * @res4: reserved parameter
+ * @user1: user definable value
  */
 struct qaob {
        u64 res0[6];
index fe6407f..3a5c8fb 100644 (file)
@@ -27,9 +27,6 @@ static inline void tlb_flush(struct mmu_gather *tlb);
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
                                          struct page *page, int page_size);
 
-#define tlb_start_vma(tlb, vma)                        do { } while (0)
-#define tlb_end_vma(tlb, vma)                  do { } while (0)
-
 #define tlb_flush tlb_flush
 #define pte_free_tlb pte_free_tlb
 #define pmd_free_tlb pmd_free_tlb
index a2c1c55..28124d0 100644 (file)
@@ -219,6 +219,11 @@ ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
        unsigned long src;
        int rc;
 
+       if (!(iter_is_iovec(iter) || iov_iter_is_kvec(iter)))
+               return -EINVAL;
+       /* Multi-segment iterators are not supported */
+       if (iter->nr_segs > 1)
+               return -EINVAL;
        if (!csize)
                return 0;
        src = pfn_to_phys(pfn) + offset;
@@ -228,7 +233,10 @@ ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
                rc = copy_oldmem_user(iter->iov->iov_base, src, csize);
        else
                rc = copy_oldmem_kernel(iter->kvec->iov_base, src, csize);
-       return rc;
+       if (rc < 0)
+               return rc;
+       iov_iter_advance(iter, csize);
+       return csize;
 }
 
 /*
index 483ab5e..f7dd3c8 100644 (file)
@@ -516,6 +516,26 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
        return err;
 }
 
+/* Events CPU_CYCLES and INSTRUCTIONS can be submitted with two different
+ * attribute::type values:
+ * - PERF_TYPE_HARDWARE:
+ * - pmu->type:
+ * Handle both types of invocation identically. They address the same hardware.
+ * The result is different when event modifiers exclude_kernel and/or
+ * exclude_user are also set.
+ */
+static int cpumf_pmu_event_type(struct perf_event *event)
+{
+       u64 ev = event->attr.config;
+
+       if (cpumf_generic_events_basic[PERF_COUNT_HW_CPU_CYCLES] == ev ||
+           cpumf_generic_events_basic[PERF_COUNT_HW_INSTRUCTIONS] == ev ||
+           cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev ||
+           cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev)
+               return PERF_TYPE_HARDWARE;
+       return PERF_TYPE_RAW;
+}
+
 static int cpumf_pmu_event_init(struct perf_event *event)
 {
        unsigned int type = event->attr.type;
@@ -525,7 +545,7 @@ static int cpumf_pmu_event_init(struct perf_event *event)
                err = __hw_perf_event_init(event, type);
        else if (event->pmu->type == type)
                /* Registered as unknown PMU */
-               err = __hw_perf_event_init(event, PERF_TYPE_RAW);
+               err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
        else
                return -ENOENT;
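
After this change a generic event is accepted whether userspace submits it under PERF_TYPE_HARDWARE or under the PMU's dynamically assigned type. Both attribute setups below should now count the same hardware event (sketch; the dynamic type value is normally read from sysfs and is only a placeholder here):

	struct perf_event_attr attr = {
		.size   = sizeof(attr),
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};

	attr.type = PERF_TYPE_HARDWARE;		/* generic submission */
	/* ... or, equivalently for this PMU ... */
	attr.type = cpum_cf_dynamic_type;	/* placeholder value */
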
 
index 8c15459..b38b4ae 100644 (file)
@@ -193,8 +193,9 @@ static int paicrypt_event_init(struct perf_event *event)
        /* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
        if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
                return -ENOENT;
-       /* PAI crypto event must be valid */
-       if (a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
+       /* PAI crypto event must be in valid range */
+       if (a->config < PAI_CRYPTO_BASE ||
+           a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
                return -EINVAL;
        /* Allow only CPU wide operation, no process context for now. */
        if (event->hw.target || event->cpu == -1)
@@ -208,6 +209,12 @@ static int paicrypt_event_init(struct perf_event *event)
        if (rc)
                return rc;
 
+       /* Event initialization sets last_tag to 0. When the events are later
+        * deleted and re-added, do not reset the event count value to zero.
+        * Events are added, deleted and re-added when two or more events
+        * are active at the same time.
+        */
+       event->hw.last_tag = 0;
        cpump->event = event;
        event->destroy = paicrypt_event_destroy;
 
@@ -242,9 +249,12 @@ static void paicrypt_start(struct perf_event *event, int flags)
 {
        u64 sum;
 
-       sum = paicrypt_getall(event);           /* Get current value */
-       local64_set(&event->hw.prev_count, sum);
-       local64_set(&event->count, 0);
+       if (!event->hw.last_tag) {
+               event->hw.last_tag = 1;
+               sum = paicrypt_getall(event);           /* Get current value */
+               local64_set(&event->count, 0);
+               local64_set(&event->hw.prev_count, sum);
+       }
 }
 
 static int paicrypt_add(struct perf_event *event, int flags)
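
last_tag acts as a once-only flag here: the baseline in prev_count is captured on the first start and preserved across add/del cycles, so event->count keeps accumulating instead of restarting at zero. Reduced to the bare pattern (read_hw_counter() is hypothetical):

	/* Capture the hardware baseline once per event lifetime. */
	if (!event->hw.last_tag) {
		event->hw.last_tag = 1;
		local64_set(&event->count, 0);
		local64_set(&event->hw.prev_count, read_hw_counter());
	}
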
index 8d91ecc..0a37f5d 100644 (file)
@@ -875,6 +875,11 @@ static void __init setup_randomness(void)
        if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
                add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
        memblock_free(vmms, PAGE_SIZE);
+
+#ifdef CONFIG_ARCH_RANDOM
+       if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
+               static_branch_enable(&s390_arch_random_available);
+#endif
 }
 
 /*
index 5d415b3..580d2e3 100644 (file)
@@ -7,7 +7,6 @@ lib-y += delay.o string.o uaccess.o find.o spinlock.o
 obj-y += mem.o xor.o
 lib-$(CONFIG_KPROBES) += probes.o
 lib-$(CONFIG_UPROBES) += probes.o
-obj-$(CONFIG_EXPOLINE_EXTERN) += expoline.o
 obj-$(CONFIG_S390_KPROBES_SANITY_TEST) += test_kprobes_s390.o
 test_kprobes_s390-objs += test_kprobes_asm.o test_kprobes.o
 
@@ -22,3 +21,5 @@ obj-$(CONFIG_S390_MODULES_SANITY_TEST) += test_modules.o
 obj-$(CONFIG_S390_MODULES_SANITY_TEST_HELPERS) += test_modules_helpers.o
 
 lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+
+obj-$(CONFIG_EXPOLINE_EXTERN) += expoline/
diff --git a/arch/s390/lib/expoline/Makefile b/arch/s390/lib/expoline/Makefile
new file mode 100644 (file)
index 0000000..854631d
--- /dev/null
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-y += expoline.o
index 360ada8..d237bc6 100644 (file)
@@ -48,7 +48,6 @@ OBJCOPYFLAGS_purgatory.ro += --remove-section='.note.*'
 $(obj)/purgatory.ro: $(obj)/purgatory $(obj)/purgatory.chk FORCE
                $(call if_changed,objcopy)
 
-$(obj)/kexec-purgatory.o: $(obj)/kexec-purgatory.S $(obj)/purgatory.ro FORCE
-       $(call if_changed_rule,as_o_S)
+$(obj)/kexec-purgatory.o: $(obj)/purgatory.ro
 
-obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += kexec-purgatory.o
+obj-y += kexec-purgatory.o
index cf9a3ec..fba90e6 100644 (file)
@@ -271,8 +271,12 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
 #endif /* CONFIG_HAVE_IOREMAP_PROT */
 
 #else /* CONFIG_MMU */
-#define iounmap(addr)          do { } while (0)
-#define ioremap(offset, size)  ((void __iomem *)(unsigned long)(offset))
+static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
+{
+       return (void __iomem *)(unsigned long)offset;
+}
+
+static inline void iounmap(volatile void __iomem *addr) { }
 #endif /* CONFIG_MMU */
 
 #define ioremap_uc     ioremap
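
Converting the !MMU stubs from macros to static inlines restores type checking: the old macro accepted any argument and expanded to nothing, hiding bugs that only surfaced on MMU builds. A minimal illustration:

	/* Old macro stub: this compiled cleanly on !MMU configs. */
	iounmap(42);	/* expanded to: do { } while (0) */

	/* New inline stub: 42 fails to convert to volatile void __iomem *,
	 * so the same type error now shows up on every configuration. */
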
index ba449c4..4f7d1df 100644 (file)
@@ -67,6 +67,8 @@ config SPARC64
        select HAVE_KRETPROBES
        select HAVE_KPROBES
        select MMU_GATHER_RCU_TABLE_FREE if SMP
+       select MMU_GATHER_MERGE_VMAS
+       select MMU_GATHER_NO_FLUSH_CACHE
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
index 779a5a0..3037187 100644 (file)
@@ -22,8 +22,6 @@ void smp_flush_tlb_mm(struct mm_struct *mm);
 void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
 void flush_tlb_pending(void);
 
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma)  do { } while (0)
 #define tlb_flush(tlb) flush_tlb_pending()
 
 /*
index 95af12e..cdbd965 100644 (file)
@@ -102,8 +102,8 @@ extern unsigned long uml_physmem;
  * casting is the right thing, but 32-bit UML can't have 64-bit virtual
  * addresses
  */
-#define __pa(virt) to_phys((void *) (unsigned long) (virt))
-#define __va(phys) to_virt((unsigned long) (phys))
+#define __pa(virt) uml_to_phys((void *) (unsigned long) (virt))
+#define __va(phys) uml_to_virt((unsigned long) (phys))
 
 #define phys_to_pfn(p) ((p) >> PAGE_SHIFT)
 #define pfn_to_phys(pfn) PFN_PHYS(pfn)
index 4862c91..98aacd5 100644 (file)
@@ -9,12 +9,12 @@
 extern int phys_mapping(unsigned long phys, unsigned long long *offset_out);
 
 extern unsigned long uml_physmem;
-static inline unsigned long to_phys(void *virt)
+static inline unsigned long uml_to_phys(void *virt)
 {
        return(((unsigned long) virt) - uml_physmem);
 }
 
-static inline void *to_virt(unsigned long phys)
+static inline void *uml_to_virt(unsigned long phys)
 {
        return((void *) uml_physmem + phys);
 }
index 0760e24..9838967 100644 (file)
@@ -432,6 +432,10 @@ void apply_retpolines(s32 *start, s32 *end)
 {
 }
 
+void apply_returns(s32 *start, s32 *end)
+{
+}
+
 void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 {
 }
index 87d3129..c316c99 100644 (file)
@@ -251,7 +251,7 @@ static int userspace_tramp(void *stack)
        signal(SIGTERM, SIG_DFL);
        signal(SIGWINCH, SIG_IGN);
 
-       fd = phys_mapping(to_phys(__syscall_stub_start), &offset);
+       fd = phys_mapping(uml_to_phys(__syscall_stub_start), &offset);
        addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
                      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
        if (addr == MAP_FAILED) {
@@ -261,7 +261,7 @@ static int userspace_tramp(void *stack)
        }
 
        if (stack != NULL) {
-               fd = phys_mapping(to_phys(stack), &offset);
+               fd = phys_mapping(uml_to_phys(stack), &offset);
                addr = mmap((void *) STUB_DATA,
                            UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
                            MAP_FIXED | MAP_SHARED, fd, offset);
@@ -534,7 +534,7 @@ int copy_context_skas0(unsigned long new_stack, int pid)
        struct stub_data *data = (struct stub_data *) current_stack;
        struct stub_data *child_data = (struct stub_data *) new_stack;
        unsigned long long new_offset;
-       int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);
+       int new_fd = phys_mapping(uml_to_phys((void *)new_stack), &new_offset);
 
        /*
         * prepare offset and fd of child's stack as argument for parent's
index 677111a..f2e1d6c 100644 (file)
@@ -3,6 +3,4 @@ boot/compressed/vmlinux
 tools/test_get_len
 tools/insn_sanity
 tools/insn_decoder_test
-purgatory/kexec-purgatory.c
 purgatory/purgatory.ro
-
index be0b95e..5aa4c2e 100644 (file)
@@ -245,6 +245,7 @@ config X86
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
        select MMU_GATHER_RCU_TABLE_FREE        if PARAVIRT
+       select MMU_GATHER_MERGE_VMAS
        select HAVE_POSIX_CPU_TIMERS_TASK_WORK
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RELIABLE_STACKTRACE         if UNWINDER_ORC || STACK_VALIDATION
@@ -277,6 +278,7 @@ config X86
        select SYSCTL_EXCEPTION_TRACE
        select THREAD_INFO_IN_TASK
        select TRACE_IRQFLAGS_SUPPORT
+       select TRACE_IRQFLAGS_NMI_SUPPORT
        select USER_STACKTRACE_SUPPORT
        select VIRT_TO_BUS
        select HAVE_ARCH_KCSAN                  if X86_64
@@ -391,8 +393,8 @@ config PGTABLE_LEVELS
 
 config CC_HAS_SANE_STACKPROTECTOR
        bool
-       default $(success,$(srctree)/scripts/gcc-x86_64-has-stack-protector.sh $(CC)) if 64BIT
-       default $(success,$(srctree)/scripts/gcc-x86_32-has-stack-protector.sh $(CC))
+       default $(success,$(srctree)/scripts/gcc-x86_64-has-stack-protector.sh $(CC) $(CLANG_FLAGS)) if 64BIT
+       default $(success,$(srctree)/scripts/gcc-x86_32-has-stack-protector.sh $(CC) $(CLANG_FLAGS))
        help
          We have to make sure stack protector is unconditionally disabled if
          the compiler produces broken code or if it does not let us control
@@ -462,29 +464,6 @@ config GOLDFISH
        def_bool y
        depends on X86_GOLDFISH
 
-config RETPOLINE
-       bool "Avoid speculative indirect branches in kernel"
-       select OBJTOOL if HAVE_OBJTOOL
-       default y
-       help
-         Compile kernel with the retpoline compiler options to guard against
-         kernel-to-user data leaks by avoiding speculative indirect
-         branches. Requires a compiler with -mindirect-branch=thunk-extern
-         support for full protection. The kernel may run slower.
-
-config CC_HAS_SLS
-       def_bool $(cc-option,-mharden-sls=all)
-
-config SLS
-       bool "Mitigate Straight-Line-Speculation"
-       depends on CC_HAS_SLS && X86_64
-       select OBJTOOL if HAVE_OBJTOOL
-       default n
-       help
-         Compile the kernel with straight-line-speculation options to guard
-         against straight line speculation. The kernel image might be slightly
-         larger.
-
 config X86_CPU_RESCTRL
        bool "x86 CPU resource control support"
        depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
@@ -2032,7 +2011,7 @@ config KEXEC
 config KEXEC_FILE
        bool "kexec file based system call"
        select KEXEC_CORE
-       select BUILD_BIN2C
+       select HAVE_IMA_KEXEC if IMA
        depends on X86_64
        depends on CRYPTO=y
        depends on CRYPTO_SHA256=y
@@ -2453,6 +2432,91 @@ source "kernel/livepatch/Kconfig"
 
 endmenu
 
+config CC_HAS_SLS
+       def_bool $(cc-option,-mharden-sls=all)
+
+config CC_HAS_RETURN_THUNK
+       def_bool $(cc-option,-mfunction-return=thunk-extern)
+
+menuconfig SPECULATION_MITIGATIONS
+       bool "Mitigations for speculative execution vulnerabilities"
+       default y
+       help
+         Say Y here to enable options which enable mitigations for
+         speculative execution hardware vulnerabilities.
+
+         If you say N, all mitigations will be disabled. You really
+         should know what you are doing before saying N.
+
+if SPECULATION_MITIGATIONS
+
+config PAGE_TABLE_ISOLATION
+       bool "Remove the kernel mapping in user mode"
+       default y
+       depends on (X86_64 || X86_PAE)
+       help
+         This feature reduces the number of hardware side channels by
+         ensuring that the majority of kernel addresses are not mapped
+         into userspace.
+
+         See Documentation/x86/pti.rst for more details.
+
+config RETPOLINE
+       bool "Avoid speculative indirect branches in kernel"
+       select OBJTOOL if HAVE_OBJTOOL
+       default y
+       help
+         Compile kernel with the retpoline compiler options to guard against
+         kernel-to-user data leaks by avoiding speculative indirect
+         branches. Requires a compiler with -mindirect-branch=thunk-extern
+         support for full protection. The kernel may run slower.
+
+config RETHUNK
+       bool "Enable return-thunks"
+       depends on RETPOLINE && CC_HAS_RETURN_THUNK
+       select OBJTOOL if HAVE_OBJTOOL
+       default y if X86_64
+       help
+         Compile the kernel with the return-thunks compiler option to guard
+         against kernel-to-user data leaks by avoiding return speculation.
+         Requires a compiler with -mfunction-return=thunk-extern
+         support for full protection. The kernel may run slower.
+
+config CPU_UNRET_ENTRY
+       bool "Enable UNRET on kernel entry"
+       depends on CPU_SUP_AMD && RETHUNK && X86_64
+       default y
+       help
+         Compile the kernel with support for the retbleed=unret mitigation.
+
+config CPU_IBPB_ENTRY
+       bool "Enable IBPB on kernel entry"
+       depends on CPU_SUP_AMD && X86_64
+       default y
+       help
+         Compile the kernel with support for the retbleed=ibpb mitigation.
+
+config CPU_IBRS_ENTRY
+       bool "Enable IBRS on kernel entry"
+       depends on CPU_SUP_INTEL && X86_64
+       default y
+       help
+         Compile the kernel with support for the spectre_v2=ibrs mitigation.
+         This mitigates both spectre_v2 and retbleed at great cost to
+         performance.
+
+config SLS
+       bool "Mitigate Straight-Line-Speculation"
+       depends on CC_HAS_SLS && X86_64
+       select OBJTOOL if HAVE_OBJTOOL
+       default n
+       help
+         Compile the kernel with straight-line-speculation options to guard
+         against straight line speculation. The kernel image might be slightly
+         larger.
+
+endif
+
 config ARCH_HAS_ADD_PAGES
        def_bool y
        depends on ARCH_ENABLE_MEMORY_HOTPLUG
index 340399f..bdfe08f 100644 (file)
@@ -1,8 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 
-config TRACE_IRQFLAGS_NMI_SUPPORT
-       def_bool y
-
 config EARLY_PRINTK_USB
        bool
 
index a74886a..7854685 100644 (file)
@@ -21,6 +21,13 @@ ifdef CONFIG_CC_IS_CLANG
 RETPOLINE_CFLAGS       := -mretpoline-external-thunk
 RETPOLINE_VDSO_CFLAGS  := -mretpoline
 endif
+
+ifdef CONFIG_RETHUNK
+RETHUNK_CFLAGS         := -mfunction-return=thunk-extern
+RETPOLINE_CFLAGS       += $(RETHUNK_CFLAGS)
+endif
+
+export RETHUNK_CFLAGS
 export RETPOLINE_CFLAGS
 export RETPOLINE_VDSO_CFLAGS
 
index 44c350d..d4a314c 100644 (file)
@@ -110,6 +110,7 @@ void kernel_add_identity_map(unsigned long start, unsigned long end)
 void initialize_identity_maps(void *rmode)
 {
        unsigned long cmdline;
+       struct setup_data *sd;
 
        /* Exclude the encryption mask from __PHYSICAL_MASK */
        physical_mask &= ~sme_me_mask;
@@ -163,6 +164,18 @@ void initialize_identity_maps(void *rmode)
        cmdline = get_cmd_line_ptr();
        kernel_add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
 
+       /*
+        * Also map the setup_data entries passed via boot_params in case they
+        * need to be accessed by uncompressed kernel via the identity mapping.
+        */
+       sd = (struct setup_data *)boot_params->hdr.setup_data;
+       while (sd) {
+               unsigned long sd_addr = (unsigned long)sd;
+
+               kernel_add_identity_map(sd_addr, sd_addr + sizeof(*sd) + sd->len);
+               sd = (struct setup_data *)sd->next;
+       }
+
        sev_prep_identity_maps(top_level_pgt);
 
        /* Load the new page-table. */
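
The walk works because setup_data is a singly linked list whose next pointers are physical addresses, exactly what the identity mapping covers. For orientation, the node layout from the x86 boot protocol (abbreviated):

struct setup_data {
	__u64 next;	/* physical address of the next node; 0 ends the list */
	__u32 type;
	__u32 len;	/* length of data[] */
	__u8  data[];
};
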
index 03deb4d..928dcf7 100644 (file)
@@ -124,6 +124,51 @@ static u64 get_cc_mask(void)
        return BIT_ULL(gpa_width - 1);
 }
 
+/*
+ * The TDX module spec states that #VE may be injected for a limited set of
+ * reasons:
+ *
+ *  - Emulation of the architectural #VE injection on EPT violation;
+ *
+ *  - As a result of guest TD execution of a disallowed instruction,
+ *    a disallowed MSR access, or CPUID virtualization;
+ *
+ *  - A notification to the guest TD about anomalous behavior;
+ *
+ * The last one is opt-in and is not used by the kernel.
+ *
+ * The Intel Software Developer's Manual describes cases when the instruction
+ * length field can be used in section "Information for VM Exits Due to
+ * Instruction Execution".
+ *
+ * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
+ * information if #VE occurred due to instruction execution, but not for EPT
+ * violations.
+ */
+static int ve_instr_len(struct ve_info *ve)
+{
+       switch (ve->exit_reason) {
+       case EXIT_REASON_HLT:
+       case EXIT_REASON_MSR_READ:
+       case EXIT_REASON_MSR_WRITE:
+       case EXIT_REASON_CPUID:
+       case EXIT_REASON_IO_INSTRUCTION:
+               /* It is safe to use ve->instr_len for #VE due to instructions */
+               return ve->instr_len;
+       case EXIT_REASON_EPT_VIOLATION:
+               /*
+                * For EPT violations, ve->instr_len is not defined. For those,
+                * the kernel must decode instructions manually and should not
+                * be using this function.
+                */
+               WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
+               return 0;
+       default:
+               WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
+               return ve->instr_len;
+       }
+}
+
 static u64 __cpuidle __halt(const bool irq_disabled, const bool do_sti)
 {
        struct tdx_hypercall_args args = {
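
The #VE handlers below now return a byte count instead of a bool, so the caller can advance RIP past the faulting instruction only when the length is reliable. Sketched caller side (hedged; the real dispatch is abbreviated):

	/* Negative return: failure. Positive return: bytes to skip. */
	ret = handle_halt(ve);
	if (ret < 0)
		return false;
	regs->ip += ret;
	return true;
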
@@ -147,7 +192,7 @@ static u64 __cpuidle __halt(const bool irq_disabled, const bool do_sti)
        return __tdx_hypercall(&args, do_sti ? TDX_HCALL_ISSUE_STI : 0);
 }
 
-static bool handle_halt(void)
+static int handle_halt(struct ve_info *ve)
 {
        /*
         * Since non safe halt is mainly used in CPU offlining
@@ -158,9 +203,9 @@ static bool handle_halt(void)
        const bool do_sti = false;
 
        if (__halt(irq_disabled, do_sti))
-               return false;
+               return -EIO;
 
-       return true;
+       return ve_instr_len(ve);
 }
 
 void __cpuidle tdx_safe_halt(void)
@@ -180,7 +225,7 @@ void __cpuidle tdx_safe_halt(void)
                WARN_ONCE(1, "HLT instruction emulation failed\n");
 }
 
-static bool read_msr(struct pt_regs *regs)
+static int read_msr(struct pt_regs *regs, struct ve_info *ve)
 {
        struct tdx_hypercall_args args = {
                .r10 = TDX_HYPERCALL_STANDARD,
@@ -194,14 +239,14 @@ static bool read_msr(struct pt_regs *regs)
         * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
         */
        if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT))
-               return false;
+               return -EIO;
 
        regs->ax = lower_32_bits(args.r11);
        regs->dx = upper_32_bits(args.r11);
-       return true;
+       return ve_instr_len(ve);
 }
 
-static bool write_msr(struct pt_regs *regs)
+static int write_msr(struct pt_regs *regs, struct ve_info *ve)
 {
        struct tdx_hypercall_args args = {
                .r10 = TDX_HYPERCALL_STANDARD,
@@ -215,10 +260,13 @@ static bool write_msr(struct pt_regs *regs)
         * can be found in TDX Guest-Host-Communication Interface
         * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
         */
-       return !__tdx_hypercall(&args, 0);
+       if (__tdx_hypercall(&args, 0))
+               return -EIO;
+
+       return ve_instr_len(ve);
 }
 
-static bool handle_cpuid(struct pt_regs *regs)
+static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
 {
        struct tdx_hypercall_args args = {
                .r10 = TDX_HYPERCALL_STANDARD,
@@ -236,7 +284,7 @@ static bool handle_cpuid(struct pt_regs *regs)
         */
        if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
                regs->ax = regs->bx = regs->cx = regs->dx = 0;
-               return true;
+               return ve_instr_len(ve);
        }
 
        /*
@@ -245,7 +293,7 @@ static bool handle_cpuid(struct pt_regs *regs)
         * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
         */
        if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT))
-               return false;
+               return -EIO;
 
        /*
         * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
@@ -257,7 +305,7 @@ static bool handle_cpuid(struct pt_regs *regs)
        regs->cx = args.r14;
        regs->dx = args.r15;
 
-       return true;
+       return ve_instr_len(ve);
 }
 
 static bool mmio_read(int size, unsigned long addr, unsigned long *val)
@@ -283,10 +331,10 @@ static bool mmio_write(int size, unsigned long addr, unsigned long val)
                               EPT_WRITE, addr, val);
 }
 
-static bool handle_mmio(struct pt_regs *regs, struct ve_info *ve)
+static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
 {
+       unsigned long *reg, val, vaddr;
        char buffer[MAX_INSN_SIZE];
-       unsigned long *reg, val;
        struct insn insn = {};
        enum mmio_type mmio;
        int size, extend_size;
@@ -294,34 +342,49 @@ static bool handle_mmio(struct pt_regs *regs, struct ve_info *ve)
 
        /* Only in-kernel MMIO is supported */
        if (WARN_ON_ONCE(user_mode(regs)))
-               return false;
+               return -EFAULT;
 
        if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
-               return false;
+               return -EFAULT;
 
        if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
-               return false;
+               return -EINVAL;
 
        mmio = insn_decode_mmio(&insn, &size);
        if (WARN_ON_ONCE(mmio == MMIO_DECODE_FAILED))
-               return false;
+               return -EINVAL;
 
        if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) {
                reg = insn_get_modrm_reg_ptr(&insn, regs);
                if (!reg)
-                       return false;
+                       return -EINVAL;
        }
 
-       ve->instr_len = insn.length;
+       /*
+        * Reject EPT violation #VEs that split pages.
+        *
+        * MMIO accesses are supposed to be naturally aligned and therefore
+        * never cross page boundaries. Seeing split page accesses indicates
+        * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
+        *
+        * load_unaligned_zeropad() will recover using exception fixups.
+        */
+       vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
+       if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
+               return -EFAULT;
 
        /* Handle writes first */
        switch (mmio) {
        case MMIO_WRITE:
                memcpy(&val, reg, size);
-               return mmio_write(size, ve->gpa, val);
+               if (!mmio_write(size, ve->gpa, val))
+                       return -EIO;
+               return insn.length;
        case MMIO_WRITE_IMM:
                val = insn.immediate.value;
-               return mmio_write(size, ve->gpa, val);
+               if (!mmio_write(size, ve->gpa, val))
+                       return -EIO;
+               return insn.length;
        case MMIO_READ:
        case MMIO_READ_ZERO_EXTEND:
        case MMIO_READ_SIGN_EXTEND:
@@ -334,15 +397,15 @@ static bool handle_mmio(struct pt_regs *regs, struct ve_info *ve)
                 * decoded or handled properly. It was likely not using io.h
                 * helpers or accessed MMIO accidentally.
                 */
-               return false;
+               return -EINVAL;
        default:
                WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
-               return false;
+               return -EINVAL;
        }
 
        /* Handle reads */
        if (!mmio_read(size, ve->gpa, &val))
-               return false;
+               return -EIO;
 
        switch (mmio) {
        case MMIO_READ:
@@ -364,13 +427,13 @@ static bool handle_mmio(struct pt_regs *regs, struct ve_info *ve)
        default:
        /* All other cases have to be covered with the first switch() */
                WARN_ON_ONCE(1);
-               return false;
+               return -EINVAL;
        }
 
        if (extend_size)
                memset(reg, extend_val, extend_size);
        memcpy(reg, &val, size);
-       return true;
+       return insn.length;
 }
 
 static bool handle_in(struct pt_regs *regs, int size, int port)
@@ -421,13 +484,14 @@ static bool handle_out(struct pt_regs *regs, int size, int port)
  *
  * Returns the number of bytes to advance RIP on success, or -errno on failure.
  */
-static bool handle_io(struct pt_regs *regs, u32 exit_qual)
+static int handle_io(struct pt_regs *regs, struct ve_info *ve)
 {
+       u32 exit_qual = ve->exit_qual;
        int size, port;
-       bool in;
+       bool in, ret;
 
        if (VE_IS_IO_STRING(exit_qual))
-               return false;
+               return -EIO;
 
        in   = VE_IS_IO_IN(exit_qual);
        size = VE_GET_IO_SIZE(exit_qual);
@@ -435,9 +499,13 @@ static bool handle_io(struct pt_regs *regs, u32 exit_qual)
 
 
        if (in)
-               return handle_in(regs, size, port);
+               ret = handle_in(regs, size, port);
        else
-               return handle_out(regs, size, port);
+               ret = handle_out(regs, size, port);
+       if (!ret)
+               return -EIO;
+
+       return ve_instr_len(ve);
 }
 
 /*
@@ -447,13 +515,19 @@ static bool handle_io(struct pt_regs *regs, u32 exit_qual)
 __init bool tdx_early_handle_ve(struct pt_regs *regs)
 {
        struct ve_info ve;
+       int insn_len;
 
        tdx_get_ve_info(&ve);
 
        if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
                return false;
 
-       return handle_io(regs, ve.exit_qual);
+       insn_len = handle_io(regs, &ve);
+       if (insn_len < 0)
+               return false;
+
+       regs->ip += insn_len;
+       return true;
 }
 
 void tdx_get_ve_info(struct ve_info *ve)
@@ -486,54 +560,65 @@ void tdx_get_ve_info(struct ve_info *ve)
        ve->instr_info  = upper_32_bits(out.r10);
 }
 
-/* Handle the user initiated #VE */
-static bool virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
+/*
+ * Handle the user initiated #VE.
+ *
+ * On success, returns the number of bytes RIP should be incremented (>=0)
+ * or -errno on error.
+ */
+static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
 {
        switch (ve->exit_reason) {
        case EXIT_REASON_CPUID:
-               return handle_cpuid(regs);
+               return handle_cpuid(regs, ve);
        default:
                pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
-               return false;
+               return -EIO;
        }
 }
 
-/* Handle the kernel #VE */
-static bool virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
+/*
+ * Handle the kernel #VE.
+ *
+ * On success, returns the number of bytes RIP should be incremented (>=0)
+ * or -errno on error.
+ */
+static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
 {
        switch (ve->exit_reason) {
        case EXIT_REASON_HLT:
-               return handle_halt();
+               return handle_halt(ve);
        case EXIT_REASON_MSR_READ:
-               return read_msr(regs);
+               return read_msr(regs, ve);
        case EXIT_REASON_MSR_WRITE:
-               return write_msr(regs);
+               return write_msr(regs, ve);
        case EXIT_REASON_CPUID:
-               return handle_cpuid(regs);
+               return handle_cpuid(regs, ve);
        case EXIT_REASON_EPT_VIOLATION:
                return handle_mmio(regs, ve);
        case EXIT_REASON_IO_INSTRUCTION:
-               return handle_io(regs, ve->exit_qual);
+               return handle_io(regs, ve);
        default:
                pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
-               return false;
+               return -EIO;
        }
 }
 
 bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
 {
-       bool ret;
+       int insn_len;
 
        if (user_mode(regs))
-               ret = virt_exception_user(regs, ve);
+               insn_len = virt_exception_user(regs, ve);
        else
-               ret = virt_exception_kernel(regs, ve);
+               insn_len = virt_exception_kernel(regs, ve);
+       if (insn_len < 0)
+               return false;
 
        /* After successful #VE handling, move the IP */
-       if (ret)
-               regs->ip += ve->instr_len;
+       regs->ip += insn_len;
 
-       return ret;
+       return true;
 }
 
 static bool tdx_tlb_flush_required(bool private)
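
The net effect of the tdx.c conversion above: every #VE handler now returns the
decoded instruction length on success, so the caller can advance RIP, or a
negative errno on failure. A minimal sketch of that contract in C, where
emulate_one() is a hypothetical stand-in for the per-exit-reason emulation step:

    static int handle_example(struct pt_regs *regs, struct ve_info *ve)
    {
            if (!emulate_one(regs))         /* hypothetical emulation step */
                    return -EIO;            /* caller treats the #VE as unhandled */

            return ve_instr_len(ve);        /* bytes to add to regs->ip */
    }
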
index 7fec5dc..eeadbd7 100644 (file)
@@ -11,7 +11,7 @@ CFLAGS_REMOVE_common.o                = $(CC_FLAGS_FTRACE)
 
 CFLAGS_common.o                        += -fno-stack-protector
 
-obj-y                          := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
+obj-y                          := entry.o entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
 obj-y                          += common.o
 
 obj-y                          += vdso/
index 29b36e9..f690762 100644 (file)
@@ -7,6 +7,8 @@
 #include <asm/asm-offsets.h>
 #include <asm/processor-flags.h>
 #include <asm/ptrace-abi.h>
+#include <asm/msr.h>
+#include <asm/nospec-branch.h>
 
 /*
 
@@ -283,6 +285,66 @@ For 32-bit we have the following conventions - kernel is built with
 #endif
 
 /*
+ * IBRS kernel mitigation for Spectre_v2.
+ *
+ * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
+ * the regs it uses (AX, CX, DX). Must be called before the first RET
+ * instruction (NOTE! UNTRAIN_RET includes a RET instruction).
+ *
+ * The optional argument is used to save/restore the current value,
+ * which is used on the paranoid paths.
+ *
+ * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
+ */
+.macro IBRS_ENTER save_reg
+#ifdef CONFIG_CPU_IBRS_ENTRY
+       ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
+       movl    $MSR_IA32_SPEC_CTRL, %ecx
+
+.ifnb \save_reg
+       rdmsr
+       shl     $32, %rdx
+       or      %rdx, %rax
+       mov     %rax, \save_reg
+       test    $SPEC_CTRL_IBRS, %eax
+       jz      .Ldo_wrmsr_\@
+       lfence
+       jmp     .Lend_\@
+.Ldo_wrmsr_\@:
+.endif
+
+       movq    PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+       movl    %edx, %eax
+       shr     $32, %rdx
+       wrmsr
+.Lend_\@:
+#endif
+.endm
+
+/*
+ * Similar to IBRS_ENTER; requires kernel GS and CR3, and clobbers the
+ * AX, CX and DX regs. Must be called after the last RET.
+ */
+.macro IBRS_EXIT save_reg
+#ifdef CONFIG_CPU_IBRS_ENTRY
+       ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
+       movl    $MSR_IA32_SPEC_CTRL, %ecx
+
+.ifnb \save_reg
+       mov     \save_reg, %rdx
+.else
+       movq    PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+       andl    $(~SPEC_CTRL_IBRS), %edx
+.endif
+
+       movl    %edx, %eax
+       shr     $32, %rdx
+       wrmsr
+.Lend_\@:
+#endif
+.endm
+
+/*
  * Mitigate Spectre v1 for conditional swapgs code paths.
  *
  * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
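
IBRS_ENTER/IBRS_EXIT are assembly macros, but the operation they perform is a
pair of SPEC_CTRL MSR writes bracketing kernel execution. Roughly, in C (a
sketch only, with hypothetical helper names; the real macros are alternatives
keyed on X86_FEATURE_KERNEL_IBRS and must run before the first RET):

    #include <asm/msr.h>
    #include <asm/nospec-branch.h>

    /* Entry: switch to the kernel SPEC_CTRL value, which has IBRS set. */
    static __always_inline void ibrs_enter(void)
    {
            native_wrmsrl(MSR_IA32_SPEC_CTRL,
                          this_cpu_read(x86_spec_ctrl_current));
    }

    /* Exit: clear the IBRS bit again before returning to user space. */
    static __always_inline void ibrs_exit(void)
    {
            native_wrmsrl(MSR_IA32_SPEC_CTRL,
                          this_cpu_read(x86_spec_ctrl_current) & ~SPEC_CTRL_IBRS);
    }
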
diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
new file mode 100644 (file)
index 0000000..bfb7bcb
--- /dev/null
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common place for both 32- and 64-bit entry routines.
+ */
+
+#include <linux/linkage.h>
+#include <asm/export.h>
+#include <asm/msr-index.h>
+
+.pushsection .noinstr.text, "ax"
+
+SYM_FUNC_START(entry_ibpb)
+       movl    $MSR_IA32_PRED_CMD, %ecx
+       movl    $PRED_CMD_IBPB, %eax
+       xorl    %edx, %edx
+       wrmsr
+       RET
+SYM_FUNC_END(entry_ibpb)
+/* For KVM */
+EXPORT_SYMBOL_GPL(entry_ibpb);
+
+.popsection
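
entry_ibpb is deliberately tiny: an IBPB is a single write of PRED_CMD_IBPB to
MSR_IA32_PRED_CMD. A C equivalent as a sketch (issue_ibpb() is a hypothetical
name; the existing C-side helper is indirect_branch_prediction_barrier(), which
routes through alternative_msr_write() so it can be patched out):

    #include <asm/msr.h>
    #include <asm/msr-index.h>

    /* Flush all indirect branch predictions accumulated so far. */
    static inline void issue_ibpb(void)
    {
            native_wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
    }
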
index 8874208..e309e71 100644 (file)
@@ -698,7 +698,6 @@ SYM_CODE_START(__switch_to_asm)
        movl    %ebx, PER_CPU_VAR(__stack_chk_guard)
 #endif
 
-#ifdef CONFIG_RETPOLINE
        /*
         * When switching from a shallower to a deeper call stack
         * the RSB may either underflow or use entries populated
@@ -707,7 +706,6 @@ SYM_CODE_START(__switch_to_asm)
         * speculative execution to prevent attack.
         */
        FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
-#endif
 
        /* Restore flags or the incoming task to restore AC state. */
        popfl
index 4300ba4..9953d96 100644 (file)
@@ -85,7 +85,7 @@
  */
 
 SYM_CODE_START(entry_SYSCALL_64)
-       UNWIND_HINT_EMPTY
+       UNWIND_HINT_ENTRY
        ENDBR
 
        swapgs
@@ -112,6 +112,11 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
        movq    %rsp, %rdi
        /* Sign extend the lower 32bit as syscall numbers are treated as int */
        movslq  %eax, %rsi
+
+       /* IBRS_ENTER clobbers %rax, so do this after saving the syscall nr */
+       IBRS_ENTER
+       UNTRAIN_RET
+
        call    do_syscall_64           /* returns with IRQs disabled */
 
        /*
@@ -191,6 +196,7 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
         * perf profiles. Nothing jumps here.
         */
 syscall_return_via_sysret:
+       IBRS_EXIT
        POP_REGS pop_rdi=0
 
        /*
@@ -249,7 +255,6 @@ SYM_FUNC_START(__switch_to_asm)
        movq    %rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
 #endif
 
-#ifdef CONFIG_RETPOLINE
        /*
         * When switching from a shallower to a deeper call stack
         * the RSB may either underflow or use entries populated
@@ -258,7 +263,6 @@ SYM_FUNC_START(__switch_to_asm)
         * speculative execution to prevent attack.
         */
        FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
-#endif
 
        /* restore callee-saved registers */
        popq    %r15
@@ -322,13 +326,13 @@ SYM_CODE_END(ret_from_fork)
 #endif
 .endm
 
-/* Save all registers in pt_regs */
-SYM_CODE_START_LOCAL(push_and_clear_regs)
+SYM_CODE_START_LOCAL(xen_error_entry)
        UNWIND_HINT_FUNC
        PUSH_AND_CLEAR_REGS save_ret=1
        ENCODE_FRAME_POINTER 8
+       UNTRAIN_RET
        RET
-SYM_CODE_END(push_and_clear_regs)
+SYM_CODE_END(xen_error_entry)
 
 /**
  * idtentry_body - Macro to emit code calling the C function
@@ -337,9 +341,6 @@ SYM_CODE_END(push_and_clear_regs)
  */
 .macro idtentry_body cfunc has_error_code:req
 
-       call push_and_clear_regs
-       UNWIND_HINT_REGS
-
        /*
         * Call error_entry() and switch to the task stack if from userspace.
         *
@@ -349,7 +350,7 @@ SYM_CODE_END(push_and_clear_regs)
         * switch the CR3.  So it can skip invoking error_entry().
         */
        ALTERNATIVE "call error_entry; movq %rax, %rsp", \
-               "", X86_FEATURE_XENPV
+                   "call xen_error_entry", X86_FEATURE_XENPV
 
        ENCODE_FRAME_POINTER
        UNWIND_HINT_REGS
@@ -612,6 +613,7 @@ __irqentry_text_end:
 
 SYM_CODE_START_LOCAL(common_interrupt_return)
 SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
+       IBRS_EXIT
 #ifdef CONFIG_DEBUG_ENTRY
        /* Assert that pt_regs indicates user mode. */
        testb   $3, CS(%rsp)
@@ -897,6 +899,9 @@ SYM_CODE_END(xen_failsafe_callback)
  *              1 -> no SWAPGS on exit
  *
  *     Y        GSBASE value at entry, must be restored in paranoid_exit
+ *
+ * R14 - old CR3
+ * R15 - old SPEC_CTRL
  */
 SYM_CODE_START_LOCAL(paranoid_entry)
        UNWIND_HINT_FUNC
@@ -940,7 +945,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
         * is needed here.
         */
        SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
-       RET
+       jmp .Lparanoid_gsbase_done
 
 .Lparanoid_entry_checkgs:
        /* EBX = 1 -> kernel GSBASE active, no restore required */
@@ -959,8 +964,16 @@ SYM_CODE_START_LOCAL(paranoid_entry)
        xorl    %ebx, %ebx
        swapgs
 .Lparanoid_kernel_gsbase:
-
        FENCE_SWAPGS_KERNEL_ENTRY
+.Lparanoid_gsbase_done:
+
+       /*
+        * Once CR3 and %GS are set up, save and set SPEC_CTRL. Just like
+        * CR3 above, keep the old value in a callee saved register.
+        */
+       IBRS_ENTER save_reg=%r15
+       UNTRAIN_RET
+
        RET
 SYM_CODE_END(paranoid_entry)
 
@@ -982,9 +995,19 @@ SYM_CODE_END(paranoid_entry)
  *              1 -> no SWAPGS on exit
  *
  *     Y        User space GSBASE, must be restored unconditionally
+ *
+ * R14 - old CR3
+ * R15 - old SPEC_CTRL
  */
 SYM_CODE_START_LOCAL(paranoid_exit)
        UNWIND_HINT_REGS
+
+       /*
+        * Must restore IBRS state before both CR3 and %GS since we need access
+        * to the per-CPU x86_spec_ctrl_current variable.
+        */
+       IBRS_EXIT save_reg=%r15
+
        /*
         * The order of operations is important. RESTORE_CR3 requires
         * kernel GSBASE.
@@ -1017,6 +1040,10 @@ SYM_CODE_END(paranoid_exit)
  */
 SYM_CODE_START_LOCAL(error_entry)
        UNWIND_HINT_FUNC
+
+       PUSH_AND_CLEAR_REGS save_ret=1
+       ENCODE_FRAME_POINTER 8
+
        testb   $3, CS+8(%rsp)
        jz      .Lerror_kernelspace
 
@@ -1028,9 +1055,12 @@ SYM_CODE_START_LOCAL(error_entry)
        FENCE_SWAPGS_USER_ENTRY
        /* We have user CR3.  Change to kernel CR3. */
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+       IBRS_ENTER
+       UNTRAIN_RET
 
        leaq    8(%rsp), %rdi                   /* arg0 = pt_regs pointer */
 .Lerror_entry_from_usermode_after_swapgs:
+
        /* Put us onto the real thread stack. */
        call    sync_regs
        RET
@@ -1065,6 +1095,7 @@ SYM_CODE_START_LOCAL(error_entry)
 .Lerror_entry_done_lfence:
        FENCE_SWAPGS_KERNEL_ENTRY
        leaq    8(%rsp), %rax                   /* return pt_regs pointer */
+       ANNOTATE_UNRET_END
        RET
 
 .Lbstep_iret:
@@ -1080,6 +1111,8 @@ SYM_CODE_START_LOCAL(error_entry)
        swapgs
        FENCE_SWAPGS_USER_ENTRY
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+       IBRS_ENTER
+       UNTRAIN_RET
 
        /*
         * Pretend that the exception came from user mode: set up pt_regs
@@ -1185,6 +1218,9 @@ SYM_CODE_START(asm_exc_nmi)
        PUSH_AND_CLEAR_REGS rdx=(%rdx)
        ENCODE_FRAME_POINTER
 
+       IBRS_ENTER
+       UNTRAIN_RET
+
        /*
         * At this point we no longer need to worry about stack damage
         * due to nesting -- we're on the normal thread stack and we're
@@ -1409,6 +1445,9 @@ end_repeat_nmi:
        movq    $-1, %rsi
        call    exc_nmi
 
+       /* Always restore stashed SPEC_CTRL value (see paranoid_entry) */
+       IBRS_EXIT save_reg=%r15
+
        /* Always restore stashed CR3 value (see paranoid_entry) */
        RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
 
index d105274..682338e 100644 (file)
@@ -4,7 +4,6 @@
  *
  * Copyright 2000-2002 Andi Kleen, SuSE Labs.
  */
-#include "calling.h"
 #include <asm/asm-offsets.h>
 #include <asm/current.h>
 #include <asm/errno.h>
 #include <asm/irqflags.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/nospec-branch.h>
 #include <linux/linkage.h>
 #include <linux/err.h>
 
+#include "calling.h"
+
        .section .entry.text, "ax"
 
 /*
@@ -47,7 +49,7 @@
  * 0(%ebp) arg6
  */
 SYM_CODE_START(entry_SYSENTER_compat)
-       UNWIND_HINT_EMPTY
+       UNWIND_HINT_ENTRY
        ENDBR
        /* Interrupts are off on entry. */
        swapgs
@@ -88,6 +90,9 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
 
        cld
 
+       IBRS_ENTER
+       UNTRAIN_RET
+
        /*
         * SYSENTER doesn't filter flags, so we need to clear NT and AC
         * ourselves.  To save a few cycles, we can check whether
@@ -174,7 +179,7 @@ SYM_CODE_END(entry_SYSENTER_compat)
  * 0(%esp) arg6
  */
 SYM_CODE_START(entry_SYSCALL_compat)
-       UNWIND_HINT_EMPTY
+       UNWIND_HINT_ENTRY
        ENDBR
        /* Interrupts are off on entry. */
        swapgs
@@ -203,6 +208,9 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)
        PUSH_AND_CLEAR_REGS rcx=%rbp rax=$-ENOSYS
        UNWIND_HINT_REGS
 
+       IBRS_ENTER
+       UNTRAIN_RET
+
        movq    %rsp, %rdi
        call    do_fast_syscall_32
        /* XEN PV guests always use IRET path */
@@ -217,6 +225,8 @@ sysret32_from_system_call:
         */
        STACKLEAK_ERASE
 
+       IBRS_EXIT
+
        movq    RBX(%rsp), %rbx         /* pt_regs->rbx */
        movq    RBP(%rsp), %rbp         /* pt_regs->rbp */
        movq    EFLAGS(%rsp), %r11      /* pt_regs->flags (in r11) */
@@ -295,7 +305,7 @@ SYM_CODE_END(entry_SYSCALL_compat)
  * ebp  arg6
  */
 SYM_CODE_START(entry_INT80_compat)
-       UNWIND_HINT_EMPTY
+       UNWIND_HINT_ENTRY
        ENDBR
        /*
         * Interrupts are off on entry.
@@ -337,6 +347,9 @@ SYM_CODE_START(entry_INT80_compat)
 
        cld
 
+       IBRS_ENTER
+       UNTRAIN_RET
+
        movq    %rsp, %rdi
        call    do_int80_syscall_32
        jmp     swapgs_restore_regs_and_return_to_usermode
index c2a8b76..76cd790 100644 (file)
@@ -92,6 +92,7 @@ endif
 endif
 
 $(vobjs): KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO) $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+$(vobjs): KBUILD_AFLAGS += -DBUILD_VDSO
 
 #
 # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
index 15e3515..ef2dd18 100644 (file)
@@ -19,17 +19,20 @@ __vsyscall_page:
 
        mov $__NR_gettimeofday, %rax
        syscall
-       RET
+       ret
+       int3
 
        .balign 1024, 0xcc
        mov $__NR_time, %rax
        syscall
-       RET
+       ret
+       int3
 
        .balign 1024, 0xcc
        mov $__NR_getcpu, %rax
        syscall
-       RET
+       ret
+       int3
 
        .balign 4096, 0xcc
 
index 13179f3..4f70fb6 100644 (file)
@@ -278,9 +278,9 @@ enum {
 };
 
 /*
- * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
- * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
- * TSX is not supported they have no consistent behavior:
+ * For format LBR_FORMAT_EIP_FLAGS2, bits 61:62 in MSR_LAST_BRANCH_FROM_x
+ * are the TSX flags when TSX is supported, but when TSX is not supported
+ * they have no consistent behavior:
  *
  *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
  *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
@@ -288,7 +288,7 @@ enum {
  *
  * Therefore, if:
  *
- *   1) LBR has TSX format
+ *   1) LBR format is LBR_FORMAT_EIP_FLAGS2
  *   2) CPU has no TSX support enabled
  *
  * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
@@ -300,7 +300,7 @@ static inline bool lbr_from_signext_quirk_needed(void)
        bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
                           boot_cpu_has(X86_FEATURE_RTM);
 
-       return !tsx_support && x86_pmu.lbr_has_tsx;
+       return !tsx_support;
 }
 
 static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
@@ -1609,9 +1609,6 @@ void intel_pmu_lbr_init_hsw(void)
        x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
 
        x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
-
-       if (lbr_from_signext_quirk_needed())
-               static_branch_enable(&lbr_from_quirk_key);
 }
 
 /* skylake */
@@ -1702,7 +1699,11 @@ void intel_pmu_lbr_init(void)
        switch (x86_pmu.intel_cap.lbr_format) {
        case LBR_FORMAT_EIP_FLAGS2:
                x86_pmu.lbr_has_tsx = 1;
-               fallthrough;
+               x86_pmu.lbr_from_flags = 1;
+               if (lbr_from_signext_quirk_needed())
+                       static_branch_enable(&lbr_from_quirk_key);
+               break;
+
        case LBR_FORMAT_EIP_FLAGS:
                x86_pmu.lbr_from_flags = 1;
                break;
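
With the quirk now confined to LBR_FORMAT_EIP_FLAGS2, the fixup itself is
unchanged: before a wrmsr() to MSR_LAST_BRANCH_FROM_x on affected CPUs, the
dead TSX bits 61:62 must be made to look like sign-extension bits. A
hypothetical standalone version of that fixup (the in-tree helper is
structured differently):

    #include <linux/bits.h>

    /* Make bits 61:62 track bit 60 so wrmsr() sees a valid sign extension. */
    static u64 lbr_from_sign_extend(u64 val)
    {
            if (val & BIT_ULL(60))
                    val |= BIT_ULL(62) | BIT_ULL(61);
            else
                    val &= ~(BIT_ULL(62) | BIT_ULL(61));
            return val;
    }
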
index 8b392b6..3de6d8b 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/io.h>
 #include <asm/apic.h>
 #include <asm/desc.h>
+#include <asm/sev.h>
 #include <asm/hypervisor.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
@@ -405,6 +406,11 @@ void __init hyperv_init(void)
        }
 
        if (hv_isolation_type_snp()) {
+               /* Negotiate GHCB Version. */
+               if (!hv_ghcb_negotiate_protocol())
+                       hv_ghcb_terminate(SEV_TERM_SET_GEN,
+                                         GHCB_SEV_ES_PROT_UNSUPPORTED);
+
                hv_ghcb_pg = alloc_percpu(union hv_ghcb *);
                if (!hv_ghcb_pg)
                        goto free_vp_assist_page;
index 2b99411..1dbcbd9 100644 (file)
@@ -53,6 +53,8 @@ union hv_ghcb {
        } hypercall;
 } __packed __aligned(HV_HYP_PAGE_SIZE);
 
+static u16 hv_ghcb_version __ro_after_init;
+
 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
 {
        union hv_ghcb *hv_ghcb;
@@ -96,12 +98,85 @@ u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
        return status;
 }
 
+static inline u64 rd_ghcb_msr(void)
+{
+       return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
+}
+
+static inline void wr_ghcb_msr(u64 val)
+{
+       native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
+}
+
+static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
+                                  u64 exit_info_1, u64 exit_info_2)
+{
+       /* Fill in protocol and format specifiers */
+       ghcb->protocol_version = hv_ghcb_version;
+       ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;
+
+       ghcb_set_sw_exit_code(ghcb, exit_code);
+       ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
+       ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
+
+       VMGEXIT();
+
+       if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
+               return ES_VMM_ERROR;
+       else
+               return ES_OK;
+}
+
+void hv_ghcb_terminate(unsigned int set, unsigned int reason)
+{
+       u64 val = GHCB_MSR_TERM_REQ;
+
+       /* Tell the hypervisor what went wrong. */
+       val |= GHCB_SEV_TERM_REASON(set, reason);
+
+       /* Request Guest Termination from Hypervisor */
+       wr_ghcb_msr(val);
+       VMGEXIT();
+
+       while (true)
+               asm volatile("hlt\n" : : : "memory");
+}
+
+bool hv_ghcb_negotiate_protocol(void)
+{
+       u64 ghcb_gpa;
+       u64 val;
+
+       /* Save ghcb page gpa. */
+       ghcb_gpa = rd_ghcb_msr();
+
+       /* Do the GHCB protocol version negotiation */
+       wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
+       VMGEXIT();
+       val = rd_ghcb_msr();
+
+       if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
+               return false;
+
+       if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
+           GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
+               return false;
+
+       hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
+                            GHCB_PROTOCOL_MAX);
+
+       /* Write ghcb page back after negotiating protocol. */
+       wr_ghcb_msr(ghcb_gpa);
+       VMGEXIT();
+
+       return true;
+}
+
 void hv_ghcb_msr_write(u64 msr, u64 value)
 {
        union hv_ghcb *hv_ghcb;
        void **ghcb_base;
        unsigned long flags;
-       struct es_em_ctxt ctxt;
 
        if (!hv_ghcb_pg)
                return;
@@ -120,8 +195,7 @@ void hv_ghcb_msr_write(u64 msr, u64 value)
        ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
        ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));
 
-       if (sev_es_ghcb_hv_call(&hv_ghcb->ghcb, false, &ctxt,
-                               SVM_EXIT_MSR, 1, 0))
+       if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
                pr_warn("Fail to write msr via ghcb %llx.\n", msr);
 
        local_irq_restore(flags);
@@ -133,7 +207,6 @@ void hv_ghcb_msr_read(u64 msr, u64 *value)
        union hv_ghcb *hv_ghcb;
        void **ghcb_base;
        unsigned long flags;
-       struct es_em_ctxt ctxt;
 
        /* Check size of union hv_ghcb here. */
        BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);
@@ -152,8 +225,7 @@ void hv_ghcb_msr_read(u64 msr, u64 *value)
        }
 
        ghcb_set_rcx(&hv_ghcb->ghcb, msr);
-       if (sev_es_ghcb_hv_call(&hv_ghcb->ghcb, false, &ctxt,
-                               SVM_EXIT_MSR, 0, 0))
+       if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
                pr_warn("Fail to read msr via ghcb %llx.\n", msr);
        else
                *value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
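
Note the ordering constraint: hv_ghcb_negotiate_protocol() must run before any
hv_ghcb_msr_read()/hv_ghcb_msr_write() call, since hv_ghcb_hv_call() stamps the
negotiated hv_ghcb_version into each GHCB. The hv_init.c hunk earlier in this
series wires it up as follows, which is the intended usage pattern:

    if (hv_isolation_type_snp()) {
            /* Negotiate before the GHCB page is ever used. */
            if (!hv_ghcb_negotiate_protocol())
                    hv_ghcb_terminate(SEV_TERM_SET_GEN,
                                      GHCB_SEV_ES_PROT_UNSUPPORTED);
            /* ... then allocate the per-CPU GHCB pages ... */
    }
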
index 9b10c8c..9542c58 100644 (file)
@@ -76,6 +76,7 @@ extern int alternatives_patched;
 extern void alternative_instructions(void);
 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 extern void apply_retpolines(s32 *start, s32 *end);
+extern void apply_returns(s32 *start, s32 *end);
 extern void apply_ibt_endbr(s32 *start, s32 *end);
 
 struct module;
index 393f2bb..5fe7f6c 100644 (file)
 #define X86_FEATURE_PROC_FEEDBACK      ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_XCOMPACTED         ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
 #define X86_FEATURE_PTI                        ( 7*32+11) /* Kernel Page Table Isolation enabled */
-#define X86_FEATURE_RETPOLINE          ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_LFENCE   ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_KERNEL_IBRS                ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
+#define X86_FEATURE_RSB_VMEXIT         ( 7*32+13) /* "" Fill RSB on VM-Exit */
 #define X86_FEATURE_INTEL_PPIN         ( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2             ( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL      ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
 #define X86_FEATURE_IBRS               ( 7*32+25) /* Indirect Branch Restricted Speculation */
 #define X86_FEATURE_IBPB               ( 7*32+26) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_STIBP              ( 7*32+27) /* Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_ZEN                        ( 7*32+28) /* "" CPU is AMD family 0x17 or above (Zen) */
+#define X86_FEATURE_ZEN                        ( 7*32+28) /* "" CPU based on Zen microarchitecture */
 #define X86_FEATURE_L1TF_PTEINV                ( 7*32+29) /* "" L1TF workaround PTE inversion */
 #define X86_FEATURE_IBRS_ENHANCED      ( 7*32+30) /* Enhanced IBRS */
 #define X86_FEATURE_MSR_IA32_FEAT_CTL  ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
 #define X86_FEATURE_PER_THREAD_MBA     (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
 #define X86_FEATURE_SGX1               (11*32+ 8) /* "" Basic SGX */
 #define X86_FEATURE_SGX2               (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
+#define X86_FEATURE_ENTRY_IBPB         (11*32+10) /* "" Issue an IBPB on kernel entry */
+#define X86_FEATURE_RRSBA_CTRL         (11*32+11) /* "" RET prediction control */
+#define X86_FEATURE_RETPOLINE          (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE   (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_RETHUNK            (11*32+14) /* "" Use REturn THUNK */
+#define X86_FEATURE_UNRET              (11*32+15) /* "" AMD BTB untrain return */
+#define X86_FEATURE_USE_IBPB_FW                (11*32+16) /* "" Use IBPB during runtime firmware calls */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI           (12*32+ 4) /* AVX VNNI instructions */
 #define X86_FEATURE_VIRT_SSBD          (13*32+25) /* Virtualized Speculative Store Bypass Disable */
 #define X86_FEATURE_AMD_SSB_NO         (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 #define X86_FEATURE_CPPC               (13*32+27) /* Collaborative Processor Performance Control */
+#define X86_FEATURE_BTC_NO             (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
 #define X86_FEATURE_BRS                        (13*32+31) /* Branch Sampling available */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_BUG_TAA                    X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
 #define X86_BUG_ITLB_MULTIHIT          X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 #define X86_BUG_SRBDS                  X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
+#define X86_BUG_MMIO_STALE_DATA                X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
+#define X86_BUG_RETBLEED               X86_BUG(26) /* CPU is affected by RETBleed */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
index 36369e7..33d2cd0 100644 (file)
 # define DISABLE_PTI           (1 << (X86_FEATURE_PTI & 31))
 #endif
 
+#ifdef CONFIG_RETPOLINE
+# define DISABLE_RETPOLINE     0
+#else
+# define DISABLE_RETPOLINE     ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
+                                (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
+#endif
+
+#ifdef CONFIG_RETHUNK
+# define DISABLE_RETHUNK       0
+#else
+# define DISABLE_RETHUNK       (1 << (X86_FEATURE_RETHUNK & 31))
+#endif
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
+# define DISABLE_UNRET         0
+#else
+# define DISABLE_UNRET         (1 << (X86_FEATURE_UNRET & 31))
+#endif
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 # define DISABLE_ENQCMD                0
 #else
 #define DISABLED_MASK8 (DISABLE_TDX_GUEST)
 #define DISABLED_MASK9 (DISABLE_SGX)
 #define DISABLED_MASK10        0
-#define DISABLED_MASK11        0
+#define DISABLED_MASK11        (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)
 #define DISABLED_MASK12        0
 #define DISABLED_MASK13        0
 #define DISABLED_MASK14        0
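
Routing these bits through DISABLED_MASK11 means cpu_feature_enabled() can
resolve them at compile time. A sketch of the payoff (the printk is
illustrative only):

    /*
     * cpu_feature_enabled() consults DISABLED_MASK11 at build time: with
     * CONFIG_RETHUNK=n this test is constant-false, the guarded code is
     * discarded, and no runtime cpufeature lookup is emitted at all.
     */
    if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
            pr_info("RETs are routed through __x86_return_thunk\n");
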
index 5a39ed5..e8f58dd 100644 (file)
@@ -4,9 +4,6 @@
 
 #include <asm/e820/types.h>
 
-struct device;
-struct resource;
-
 extern struct e820_table *e820_table;
 extern struct e820_table *e820_table_kexec;
 extern struct e820_table *e820_table_firmware;
@@ -46,8 +43,6 @@ extern void e820__register_nosave_regions(unsigned long limit_pfn);
 
 extern int  e820__get_entry_type(u64 start, u64 end);
 
-extern void remove_e820_regions(struct device *dev, struct resource *avail);
-
 /*
  * Returns true iff the specified range [start,end) is completely contained inside
  * the ISA region.
index 71943dc..9636742 100644 (file)
@@ -323,7 +323,7 @@ static inline u32 efi64_convert_status(efi_status_t status)
 #define __efi64_argmap_get_memory_space_descriptor(phys, desc) \
        (__efi64_split(phys), (desc))
 
-#define __efi64_argmap_set_memory_space_descriptor(phys, size, flags) \
+#define __efi64_argmap_set_memory_space_attributes(phys, size, flags) \
        (__efi64_split(phys), __efi64_split(size), __efi64_split(flags))
 
 /*
index 6b0f31f..503a577 100644 (file)
@@ -164,4 +164,6 @@ static inline bool fpstate_is_confidential(struct fpu_guest *gfpu)
 /* prctl */
 extern long fpu_xstate_prctl(int option, unsigned long arg2);
 
+extern void fpu_idle_fpregs(void);
+
 #endif /* _ASM_X86_FPU_API_H */
index 3a240a6..9217bd6 100644 (file)
@@ -1047,14 +1047,77 @@ struct kvm_x86_msr_filter {
 };
 
 enum kvm_apicv_inhibit {
+
+       /********************************************************************/
+       /* INHIBITs that are relevant to both Intel's APICv and AMD's AVIC. */
+       /********************************************************************/
+
+       /*
+        * APIC acceleration is disabled by a module parameter
+        * and/or not supported in hardware.
+        */
        APICV_INHIBIT_REASON_DISABLE,
+
+       /*
+        * APIC acceleration is inhibited because AutoEOI feature is
+        * being used by a HyperV guest.
+        */
        APICV_INHIBIT_REASON_HYPERV,
+
+       /*
+        * APIC acceleration is inhibited because the userspace didn't yet
+        * enable the kernel/split irqchip.
+        */
+       APICV_INHIBIT_REASON_ABSENT,
+
+       /*
+        * APIC acceleration is inhibited because KVM_GUESTDBG_BLOCKIRQ
+        * (an out-of-band debug measure that blocks all interrupts on
+        * this vCPU) was enabled, to avoid AVIC/APICv bypassing it.
+        */
+       APICV_INHIBIT_REASON_BLOCKIRQ,
+
+       /*
+        * For simplicity, the APIC acceleration is inhibited
+        * first time either APIC ID or APIC base are changed by the guest
+        * from their reset values.
+        */
+       APICV_INHIBIT_REASON_APIC_ID_MODIFIED,
+       APICV_INHIBIT_REASON_APIC_BASE_MODIFIED,
+
+       /******************************************************/
+       /* INHIBITs that are relevant only to the AMD's AVIC. */
+       /******************************************************/
+
+       /*
+        * AVIC is inhibited on a vCPU because it runs a nested guest.
+        *
+        * This is needed because unlike APICv, the peers of this vCPU
+        * cannot use the doorbell mechanism to signal interrupts via AVIC when
+        * a vCPU runs nested.
+        */
        APICV_INHIBIT_REASON_NESTED,
+
+       /*
+        * On SVM, the wait for the IRQ window is implemented with a pending
+        * vIRQ, which cannot be injected while AVIC is enabled; thus AVIC
+        * is inhibited while KVM waits for the IRQ window.
+        */
        APICV_INHIBIT_REASON_IRQWIN,
+
+       /*
+        * PIT (i8254) 're-inject' mode relies on the EOI intercept,
+        * which AVIC doesn't support for edge triggered interrupts.
+        */
        APICV_INHIBIT_REASON_PIT_REINJ,
+
+       /*
+        * AVIC is inhibited because the guest has x2APIC in its CPUID.
+        */
        APICV_INHIBIT_REASON_X2APIC,
-       APICV_INHIBIT_REASON_BLOCKIRQ,
-       APICV_INHIBIT_REASON_ABSENT,
+
+       /*
+        * AVIC is disabled because SEV doesn't support it.
+        */
        APICV_INHIBIT_REASON_SEV,
 };
 
index 85865f1..73ca200 100644 (file)
 #define __ALIGN_STR    __stringify(__ALIGN)
 #endif
 
+#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#define RET    jmp __x86_return_thunk
+#else /* CONFIG_RETPOLINE */
 #ifdef CONFIG_SLS
 #define RET    ret; int3
 #else
 #define RET    ret
 #endif
+#endif /* CONFIG_RETPOLINE */
 
 #else /* __ASSEMBLY__ */
 
+#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#define ASM_RET        "jmp __x86_return_thunk\n\t"
+#else /* CONFIG_RETPOLINE */
 #ifdef CONFIG_SLS
 #define ASM_RET        "ret; int3\n\t"
 #else
 #define ASM_RET        "ret\n\t"
 #endif
+#endif /* CONFIG_RETPOLINE */
 
 #endif /* __ASSEMBLY__ */
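
Outside of assembly, C code that open-codes a return through inline asm picks
the same behavior up via ASM_RET. A sketch with a hypothetical stub
(example_stub is not an in-tree symbol):

    #include <linux/linkage.h>

    /*
     * ASM_RET expands to "jmp __x86_return_thunk" under CONFIG_RETHUNK,
     * or to "ret" / "ret; int3" otherwise. Objects built with
     * -DBUILD_VDSO (see the vDSO Makefile hunk above) always get the
     * plain ret, since the thunk is not mapped into userspace.
     */
    asm (".pushsection .text, \"ax\"\n"
         ".globl example_stub\n"
         "example_stub:\n"
         ASM_RET
         ".popsection\n");
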
 
index a82f603..61f0c20 100644 (file)
@@ -179,9 +179,13 @@ int hv_set_mem_host_visibility(unsigned long addr, int numpages, bool visible);
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 void hv_ghcb_msr_write(u64 msr, u64 value);
 void hv_ghcb_msr_read(u64 msr, u64 *value);
+bool hv_ghcb_negotiate_protocol(void);
+void hv_ghcb_terminate(unsigned int set, unsigned int reason);
 #else
 static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
 static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
+static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
+static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
 #endif
 
 extern bool hv_isolation_type_snp(void);
index 403e83b..cc615be 100644 (file)
@@ -51,6 +51,8 @@
 #define SPEC_CTRL_STIBP                        BIT(SPEC_CTRL_STIBP_SHIFT)      /* STIBP mask */
 #define SPEC_CTRL_SSBD_SHIFT           2          /* Speculative Store Bypass Disable bit */
 #define SPEC_CTRL_SSBD                 BIT(SPEC_CTRL_SSBD_SHIFT)       /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_RRSBA_DIS_S_SHIFT    6          /* Disable RRSBA behavior */
+#define SPEC_CTRL_RRSBA_DIS_S          BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
 
 #define MSR_IA32_PRED_CMD              0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB                  BIT(0)     /* Indirect Branch Prediction Barrier */
@@ -93,6 +95,7 @@
 #define MSR_IA32_ARCH_CAPABILITIES     0x0000010a
 #define ARCH_CAP_RDCL_NO               BIT(0)  /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL              BIT(1)  /* Enhanced IBRS support */
+#define ARCH_CAP_RSBA                  BIT(2)  /* RET may use alternative branch predictors */
 #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3)  /* Skip L1D flush on vmentry */
 #define ARCH_CAP_SSB_NO                        BIT(4)  /*
                                                 * Not susceptible to Speculative Store Bypass
                                                 * Not susceptible to
                                                 * TSX Async Abort (TAA) vulnerabilities.
                                                 */
+#define ARCH_CAP_SBDR_SSDP_NO          BIT(13) /*
+                                                * Not susceptible to SBDR and SSDP
+                                                * variants of Processor MMIO stale data
+                                                * vulnerabilities.
+                                                */
+#define ARCH_CAP_FBSDP_NO              BIT(14) /*
+                                                * Not susceptible to FBSDP variant of
+                                                * Processor MMIO stale data
+                                                * vulnerabilities.
+                                                */
+#define ARCH_CAP_PSDP_NO               BIT(15) /*
+                                                * Not susceptible to PSDP variant of
+                                                * Processor MMIO stale data
+                                                * vulnerabilities.
+                                                */
+#define ARCH_CAP_FB_CLEAR              BIT(17) /*
+                                                * VERW clears CPU fill buffer
+                                                * even on MDS_NO CPUs.
+                                                */
+#define ARCH_CAP_FB_CLEAR_CTRL         BIT(18) /*
+                                                * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]
+                                                * bit available to control VERW
+                                                * behavior.
+                                                */
+#define ARCH_CAP_RRSBA                 BIT(19) /*
+                                                * Indicates RET may use predictors
+                                                * other than the RSB. With eIBRS
+                                                * enabled predictions in kernel mode
+                                                * are restricted to targets in
+                                                * kernel.
+                                                */
 
 #define MSR_IA32_FLUSH_CMD             0x0000010b
 #define L1D_FLUSH                      BIT(0)  /*
 #define MSR_IA32_MCU_OPT_CTRL          0x00000123
 #define RNGDS_MITG_DIS                 BIT(0)  /* SRBDS support */
 #define RTM_ALLOW                      BIT(1)  /* TSX development mode */
+#define FB_CLEAR_DIS                   BIT(3)  /* CPU Fill buffer clear disable */
 
 #define MSR_IA32_SYSENTER_CS           0x00000174
 #define MSR_IA32_SYSENTER_ESP          0x00000175
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF                        0xc00000e9
 
+#define MSR_ZEN2_SPECTRAL_CHICKEN      0xc00110e3
+#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT  BIT_ULL(1)
+
 /* Fam 16h MSRs */
 #define MSR_F16H_L2I_PERF_CTL          0xc0010230
 #define MSR_F16H_L2I_PERF_CTR          0xc0010231
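
The new ARCH_CAP_* bits follow the usual pattern: read IA32_ARCH_CAPABILITIES
once, then test flags when choosing mitigations. A sketch, assuming the
standard x86 MSR helpers:

    u64 ia32_cap = 0;

    if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
            rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

    if (ia32_cap & ARCH_CAP_RRSBA) {
            /*
             * RETs may still be predicted from alternate (non-RSB)
             * predictors; SPEC_CTRL_RRSBA_DIS_S can disable that in
             * supervisor mode where the CPU supports RRSBA_CTRL.
             */
    }
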
index 29dd27b..3a8fdf8 100644 (file)
@@ -13,6 +13,7 @@
 #define MWAIT_SUBSTATE_SIZE            4
 #define MWAIT_HINT2CSTATE(hint)                (((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
 #define MWAIT_HINT2SUBSTATE(hint)      ((hint) & MWAIT_CSTATE_MASK)
+#define MWAIT_C1_SUBSTATE_MASK  0xf0
 
 #define CPUID_MWAIT_LEAF               5
 #define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
index acbaeaf..cba9420 100644 (file)
@@ -11,6 +11,7 @@
 #include <asm/cpufeatures.h>
 #include <asm/msr-index.h>
 #include <asm/unwind_hints.h>
+#include <asm/percpu.h>
 
 #define RETPOLINE_THUNK_SIZE   32
 
 .endm
 
 /*
+ * (ab)use RETPOLINE_SAFE on RET to exempt 'bare' RET instructions from
+ * RETBleed validation.
+ */
+#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE
+
+/*
+ * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
+ * eventually turn into its own annotation.
+ */
+.macro ANNOTATE_UNRET_END
+#ifdef CONFIG_DEBUG_ENTRY
+       ANNOTATE_RETPOLINE_SAFE
+       nop
+#endif
+.endm
+
+/*
+ * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
+ * to the retpoline thunk with a CS prefix when the register requires
+ * a REX prefix byte to encode. Also see apply_retpolines().
+ */
+.macro __CS_PREFIX reg:req
+       .irp rs,r8,r9,r10,r11,r12,r13,r14,r15
+       .ifc \reg,\rs
+       .byte 0x2e
+       .endif
+       .endr
+.endm
+
+/*
  * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
  * indirect jmp/call which may be susceptible to the Spectre variant 2
  * attack.
  */
 .macro JMP_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
-       ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
-                     __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
-                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE
+       __CS_PREFIX \reg
+       jmp     __x86_indirect_thunk_\reg
 #else
        jmp     *%\reg
+       int3
 #endif
 .endm
 
 .macro CALL_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
-       ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
-                     __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
-                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE
+       __CS_PREFIX \reg
+       call    __x86_indirect_thunk_\reg
 #else
        call    *%\reg
 #endif
   * monstrosity above, manually.
   */
 .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
-#ifdef CONFIG_RETPOLINE
        ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
        __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
 .Lskip_rsb_\@:
+.endm
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
+#define CALL_ZEN_UNTRAIN_RET   "call zen_untrain_ret"
+#else
+#define CALL_ZEN_UNTRAIN_RET   ""
+#endif
+
+/*
+ * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
+ * return thunk isn't mapped into the userspace tables (then again, AMD
+ * typically has NO_MELTDOWN).
+ *
+ * While zen_untrain_ret() doesn't clobber anything but requires stack,
+ * entry_ibpb() will clobber AX, CX, DX.
+ *
+ * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
+ * where we have a stack but before any RET instruction.
+ */
+.macro UNTRAIN_RET
+#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY)
+       ANNOTATE_UNRET_END
+       ALTERNATIVE_2 "",                                               \
+                     CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,          \
+                     "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
 #endif
 .endm
 
        _ASM_PTR " 999b\n\t"                                    \
        ".popsection\n\t"
 
-#ifdef CONFIG_RETPOLINE
-
 typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
+extern retpoline_thunk_t __x86_indirect_thunk_array[];
+
+extern void __x86_return_thunk(void);
+extern void zen_untrain_ret(void);
+extern void entry_ibpb(void);
+
+#ifdef CONFIG_RETPOLINE
 
 #define GEN(reg) \
        extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
 #include <asm/GEN-for-each-reg.h>
 #undef GEN
 
-extern retpoline_thunk_t __x86_indirect_thunk_array[];
-
 #ifdef CONFIG_X86_64
 
 /*
@@ -193,6 +250,7 @@ enum spectre_v2_mitigation {
        SPECTRE_V2_EIBRS,
        SPECTRE_V2_EIBRS_RETPOLINE,
        SPECTRE_V2_EIBRS_LFENCE,
+       SPECTRE_V2_IBRS,
 };
 
 /* The indirect branch speculation control variants */
@@ -235,6 +293,9 @@ static inline void indirect_branch_prediction_barrier(void)
 
 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;
+DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
+extern void write_spec_ctrl_current(u64 val, bool force);
+extern u64 spec_ctrl_current(void);
 
 /*
  * With retpoline, we must use IBRS to restrict branch prediction
@@ -244,18 +305,18 @@ extern u64 x86_spec_ctrl_base;
  */
 #define firmware_restrict_branch_speculation_start()                   \
 do {                                                                   \
-       u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;                  \
-                                                                       \
        preempt_disable();                                              \
-       alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
+       alternative_msr_write(MSR_IA32_SPEC_CTRL,                       \
+                             spec_ctrl_current() | SPEC_CTRL_IBRS,     \
                              X86_FEATURE_USE_IBRS_FW);                 \
+       alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,         \
+                             X86_FEATURE_USE_IBPB_FW);                 \
 } while (0)
 
 #define firmware_restrict_branch_speculation_end()                     \
 do {                                                                   \
-       u64 val = x86_spec_ctrl_base;                                   \
-                                                                       \
-       alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
+       alternative_msr_write(MSR_IA32_SPEC_CTRL,                       \
+                             spec_ctrl_current(),                      \
                              X86_FEATURE_USE_IBRS_FW);                 \
        preempt_enable();                                               \
 } while (0)
@@ -269,6 +330,8 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
 
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
+DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+
 #include <asm/segment.h>
 
 /**
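
The firmware_restrict_branch_speculation_*() pair now also fires an IBPB
(gated on X86_FEATURE_USE_IBPB_FW) before dropping into firmware. Callers
simply bracket the firmware call; a sketch with a hypothetical wrapper:

    #include <asm/nospec-branch.h>

    static unsigned long guarded_firmware_call(unsigned long (*fw)(void))
    {
            unsigned long ret;

            firmware_restrict_branch_speculation_start();
            ret = fw();     /* untrusted firmware code runs here */
            firmware_restrict_branch_speculation_end();

            return ret;
    }
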
index f52a886..70533fd 100644 (file)
@@ -69,6 +69,8 @@ void pcibios_scan_specific_bus(int busn);
 
 /* pci-irq.c */
 
+struct pci_dev;
+
 struct irq_info {
        u8 bus, devfn;                  /* Bus, device and function */
        struct {
@@ -246,3 +248,9 @@ static inline void mmio_config_writel(void __iomem *pos, u32 val)
 # define x86_default_pci_init_irq      NULL
 # define x86_default_pci_fixup_irqs    NULL
 #endif
+
+#if defined(CONFIG_PCI) && defined(CONFIG_ACPI)
+extern bool pci_use_e820;
+#else
+#define pci_use_e820 false
+#endif
index 7590ac2..f37cbff 100644 (file)
@@ -108,21 +108,21 @@ extern unsigned long _brk_end;
 void *extend_brk(size_t size, size_t align);
 
 /*
- * Reserve space in the brk section.  The name must be unique within the file,
- * and somewhat descriptive.  The size is in bytes.
+ * Reserve space in the .brk section, which is a block of memory from which the
+ * caller is allowed to allocate very early (before even memblock is available)
+ * by calling extend_brk().  All allocated memory will eventually be converted
+ * to memblock.  Any leftover unallocated memory will be freed.
  *
- * The allocation is done using inline asm (rather than using a section
- * attribute on a normal variable) in order to allow the use of @nobits, so
- * that it doesn't take up any space in the vmlinux file.
+ * The size is in bytes.
  */
-#define RESERVE_BRK(name, size)                                                \
-       asm(".pushsection .brk_reservation,\"aw\",@nobits\n\t"          \
-           ".brk." #name ":\n\t"                                       \
-           ".skip " __stringify(size) "\n\t"                           \
-           ".size .brk." #name ", " __stringify(size) "\n\t"           \
-           ".popsection\n\t")
+#define RESERVE_BRK(name, size)                                        \
+       __section(".bss..brk") __aligned(1) __used      \
+       static char __brk_##name[size]
 
 extern void probe_roms(void);
+
+void clear_bss(void);
+
 #ifdef __i386__
 
 asmlinkage void __init i386_start_kernel(void);
@@ -133,12 +133,19 @@ asmlinkage void __init x86_64_start_reservations(char *real_mode_data);
 
 #endif /* __i386__ */
 #endif /* _SETUP */
-#else
-#define RESERVE_BRK(name,sz)                           \
-       .pushsection .brk_reservation,"aw",@nobits;     \
-.brk.name:                                             \
-1:     .skip sz;                                       \
-       .size .brk.name,.-1b;                           \
+
+#else  /* __ASSEMBLY */
+
+.macro __RESERVE_BRK name, size
+       .pushsection .bss..brk, "aw"
+SYM_DATA_START(__brk_\name)
+       .skip \size
+SYM_DATA_END(__brk_\name)
        .popsection
+.endm
+
+#define RESERVE_BRK(name, size) __RESERVE_BRK name, size
+
 #endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_X86_SETUP_H */
index 1951452..4a23e52 100644 (file)
@@ -72,7 +72,6 @@ static inline u64 lower_bits(u64 val, unsigned int bits)
 
 struct real_mode_header;
 enum stack_type;
-struct ghcb;
 
 /* Early IDT entry points for #VC handler */
 extern void vc_no_ghcb(void);
@@ -156,11 +155,7 @@ static __always_inline void sev_es_nmi_complete(void)
                __sev_es_nmi_complete();
 }
 extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
-extern enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
-                                         bool set_ghcb_msr,
-                                         struct es_em_ctxt *ctxt,
-                                         u64 exit_code, u64 exit_info_1,
-                                         u64 exit_info_2);
+
 static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
 {
        int rc;
index 45b18eb..35f709f 100644 (file)
@@ -295,6 +295,15 @@ static inline int enqcmds(void __iomem *dst, const void *src)
        return 0;
 }
 
+static inline void tile_release(void)
+{
+       /*
+        * Instruction opcode for TILERELEASE; supported in binutils
+        * version >= 2.36.
+        */
+       asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0");
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_SPECIAL_INSNS_H */
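
tile_release() pairs with the fpu_idle_fpregs() hook declared in fpu/api.h
above: AMX tile state must be back in INIT before a core can enter deeper
C-states, so the tiles are released on the idle path. A plausible shape for
that hook, assuming the internal FPU helpers (a sketch, not necessarily the
in-tree body):

    /* Called on the idle path before mwait/hlt. */
    void fpu_idle_fpregs(void)
    {
            /* AMX tiles live? Release them and mark fpregs inactive. */
            if (cpu_feature_enabled(X86_FEATURE_XGETBV1) &&
                (xfeatures_in_use() & XFEATURE_MASK_XTILE)) {
                    tile_release();
                    fpregs_deactivate(&current->thread.fpu);
            }
    }
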
index 2d8dacd..343b722 100644 (file)
  * relative displacement across sections.
  */
 
+/*
+ * The trampoline is 8 bytes and of the general form:
+ *
+ *   jmp.d32 \func
+ *   ud1 %esp, %ecx
+ *
+ * That trailing #UD provides a speculation stop and serves as a unique
+ * 3 byte signature identifying static call trampolines. Also see tramp_ud[]
+ * and __static_call_fixup().
+ */
 #define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, insns)                   \
        asm(".pushsection .static_call.text, \"ax\"             \n"     \
            ".align 4                                           \n"     \
@@ -28,7 +38,7 @@
            STATIC_CALL_TRAMP_STR(name) ":                      \n"     \
            ANNOTATE_NOENDBR                                            \
            insns "                                             \n"     \
-           ".byte 0x53, 0x43, 0x54                             \n"     \
+           ".byte 0x0f, 0xb9, 0xcc                             \n"     \
            ".type " STATIC_CALL_TRAMP_STR(name) ", @function   \n"     \
            ".size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) " \n" \
            ".popsection                                        \n")
 #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func)                      \
        __ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)")
 
+#ifdef CONFIG_RETHUNK
+#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)                       \
+       __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "jmp __x86_return_thunk")
+#else
 #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)                       \
        __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; int3; nop; nop; nop")
+#endif
 
 #define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)                       \
        ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
@@ -48,4 +63,6 @@
            ".long " STATIC_CALL_KEY_STR(name) " - .            \n"     \
            ".popsection                                        \n")
 
+extern bool __static_call_fixup(void *tramp, u8 op, void *dest);
+
 #endif /* _ASM_STATIC_CALL_H */
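
The 0x0f 0xb9 0xcc tail (ud1 %esp, %ecx) doubles as a signature that tooling
such as __static_call_fixup() can key on to recognize trampolines. An
illustrative check (hypothetical helper, not in-tree):

    #include <linux/string.h>
    #include <linux/types.h>

    static bool is_static_call_tramp(const u8 *tramp)
    {
            static const u8 sig[3] = { 0x0f, 0xb9, 0xcc }; /* ud1 %esp, %ecx */

            /* 8-byte trampoline: 5-byte jmp.d32, then the 3-byte #UD tail. */
            return !memcmp(tramp + 5, sig, sizeof(sig));
    }
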
index 1bfe979..580636c 100644 (file)
@@ -2,9 +2,6 @@
 #ifndef _ASM_X86_TLB_H
 #define _ASM_X86_TLB_H
 
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-
 #define tlb_flush tlb_flush
 static inline void tlb_flush(struct mmu_gather *tlb);
 
index 4af5579..cda3118 100644 (file)
@@ -16,6 +16,7 @@
 void __flush_tlb_all(void);
 
 #define TLB_FLUSH_ALL  -1UL
+#define TLB_GENERATION_INVALID 0
 
 void cr4_update_irqsoff(unsigned long set, unsigned long clear);
 unsigned long cr4_read_shadow(void);
index 8b33674..f66fbe6 100644 (file)
@@ -8,7 +8,11 @@
 #ifdef __ASSEMBLY__
 
 .macro UNWIND_HINT_EMPTY
-       UNWIND_HINT sp_reg=ORC_REG_UNDEFINED type=UNWIND_HINT_TYPE_CALL end=1
+       UNWIND_HINT type=UNWIND_HINT_TYPE_CALL end=1
+.endm
+
+.macro UNWIND_HINT_ENTRY
+       UNWIND_HINT type=UNWIND_HINT_TYPE_ENTRY end=1
 .endm
 
 .macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 partial=0
        UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=8 type=UNWIND_HINT_TYPE_FUNC
 .endm
 
+.macro UNWIND_HINT_SAVE
+       UNWIND_HINT type=UNWIND_HINT_TYPE_SAVE
+.endm
+
+.macro UNWIND_HINT_RESTORE
+       UNWIND_HINT type=UNWIND_HINT_TYPE_RESTORE
+.endm
+
 #else
 
 #define UNWIND_HINT_FUNC \
index bea5cdc..3422906 100644 (file)
 #define SETUP_APPLE_PROPERTIES         5
 #define SETUP_JAILHOUSE                        6
 #define SETUP_CC_BLOB                  7
+#define SETUP_IMA                      8
+#define SETUP_RNG_SEED                 9
+#define SETUP_ENUM_MAX                 SETUP_RNG_SEED
 
 #define SETUP_INDIRECT                 (1<<31)
-
-/* SETUP_INDIRECT | max(SETUP_*) */
-#define SETUP_TYPE_MAX                 (SETUP_INDIRECT | SETUP_JAILHOUSE)
+#define SETUP_TYPE_MAX                 (SETUP_ENUM_MAX | SETUP_INDIRECT)
 
 /* ram_size flags */
 #define RAMDISK_IMAGE_START_MASK       0x07FF
@@ -172,6 +173,14 @@ struct jailhouse_setup_data {
        } __attribute__((packed)) v2;
 } __attribute__((packed));
 
+/*
+ * IMA buffer setup data information from the previous kernel during kexec
+ */
+struct ima_setup_data {
+       __u64 addr;
+       __u64 size;
+} __attribute__((packed));
+
 /* The so-called "zeropage" */
 struct boot_params {
        struct screen_info screen_info;                 /* 0x000 */
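
SETUP_IMA entries travel in the boot_params.hdr.setup_data chain like any
other setup_data blob. A sketch of locating the IMA buffer early in boot,
assuming the early_memremap() helpers:

    u64 pa_data = boot_params.hdr.setup_data;

    while (pa_data) {
            struct setup_data *data;

            data = early_memremap(pa_data, sizeof(*data));
            if (data->type == SETUP_IMA) {
                    /*
                     * The payload is a struct ima_setup_data; remap with
                     * data->len to read ->addr and ->size of the buffer
                     * handed over from the previous kernel.
                     */
            }
            pa_data = data->next;
            early_memunmap(data, sizeof(*data));
    }
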
index 03364dc..a20a5eb 100644 (file)
@@ -34,12 +34,6 @@ KASAN_SANITIZE_sev.o                                 := n
 # by several compilation units. To be safe, disable all instrumentation.
 KCSAN_SANITIZE := n
 
-OBJECT_FILES_NON_STANDARD_test_nx.o                    := y
-
-ifdef CONFIG_FRAME_POINTER
-OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o             := y
-endif
-
 # If instrumentation of this dir is enabled, boot hangs during first second.
 # Probably could be more selective here, but note that files related to irqs,
 # boot, dumpstack/stacktrace, etc are either non-interesting or can lead to
index 8b8cbf2..8d8752b 100644
 
 /* Refer to drivers/acpi/cppc_acpi.c for the description of functions */
 
+bool cpc_supported_by_cpu(void)
+{
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_AMD:
+       case X86_VENDOR_HYGON:
+               if (boot_cpu_data.x86 == 0x19 && ((boot_cpu_data.x86_model <= 0x0f) ||
+                   (boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f)))
+                       return true;
+               else if (boot_cpu_data.x86 == 0x17 &&
+                        boot_cpu_data.x86_model >= 0x70 && boot_cpu_data.x86_model <= 0x7f)
+                       return true;
+               return boot_cpu_has(X86_FEATURE_CPPC);
+       }
+       return false;
+}
+
 bool cpc_ffh_supported(void)
 {
        return true;
index e257f6c..62f6b8b 100644
@@ -115,6 +115,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
 }
 
 extern s32 __retpoline_sites[], __retpoline_sites_end[];
+extern s32 __return_sites[], __return_sites_end[];
 extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern s32 __smp_locks[], __smp_locks_end[];
@@ -507,9 +508,78 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
        }
 }
 
+#ifdef CONFIG_RETHUNK
+/*
+ * Rewrite the compiler generated return thunk tail-calls.
+ *
+ * For example, convert:
+ *
+ *   JMP __x86_return_thunk
+ *
+ * into:
+ *
+ *   RET
+ */
+static int patch_return(void *addr, struct insn *insn, u8 *bytes)
+{
+       int i = 0;
+
+       if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+               return -1;
+
+       bytes[i++] = RET_INSN_OPCODE;
+
+       for (; i < insn->length;)
+               bytes[i++] = INT3_INSN_OPCODE;
+
+       return i;
+}
+
+void __init_or_module noinline apply_returns(s32 *start, s32 *end)
+{
+       s32 *s;
+
+       for (s = start; s < end; s++) {
+               void *dest = NULL, *addr = (void *)s + *s;
+               struct insn insn;
+               int len, ret;
+               u8 bytes[16];
+               u8 op;
+
+               ret = insn_decode_kernel(&insn, addr);
+               if (WARN_ON_ONCE(ret < 0))
+                       continue;
+
+               op = insn.opcode.bytes[0];
+               if (op == JMP32_INSN_OPCODE)
+                       dest = addr + insn.length + insn.immediate.value;
+
+               if (__static_call_fixup(addr, op, dest) ||
+                   WARN_ONCE(dest != &__x86_return_thunk,
+                             "missing return thunk: %pS-%pS: %*ph",
+                             addr, dest, 5, addr))
+                       continue;
+
+               DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
+                       addr, addr, insn.length,
+                       addr + insn.length + insn.immediate.value);
+
+               len = patch_return(addr, &insn, bytes);
+               if (len == insn.length) {
+                       DUMP_BYTES(((u8*)addr),  len, "%px: orig: ", addr);
+                       DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
+                       text_poke_early(addr, bytes, len);
+               }
+       }
+}
+#else
+void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
+#endif /* CONFIG_RETHUNK */
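An illustrative userspace sketch (not part of the patch) of the byte-level rewrite patch_return() performs, assuming a 5-byte e9-encoded JMP site; sketch_patch_return() and the plain memcpy() standing in for text_poke_early() are hypothetical:

        #include <stdint.h>
        #include <string.h>

        #define RET_OPCODE      0xc3    /* ret */
        #define INT3_OPCODE     0xcc    /* int3 */

        /* Rewrite "jmp __x86_return_thunk" (e9 xx xx xx xx) into "ret; int3...". */
        static int sketch_patch_return(uint8_t *site, int insn_len)
        {
                uint8_t bytes[16];
                int i = 0;

                if (insn_len > (int)sizeof(bytes))
                        return -1;

                bytes[i++] = RET_OPCODE;
                while (i < insn_len)
                        bytes[i++] = INT3_OPCODE;       /* pad the JMP's footprint */

                memcpy(site, bytes, insn_len);          /* text_poke_early() stand-in */
                return insn_len;
        }

Note that when X86_FEATURE_RETHUNK stays enabled, the real patch_return() bails out with -1 instead, leaving the site jumping to the return thunk.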
+
 #else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */
 
 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
+void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
 
 #endif /* CONFIG_RETPOLINE && CONFIG_OBJTOOL */
 
@@ -860,6 +930,7 @@ void __init alternative_instructions(void)
         * those can rewrite the retpoline thunks.
         */
        apply_retpolines(__retpoline_sites, __retpoline_sites_end);
+       apply_returns(__return_sites, __return_sites_end);
 
        /*
         * Then patch alternatives, such that those paravirt calls that are in
index 190e0f7..4266b64 100644
 #define PCI_DEVICE_ID_AMD_17H_M10H_ROOT        0x15d0
 #define PCI_DEVICE_ID_AMD_17H_M30H_ROOT        0x1480
 #define PCI_DEVICE_ID_AMD_17H_M60H_ROOT        0x1630
+#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT        0x14b5
 #define PCI_DEVICE_ID_AMD_19H_M10H_ROOT        0x14a4
+#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT        0x14d8
+#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT        0x14e8
 #define PCI_DEVICE_ID_AMD_17H_DF_F4    0x1464
 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
 #define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
+#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4 0x1728
 #define PCI_DEVICE_ID_AMD_19H_DF_F4    0x1654
 #define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4 0x14b1
 #define PCI_DEVICE_ID_AMD_19H_M40H_ROOT        0x14b5
 #define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4 0x167d
 #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
+#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4 0x14e4
+#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4
 
 /* Protect the PCI config register pairs used for SMN. */
 static DEFINE_MUTEX(smn_mutex);
@@ -41,8 +47,11 @@ static const struct pci_device_id amd_root_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
        {}
 };
 
@@ -61,12 +70,15 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
        {}
 };
 
@@ -81,6 +93,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
index 4373080..cb50589 100644
@@ -19,6 +19,7 @@
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 #include <asm/tdx.h>
+#include "../kvm/vmx/vmx.h"
 
 #ifdef CONFIG_XEN
 #include <xen/interface/xen.h>
@@ -107,4 +108,9 @@ static void __used common(void)
        OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
        OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
        OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
+
+       if (IS_ENABLED(CONFIG_KVM_INTEL)) {
+               BLANK();
+               OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);
+       }
 }
index 0c0b097..35d5288 100644
@@ -862,6 +862,28 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
        clear_rdrand_cpuid_bit(c);
 }
 
+void init_spectral_chicken(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_CPU_UNRET_ENTRY
+       u64 value;
+
+       /*
+        * On Zen2 we offer this chicken (bit) on the altar of Speculation.
+        *
+        * This suppresses speculation from the middle of a basic block, i.e. it
+        * suppresses non-branch predictions.
+        *
+        * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H.
+        */
+       if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) {
+               if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
+                       value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
+                       wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
+               }
+       }
+#endif
+}
+
 static void init_amd_zn(struct cpuinfo_x86 *c)
 {
        set_cpu_cap(c, X86_FEATURE_ZEN);
@@ -870,12 +892,21 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
        node_reclaim_distance = 32;
 #endif
 
-       /*
-        * Fix erratum 1076: CPB feature bit not being set in CPUID.
-        * Always set it, except when running under a hypervisor.
-        */
-       if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
-               set_cpu_cap(c, X86_FEATURE_CPB);
+       /* Fix up CPUID bits, but only if not virtualised. */
+       if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
+
+               /* Erratum 1076: CPB feature bit not being set in CPUID. */
+               if (!cpu_has(c, X86_FEATURE_CPB))
+                       set_cpu_cap(c, X86_FEATURE_CPB);
+
+               /*
+                * Zen3 (Fam19 model < 0x10) parts are not susceptible to
+                * Branch Type Confusion, but predate the allocation of the
+                * BTC_NO bit.
+                */
+               if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
+                       set_cpu_cap(c, X86_FEATURE_BTC_NO);
+       }
 }
 
 static void init_amd(struct cpuinfo_x86 *c)
@@ -907,7 +938,8 @@ static void init_amd(struct cpuinfo_x86 *c)
        case 0x12: init_amd_ln(c); break;
        case 0x15: init_amd_bd(c); break;
        case 0x16: init_amd_jg(c); break;
-       case 0x17: fallthrough;
+       case 0x17: init_spectral_chicken(c);
+                  fallthrough;
        case 0x19: init_amd_zn(c); break;
        }
 
index d879a6c..6761668 100644
 
 static void __init spectre_v1_select_mitigation(void);
 static void __init spectre_v2_select_mitigation(void);
+static void __init retbleed_select_mitigation(void);
+static void __init spectre_v2_user_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
 static void __init mds_select_mitigation(void);
-static void __init mds_print_mitigation(void);
+static void __init md_clear_update_mitigation(void);
+static void __init md_clear_select_mitigation(void);
 static void __init taa_select_mitigation(void);
+static void __init mmio_select_mitigation(void);
 static void __init srbds_select_mitigation(void);
 static void __init l1d_flush_select_mitigation(void);
 
-/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+/* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+
+/* The current value of the SPEC_CTRL MSR with task-specific bits set */
+DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
+
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
 /*
- * The vendor and possibly platform specific bits which can be modified in
- * x86_spec_ctrl_base.
+ * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
+ * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
  */
-static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+void write_spec_ctrl_current(u64 val, bool force)
+{
+       if (this_cpu_read(x86_spec_ctrl_current) == val)
+               return;
+
+       this_cpu_write(x86_spec_ctrl_current, val);
+
+       /*
+        * When KERNEL_IBRS is enabled, this MSR is written on return-to-user;
+        * unless forced, the update can be delayed until that time.
+        */
+       if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
+               wrmsrl(MSR_IA32_SPEC_CTRL, val);
+}
+
+u64 spec_ctrl_current(void)
+{
+       return this_cpu_read(x86_spec_ctrl_current);
+}
+EXPORT_SYMBOL_GPL(spec_ctrl_current);
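A hypothetical caller sketch (not from this patch) showing the intended use on context switch; it assumes the existing ssbd_tif_to_spec_ctrl()/stibp_tif_to_spec_ctrl() helpers:

        static void sketch_switch_update(u64 tifn)
        {
                u64 msr = x86_spec_ctrl_base;

                /* Fold in the task-specific SSBD/STIBP bits ... */
                msr |= ssbd_tif_to_spec_ctrl(tifn);
                msr |= stibp_tif_to_spec_ctrl(tifn);

                /*
                 * ... and let write_spec_ctrl_current() elide the wrmsrl()
                 * when nothing changed, or defer it under KERNEL_IBRS, where
                 * SPEC_CTRL is rewritten on return-to-user anyway.
                 */
                write_spec_ctrl_current(msr, false);
        }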
 
 /*
  * AMD specific MSR info for Speculative Store Bypass control.
@@ -85,6 +113,10 @@ EXPORT_SYMBOL_GPL(mds_idle_clear);
  */
 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
+/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
+DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
+
 void __init check_bugs(void)
 {
        identify_boot_cpu();
@@ -108,26 +140,27 @@ void __init check_bugs(void)
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
-       /* Allow STIBP in MSR_SPEC_CTRL if supported */
-       if (boot_cpu_has(X86_FEATURE_STIBP))
-               x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
-
        /* Select the proper CPU mitigations before patching alternatives: */
        spectre_v1_select_mitigation();
        spectre_v2_select_mitigation();
+       /*
+        * retbleed_select_mitigation() relies on the state set by
+        * spectre_v2_select_mitigation(); specifically it wants to know about
+        * spectre_v2=ibrs.
+        */
+       retbleed_select_mitigation();
+       /*
+        * spectre_v2_user_select_mitigation() relies on the state set by
+        * retbleed_select_mitigation(); specifically the STIBP selection is
+        * forced for UNRET.
+        */
+       spectre_v2_user_select_mitigation();
        ssb_select_mitigation();
        l1tf_select_mitigation();
-       mds_select_mitigation();
-       taa_select_mitigation();
+       md_clear_select_mitigation();
        srbds_select_mitigation();
        l1d_flush_select_mitigation();
 
-       /*
-        * As MDS and TAA mitigations are inter-related, print MDS
-        * mitigation until after TAA mitigation selection is done.
-        */
-       mds_print_mitigation();
-
        arch_smt_update();
 
 #ifdef CONFIG_X86_32
@@ -162,31 +195,17 @@ void __init check_bugs(void)
 #endif
 }
 
+/*
+ * NOTE: This function is *only* called for SVM.  VMX spec_ctrl handling is
+ * done in vmenter.S.
+ */
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
-       u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+       u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
        struct thread_info *ti = current_thread_info();
 
-       /* Is MSR_SPEC_CTRL implemented ? */
        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
-               /*
-                * Restrict guest_spec_ctrl to supported values. Clear the
-                * modifiable bits in the host base value and or the
-                * modifiable bits from the guest value.
-                */
-               guestval = hostval & ~x86_spec_ctrl_mask;
-               guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
-
-               /* SSBD controlled in MSR_SPEC_CTRL */
-               if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-                   static_cpu_has(X86_FEATURE_AMD_SSBD))
-                       hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
-
-               /* Conditional STIBP enabled? */
-               if (static_branch_unlikely(&switch_to_cond_stibp))
-                       hostval |= stibp_tif_to_spec_ctrl(ti->flags);
-
                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -267,14 +286,6 @@ static void __init mds_select_mitigation(void)
        }
 }
 
-static void __init mds_print_mitigation(void)
-{
-       if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
-               return;
-
-       pr_info("%s\n", mds_strings[mds_mitigation]);
-}
-
 static int __init mds_cmdline(char *str)
 {
        if (!boot_cpu_has_bug(X86_BUG_MDS))
@@ -329,7 +340,7 @@ static void __init taa_select_mitigation(void)
        /* TSX previously disabled by tsx=off */
        if (!boot_cpu_has(X86_FEATURE_RTM)) {
                taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
-               goto out;
+               return;
        }
 
        if (cpu_mitigations_off()) {
@@ -343,7 +354,7 @@ static void __init taa_select_mitigation(void)
         */
        if (taa_mitigation == TAA_MITIGATION_OFF &&
            mds_mitigation == MDS_MITIGATION_OFF)
-               goto out;
+               return;
 
        if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
                taa_mitigation = TAA_MITIGATION_VERW;
@@ -375,18 +386,6 @@ static void __init taa_select_mitigation(void)
 
        if (taa_nosmt || cpu_mitigations_auto_nosmt())
                cpu_smt_disable(false);
-
-       /*
-        * Update MDS mitigation, if necessary, as the mds_user_clear is
-        * now enabled for TAA mitigation.
-        */
-       if (mds_mitigation == MDS_MITIGATION_OFF &&
-           boot_cpu_has_bug(X86_BUG_MDS)) {
-               mds_mitigation = MDS_MITIGATION_FULL;
-               mds_select_mitigation();
-       }
-out:
-       pr_info("%s\n", taa_strings[taa_mitigation]);
 }
 
 static int __init tsx_async_abort_parse_cmdline(char *str)
@@ -411,6 +410,151 @@ static int __init tsx_async_abort_parse_cmdline(char *str)
 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
 
 #undef pr_fmt
+#define pr_fmt(fmt)    "MMIO Stale Data: " fmt
+
+enum mmio_mitigations {
+       MMIO_MITIGATION_OFF,
+       MMIO_MITIGATION_UCODE_NEEDED,
+       MMIO_MITIGATION_VERW,
+};
+
+/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
+static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
+static bool mmio_nosmt __ro_after_init = false;
+
+static const char * const mmio_strings[] = {
+       [MMIO_MITIGATION_OFF]           = "Vulnerable",
+       [MMIO_MITIGATION_UCODE_NEEDED]  = "Vulnerable: Clear CPU buffers attempted, no microcode",
+       [MMIO_MITIGATION_VERW]          = "Mitigation: Clear CPU buffers",
+};
+
+static void __init mmio_select_mitigation(void)
+{
+       u64 ia32_cap;
+
+       if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
+           cpu_mitigations_off()) {
+               mmio_mitigation = MMIO_MITIGATION_OFF;
+               return;
+       }
+
+       if (mmio_mitigation == MMIO_MITIGATION_OFF)
+               return;
+
+       ia32_cap = x86_read_arch_cap_msr();
+
+       /*
+        * Enable CPU buffer clear mitigation for host and VMM, if also affected
+        * by MDS or TAA. Otherwise, enable mitigation for VMM only.
+        */
+       if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
+                                             boot_cpu_has(X86_FEATURE_RTM)))
+               static_branch_enable(&mds_user_clear);
+       else
+               static_branch_enable(&mmio_stale_data_clear);
+
+       /*
+        * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
+        * be propagated to uncore buffers, clearing the Fill buffers on idle
+        * is required irrespective of SMT state.
+        */
+       if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
+               static_branch_enable(&mds_idle_clear);
+
+       /*
+        * Check if the system has the right microcode.
+        *
+        * CPU Fill buffer clear mitigation is enumerated by either an explicit
+        * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
+        * affected systems.
+        */
+       if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
+           (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
+            boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
+            !(ia32_cap & ARCH_CAP_MDS_NO)))
+               mmio_mitigation = MMIO_MITIGATION_VERW;
+       else
+               mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
+
+       if (mmio_nosmt || cpu_mitigations_auto_nosmt())
+               cpu_smt_disable(false);
+}
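The microcode check above condenses to a small predicate; a sketch (not part of the patch) reusing the ARCH_CAP_* bit macros from msr-index.h, with sketch_mmio_verw_ucode() as a hypothetical name:

        /* Does the microcode enumerate a usable VERW fill-buffer clear? */
        static bool sketch_mmio_verw_ucode(u64 ia32_cap, bool md_clear, bool l1d_flush)
        {
                if (ia32_cap & ARCH_CAP_FB_CLEAR)       /* explicit enumeration */
                        return true;
                /* On MDS-affected parts, MD_CLEAR + L1D_FLUSH imply the same. */
                return md_clear && l1d_flush && !(ia32_cap & ARCH_CAP_MDS_NO);
        }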
+
+static int __init mmio_stale_data_parse_cmdline(char *str)
+{
+       if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+               return 0;
+
+       if (!str)
+               return -EINVAL;
+
+       if (!strcmp(str, "off")) {
+               mmio_mitigation = MMIO_MITIGATION_OFF;
+       } else if (!strcmp(str, "full")) {
+               mmio_mitigation = MMIO_MITIGATION_VERW;
+       } else if (!strcmp(str, "full,nosmt")) {
+               mmio_mitigation = MMIO_MITIGATION_VERW;
+               mmio_nosmt = true;
+       }
+
+       return 0;
+}
+early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "" fmt
+
+static void __init md_clear_update_mitigation(void)
+{
+       if (cpu_mitigations_off())
+               return;
+
+       if (!static_key_enabled(&mds_user_clear))
+               goto out;
+
+       /*
+        * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
+        * mitigation, if necessary.
+        */
+       if (mds_mitigation == MDS_MITIGATION_OFF &&
+           boot_cpu_has_bug(X86_BUG_MDS)) {
+               mds_mitigation = MDS_MITIGATION_FULL;
+               mds_select_mitigation();
+       }
+       if (taa_mitigation == TAA_MITIGATION_OFF &&
+           boot_cpu_has_bug(X86_BUG_TAA)) {
+               taa_mitigation = TAA_MITIGATION_VERW;
+               taa_select_mitigation();
+       }
+       if (mmio_mitigation == MMIO_MITIGATION_OFF &&
+           boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
+               mmio_mitigation = MMIO_MITIGATION_VERW;
+               mmio_select_mitigation();
+       }
+out:
+       if (boot_cpu_has_bug(X86_BUG_MDS))
+               pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
+       if (boot_cpu_has_bug(X86_BUG_TAA))
+               pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
+       if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+               pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
+}
+
+static void __init md_clear_select_mitigation(void)
+{
+       mds_select_mitigation();
+       taa_select_mitigation();
+       mmio_select_mitigation();
+
+       /*
+        * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
+        * and print their mitigation after MDS, TAA and MMIO Stale Data
+        * mitigation selection is done.
+        */
+       md_clear_update_mitigation();
+}
+
+#undef pr_fmt
 #define pr_fmt(fmt)    "SRBDS: " fmt
 
 enum srbds_mitigations {
@@ -478,11 +622,13 @@ static void __init srbds_select_mitigation(void)
                return;
 
        /*
-        * Check to see if this is one of the MDS_NO systems supporting
-        * TSX that are only exposed to SRBDS when TSX is enabled.
+        * Check to see if this is one of the MDS_NO systems supporting TSX that
+        * are only exposed to SRBDS when TSX is enabled, or when the CPU is
+        * affected by the Processor MMIO Stale Data vulnerability.
         */
        ia32_cap = x86_read_arch_cap_msr();
-       if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
+       if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+           !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
                srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
        else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
@@ -626,12 +772,180 @@ static int __init nospectre_v1_cmdline(char *str)
 }
 early_param("nospectre_v1", nospectre_v1_cmdline);
 
-#undef pr_fmt
-#define pr_fmt(fmt)     "Spectre V2 : " fmt
-
 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
        SPECTRE_V2_NONE;
 
+#undef pr_fmt
+#define pr_fmt(fmt)     "RETBleed: " fmt
+
+enum retbleed_mitigation {
+       RETBLEED_MITIGATION_NONE,
+       RETBLEED_MITIGATION_UNRET,
+       RETBLEED_MITIGATION_IBPB,
+       RETBLEED_MITIGATION_IBRS,
+       RETBLEED_MITIGATION_EIBRS,
+};
+
+enum retbleed_mitigation_cmd {
+       RETBLEED_CMD_OFF,
+       RETBLEED_CMD_AUTO,
+       RETBLEED_CMD_UNRET,
+       RETBLEED_CMD_IBPB,
+};
+
+static const char * const retbleed_strings[] = {
+       [RETBLEED_MITIGATION_NONE]      = "Vulnerable",
+       [RETBLEED_MITIGATION_UNRET]     = "Mitigation: untrained return thunk",
+       [RETBLEED_MITIGATION_IBPB]      = "Mitigation: IBPB",
+       [RETBLEED_MITIGATION_IBRS]      = "Mitigation: IBRS",
+       [RETBLEED_MITIGATION_EIBRS]     = "Mitigation: Enhanced IBRS",
+};
+
+static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
+       RETBLEED_MITIGATION_NONE;
+static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
+       RETBLEED_CMD_AUTO;
+
+static int __ro_after_init retbleed_nosmt = false;
+
+static int __init retbleed_parse_cmdline(char *str)
+{
+       if (!str)
+               return -EINVAL;
+
+       while (str) {
+               char *next = strchr(str, ',');
+               if (next) {
+                       *next = 0;
+                       next++;
+               }
+
+               if (!strcmp(str, "off")) {
+                       retbleed_cmd = RETBLEED_CMD_OFF;
+               } else if (!strcmp(str, "auto")) {
+                       retbleed_cmd = RETBLEED_CMD_AUTO;
+               } else if (!strcmp(str, "unret")) {
+                       retbleed_cmd = RETBLEED_CMD_UNRET;
+               } else if (!strcmp(str, "ibpb")) {
+                       retbleed_cmd = RETBLEED_CMD_IBPB;
+               } else if (!strcmp(str, "nosmt")) {
+                       retbleed_nosmt = true;
+               } else {
+                       pr_err("Ignoring unknown retbleed option (%s).", str);
+               }
+
+               str = next;
+       }
+
+       return 0;
+}
+early_param("retbleed", retbleed_parse_cmdline);
+
+#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
+#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
+
+static void __init retbleed_select_mitigation(void)
+{
+       bool mitigate_smt = false;
+
+       if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
+               return;
+
+       switch (retbleed_cmd) {
+       case RETBLEED_CMD_OFF:
+               return;
+
+       case RETBLEED_CMD_UNRET:
+               if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
+                       retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+               } else {
+                       pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
+                       goto do_cmd_auto;
+               }
+               break;
+
+       case RETBLEED_CMD_IBPB:
+               if (!boot_cpu_has(X86_FEATURE_IBPB)) {
+                       pr_err("WARNING: CPU does not support IBPB.\n");
+                       goto do_cmd_auto;
+               } else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
+                       retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+               } else {
+                       pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
+                       goto do_cmd_auto;
+               }
+               break;
+
+do_cmd_auto:
+       case RETBLEED_CMD_AUTO:
+       default:
+               if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+                   boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+                       if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
+                               retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+                       else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
+                               retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+               }
+
+               /*
+                * The Intel mitigation (IBRS or eIBRS) was already selected in
+                * spectre_v2_select_mitigation().  'retbleed_mitigation' will
+                * be set accordingly below.
+                */
+
+               break;
+       }
+
+       switch (retbleed_mitigation) {
+       case RETBLEED_MITIGATION_UNRET:
+               setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+               setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+               if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+                   boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+                       pr_err(RETBLEED_UNTRAIN_MSG);
+
+               mitigate_smt = true;
+               break;
+
+       case RETBLEED_MITIGATION_IBPB:
+               setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+               mitigate_smt = true;
+               break;
+
+       default:
+               break;
+       }
+
+       if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
+           (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
+               cpu_smt_disable(false);
+
+       /*
+        * Let IBRS trump all on Intel without overriding the effect of the
+        * retbleed= cmdline option.
+        */
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+               switch (spectre_v2_enabled) {
+               case SPECTRE_V2_IBRS:
+                       retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
+                       break;
+               case SPECTRE_V2_EIBRS:
+               case SPECTRE_V2_EIBRS_RETPOLINE:
+               case SPECTRE_V2_EIBRS_LFENCE:
+                       retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
+                       break;
+               default:
+                       pr_err(RETBLEED_INTEL_MSG);
+               }
+       }
+
+       pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "Spectre V2 : " fmt
+
 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
        SPECTRE_V2_USER_NONE;
 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
@@ -661,6 +975,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; }
 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
 
 #ifdef CONFIG_BPF_SYSCALL
 void unpriv_ebpf_notify(int new_state)
@@ -702,6 +1017,7 @@ enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_EIBRS,
        SPECTRE_V2_CMD_EIBRS_RETPOLINE,
        SPECTRE_V2_CMD_EIBRS_LFENCE,
+       SPECTRE_V2_CMD_IBRS,
 };
 
 enum spectre_v2_user_cmd {
@@ -742,13 +1058,15 @@ static void __init spec_v2_user_print_cond(const char *reason, bool secure)
                pr_info("spectre_v2_user=%s forced on command line.\n", reason);
 }
 
+static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
+
 static enum spectre_v2_user_cmd __init
-spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
+spectre_v2_parse_user_cmdline(void)
 {
        char arg[20];
        int ret, i;
 
-       switch (v2_cmd) {
+       switch (spectre_v2_cmd) {
        case SPECTRE_V2_CMD_NONE:
                return SPECTRE_V2_USER_CMD_NONE;
        case SPECTRE_V2_CMD_FORCE:
@@ -774,15 +1092,16 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
        return SPECTRE_V2_USER_CMD_AUTO;
 }
 
-static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
+static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
 {
-       return (mode == SPECTRE_V2_EIBRS ||
-               mode == SPECTRE_V2_EIBRS_RETPOLINE ||
-               mode == SPECTRE_V2_EIBRS_LFENCE);
+       return mode == SPECTRE_V2_IBRS ||
+              mode == SPECTRE_V2_EIBRS ||
+              mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+              mode == SPECTRE_V2_EIBRS_LFENCE;
 }
 
 static void __init
-spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+spectre_v2_user_select_mitigation(void)
 {
        enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
        bool smt_possible = IS_ENABLED(CONFIG_SMP);
@@ -795,7 +1114,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
            cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
                smt_possible = false;
 
-       cmd = spectre_v2_parse_user_cmdline(v2_cmd);
+       cmd = spectre_v2_parse_user_cmdline();
        switch (cmd) {
        case SPECTRE_V2_USER_CMD_NONE:
                goto set_mode;
@@ -843,12 +1162,12 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
        }
 
        /*
-        * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not
-        * required.
+        * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
+        * STIBP is not required.
         */
        if (!boot_cpu_has(X86_FEATURE_STIBP) ||
            !smt_possible ||
-           spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+           spectre_v2_in_ibrs_mode(spectre_v2_enabled))
                return;
 
        /*
@@ -860,6 +1179,13 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
            boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
                mode = SPECTRE_V2_USER_STRICT_PREFERRED;
 
+       if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
+               if (mode != SPECTRE_V2_USER_STRICT &&
+                   mode != SPECTRE_V2_USER_STRICT_PREFERRED)
+                       pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
+               mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+       }
+
        spectre_v2_user_stibp = mode;
 
 set_mode:
@@ -873,6 +1199,7 @@ static const char * const spectre_v2_strings[] = {
        [SPECTRE_V2_EIBRS]                      = "Mitigation: Enhanced IBRS",
        [SPECTRE_V2_EIBRS_LFENCE]               = "Mitigation: Enhanced IBRS + LFENCE",
        [SPECTRE_V2_EIBRS_RETPOLINE]            = "Mitigation: Enhanced IBRS + Retpolines",
+       [SPECTRE_V2_IBRS]                       = "Mitigation: IBRS",
 };
 
 static const struct {
@@ -890,6 +1217,7 @@ static const struct {
        { "eibrs,lfence",       SPECTRE_V2_CMD_EIBRS_LFENCE,      false },
        { "eibrs,retpoline",    SPECTRE_V2_CMD_EIBRS_RETPOLINE,   false },
        { "auto",               SPECTRE_V2_CMD_AUTO,              false },
+       { "ibrs",               SPECTRE_V2_CMD_IBRS,              false },
 };
 
 static void __init spec_v2_print_cond(const char *reason, bool secure)
@@ -952,6 +1280,30 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
                return SPECTRE_V2_CMD_AUTO;
        }
 
+       if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
+               pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+                      mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+
+       if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+               pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
+                      mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+
+       if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
+               pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
+                      mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+
+       if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) {
+               pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
+                      mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+
        spec_v2_print_cond(mitigation_options[i].option,
                           mitigation_options[i].secure);
        return cmd;
@@ -967,6 +1319,22 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
        return SPECTRE_V2_RETPOLINE;
 }
 
+/* Disable in-kernel use of non-RSB RET predictors */
+static void __init spec_ctrl_disable_kernel_rrsba(void)
+{
+       u64 ia32_cap;
+
+       if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+               return;
+
+       ia32_cap = x86_read_arch_cap_msr();
+
+       if (ia32_cap & ARCH_CAP_RRSBA) {
+               x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+               write_spec_ctrl_current(x86_spec_ctrl_base, true);
+       }
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -991,6 +1359,15 @@ static void __init spectre_v2_select_mitigation(void)
                        break;
                }
 
+               if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
+                   boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+                   retbleed_cmd != RETBLEED_CMD_OFF &&
+                   boot_cpu_has(X86_FEATURE_IBRS) &&
+                   boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+                       mode = SPECTRE_V2_IBRS;
+                       break;
+               }
+
                mode = spectre_v2_select_retpoline();
                break;
 
@@ -1007,6 +1384,10 @@ static void __init spectre_v2_select_mitigation(void)
                mode = spectre_v2_select_retpoline();
                break;
 
+       case SPECTRE_V2_CMD_IBRS:
+               mode = SPECTRE_V2_IBRS;
+               break;
+
        case SPECTRE_V2_CMD_EIBRS:
                mode = SPECTRE_V2_EIBRS;
                break;
@@ -1023,10 +1404,9 @@ static void __init spectre_v2_select_mitigation(void)
        if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
                pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
 
-       if (spectre_v2_in_eibrs_mode(mode)) {
-               /* Force it so VMEXIT will restore correctly */
+       if (spectre_v2_in_ibrs_mode(mode)) {
                x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-               wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+               write_spec_ctrl_current(x86_spec_ctrl_base, true);
        }
 
        switch (mode) {
@@ -1034,6 +1414,12 @@ static void __init spectre_v2_select_mitigation(void)
        case SPECTRE_V2_EIBRS:
                break;
 
+       case SPECTRE_V2_IBRS:
+               setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
+               if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
+                       pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
+               break;
+
        case SPECTRE_V2_LFENCE:
        case SPECTRE_V2_EIBRS_LFENCE:
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
@@ -1045,43 +1431,117 @@ static void __init spectre_v2_select_mitigation(void)
                break;
        }
 
+       /*
+        * Disable alternate RSB predictions in kernel when indirect CALLs and
+        * JMPs get protection against BHI and Intramode-BTI, but RET
+        * prediction from a non-RSB predictor is still a risk.
+        */
+       if (mode == SPECTRE_V2_EIBRS_LFENCE ||
+           mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+           mode == SPECTRE_V2_RETPOLINE)
+               spec_ctrl_disable_kernel_rrsba();
+
        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);
 
        /*
-        * If spectre v2 protection has been enabled, unconditionally fill
-        * RSB during a context switch; this protects against two independent
-        * issues:
+        * If Spectre v2 protection has been enabled, fill the RSB during a
+        * context switch.  In general there are two types of RSB attacks
+        * across context switches, for which the CALLs/RETs may be unbalanced.
+        *
+        * 1) RSB underflow
+        *
+        *    Some Intel parts have "bottomless RSB".  When the RSB is empty,
+        *    speculated return targets may come from the branch predictor,
+        *    which could have a user-poisoned BTB or BHB entry.
+        *
+        *    AMD has it even worse: *all* returns are speculated from the BTB,
+        *    regardless of the state of the RSB.
+        *
+        *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
+        *    scenario is mitigated by the IBRS branch prediction isolation
+        *    properties, so the RSB buffer filling wouldn't be necessary to
+        *    protect against this type of attack.
+        *
+        *    The "user -> user" attack scenario is mitigated by RSB filling.
+        *
+        * 2) Poisoned RSB entry
+        *
+        *    If the 'next' in-kernel return stack is shorter than 'prev',
+        *    'next' could be tricked into speculating with a user-poisoned RSB
+        *    entry.
+        *
+        *    The "user -> kernel" attack scenario is mitigated by SMEP and
+        *    eIBRS.
         *
-        *      - RSB underflow (and switch to BTB) on Skylake+
-        *      - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
+        *    The "user -> user" scenario, also known as SpectreBHB, requires
+        *    RSB clearing.
+        *
+        * So to mitigate all cases, unconditionally fill RSB on context
+        * switches.
+        *
+        * FIXME: Is this pointless for retbleed-affected AMD?
         */
        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
        /*
-        * Retpoline means the kernel is safe because it has no indirect
-        * branches. Enhanced IBRS protects firmware too, so, enable restricted
-        * speculation around firmware calls only when Enhanced IBRS isn't
-        * supported.
+        * Similar to context switches, there are two types of RSB attacks
+        * after vmexit:
+        *
+        * 1) RSB underflow
+        *
+        * 2) Poisoned RSB entry
+        *
+        * When retpoline is enabled, both are mitigated by filling/clearing
+        * the RSB.
+        *
+        * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
+        * prediction isolation protections, RSB still needs to be cleared
+        * because of #2.  Note that SMEP provides no protection here, unlike
+        * user-space-poisoned RSB entries.
+        *
+        * eIBRS, on the other hand, has RSB-poisoning protections, so it
+        * doesn't need RSB clearing after vmexit.
+        */
+       if (boot_cpu_has(X86_FEATURE_RETPOLINE) ||
+           boot_cpu_has(X86_FEATURE_KERNEL_IBRS))
+               setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+
+       /*
+        * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
+        * and Enhanced IBRS protect firmware too, so enable IBRS around
+        * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
+        * enabled.
         *
         * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
         * the user might select retpoline on the kernel command line and if
         * the CPU supports Enhanced IBRS, kernel might un-intentionally not
         * enable IBRS around firmware calls.
         */
-       if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
+       if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+           boot_cpu_has(X86_FEATURE_IBPB) &&
+           (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
+
+               if (retbleed_cmd != RETBLEED_CMD_IBPB) {
+                       setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
+                       pr_info("Enabling Speculation Barrier for firmware calls\n");
+               }
+
+       } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }
 
        /* Set up IBPB and STIBP depending on the general spectre V2 command */
-       spectre_v2_user_select_mitigation(cmd);
+       spectre_v2_cmd = cmd;
 }
 
 static void update_stibp_msr(void * __unused)
 {
-       wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+       u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
+       write_spec_ctrl_current(val, true);
 }
 
 /* Update x86_spec_ctrl_base in case SMT state changed. */
@@ -1116,6 +1576,8 @@ static void update_indir_branch_cond(void)
 /* Update the static key controlling the MDS CPU buffer clear in idle */
 static void update_mds_branch_idle(void)
 {
+       u64 ia32_cap = x86_read_arch_cap_msr();
+
        /*
         * Enable the idle clearing if SMT is active on CPUs which are
         * affected only by MSBDS and not any other MDS variant.
@@ -1127,14 +1589,17 @@ static void update_mds_branch_idle(void)
        if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
                return;
 
-       if (sched_smt_active())
+       if (sched_smt_active()) {
                static_branch_enable(&mds_idle_clear);
-       else
+       } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
+                  (ia32_cap & ARCH_CAP_FBSDP_NO)) {
                static_branch_disable(&mds_idle_clear);
+       }
 }
 
 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
+#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
 
 void cpu_bugs_smt_update(void)
 {
@@ -1179,6 +1644,16 @@ void cpu_bugs_smt_update(void)
                break;
        }
 
+       switch (mmio_mitigation) {
+       case MMIO_MITIGATION_VERW:
+       case MMIO_MITIGATION_UCODE_NEEDED:
+               if (sched_smt_active())
+                       pr_warn_once(MMIO_MSG_SMT);
+               break;
+       case MMIO_MITIGATION_OFF:
+               break;
+       }
+
        mutex_unlock(&spec_ctrl_mutex);
 }
 
@@ -1283,16 +1758,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
        }
 
        /*
-        * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
-        * bit in the mask to allow guests to use the mitigation even in the
-        * case where the host does not enable it.
-        */
-       if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-           static_cpu_has(X86_FEATURE_AMD_SSBD)) {
-               x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
-       }
-
-       /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
@@ -1309,7 +1774,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
                        x86_amd_ssb_disable();
                } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-                       wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+                       write_spec_ctrl_current(x86_spec_ctrl_base, true);
                }
        }
 
@@ -1560,7 +2025,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 void x86_spec_ctrl_setup_ap(void)
 {
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-               wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+               write_spec_ctrl_current(x86_spec_ctrl_base, true);
 
        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
@@ -1781,9 +2246,23 @@ static ssize_t tsx_async_abort_show_state(char *buf)
                       sched_smt_active() ? "vulnerable" : "disabled");
 }
 
+static ssize_t mmio_stale_data_show_state(char *buf)
+{
+       if (mmio_mitigation == MMIO_MITIGATION_OFF)
+               return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
+
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+               return sysfs_emit(buf, "%s; SMT Host state unknown\n",
+                                 mmio_strings[mmio_mitigation]);
+       }
+
+       return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
+                         sched_smt_active() ? "vulnerable" : "disabled");
+}
+
 static char *stibp_state(void)
 {
-       if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+       if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
                return "";
 
        switch (spectre_v2_user_stibp) {
@@ -1839,6 +2318,24 @@ static ssize_t srbds_show_state(char *buf)
        return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
 }
 
+static ssize_t retbleed_show_state(char *buf)
+{
+       if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
+           if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+               boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+                   return sprintf(buf, "Vulnerable: untrained return thunk on non-Zen uarch\n");
+
+           return sprintf(buf, "%s; SMT %s\n",
+                          retbleed_strings[retbleed_mitigation],
+                          !sched_smt_active() ? "disabled" :
+                          spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+                          spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
+                          "enabled with STIBP protection" : "vulnerable");
+       }
+
+       return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
 {
@@ -1881,6 +2378,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
        case X86_BUG_SRBDS:
                return srbds_show_state(buf);
 
+       case X86_BUG_MMIO_STALE_DATA:
+               return mmio_stale_data_show_state(buf);
+
+       case X86_BUG_RETBLEED:
+               return retbleed_show_state(buf);
+
        default:
                break;
        }
@@ -1932,4 +2435,14 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
 {
        return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
 }
+
+ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
+}
+
+ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
+}
 #endif
index c296cb1..736262a 100644
@@ -1205,24 +1205,60 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
        {}
 };
 
+#define VULNBL(vendor, family, model, blacklist)       \
+       X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
+
 #define VULNBL_INTEL_STEPPINGS(model, steppings, issues)                  \
        X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,             \
                                            INTEL_FAM6_##model, steppings, \
                                            X86_FEATURE_ANY, issues)
 
+#define VULNBL_AMD(family, blacklist)          \
+       VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
+
+#define VULNBL_HYGON(family, blacklist)                \
+       VULNBL(HYGON, family, X86_MODEL_ANY, blacklist)
+
 #define SRBDS          BIT(0)
+/* CPU is affected by X86_BUG_MMIO_STALE_DATA */
+#define MMIO           BIT(1)
+/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
+#define MMIO_SBDS      BIT(2)
+/* CPU is affected by RETbleed, speculating where you would not expect it */
+#define RETBLEED       BIT(3)
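For readers tracing the table below, a sketch of how one vendor-wide entry expands through the VULNBL() helper defined above:

        /* VULNBL_AMD(0x17, RETBLEED) expands to: */
        X86_MATCH_VENDOR_FAM_MODEL(AMD, 0x17, X86_MODEL_ANY, RETBLEED)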
 
 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_INTEL_STEPPINGS(IVYBRIDGE,       X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(HASWELL,         X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(HASWELL_L,       X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(HASWELL_G,       X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(HASWELL_X,       X86_STEPPING_ANY,               MMIO),
+       VULNBL_INTEL_STEPPINGS(BROADWELL_D,     X86_STEPPING_ANY,               MMIO),
        VULNBL_INTEL_STEPPINGS(BROADWELL_G,     X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(BROADWELL_X,     X86_STEPPING_ANY,               MMIO),
        VULNBL_INTEL_STEPPINGS(BROADWELL,       X86_STEPPING_ANY,               SRBDS),
-       VULNBL_INTEL_STEPPINGS(SKYLAKE_L,       X86_STEPPING_ANY,               SRBDS),
-       VULNBL_INTEL_STEPPINGS(SKYLAKE,         X86_STEPPING_ANY,               SRBDS),
-       VULNBL_INTEL_STEPPINGS(KABYLAKE_L,      X86_STEPPINGS(0x0, 0xC),        SRBDS),
-       VULNBL_INTEL_STEPPINGS(KABYLAKE,        X86_STEPPINGS(0x0, 0xD),        SRBDS),
+       VULNBL_INTEL_STEPPINGS(SKYLAKE_L,       X86_STEPPING_ANY,               SRBDS | MMIO | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(SKYLAKE_X,       X86_STEPPING_ANY,               MMIO | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(SKYLAKE,         X86_STEPPING_ANY,               SRBDS | MMIO | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(KABYLAKE_L,      X86_STEPPING_ANY,               SRBDS | MMIO | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(KABYLAKE,        X86_STEPPING_ANY,               SRBDS | MMIO | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,    X86_STEPPING_ANY,               RETBLEED),
+       VULNBL_INTEL_STEPPINGS(ICELAKE_L,       X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(ICELAKE_D,       X86_STEPPING_ANY,               MMIO),
+       VULNBL_INTEL_STEPPINGS(ICELAKE_X,       X86_STEPPING_ANY,               MMIO),
+       VULNBL_INTEL_STEPPINGS(COMETLAKE,       X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(COMETLAKE_L,     X86_STEPPINGS(0x0, 0x0),        MMIO | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(COMETLAKE_L,     X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(LAKEFIELD,       X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(ROCKETLAKE,      X86_STEPPING_ANY,               MMIO | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,    X86_STEPPING_ANY,               MMIO | MMIO_SBDS),
+       VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,  X86_STEPPING_ANY,               MMIO),
+       VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,  X86_STEPPING_ANY,               MMIO | MMIO_SBDS),
+
+       VULNBL_AMD(0x15, RETBLEED),
+       VULNBL_AMD(0x16, RETBLEED),
+       VULNBL_AMD(0x17, RETBLEED),
+       VULNBL_HYGON(0x18, RETBLEED),
        {}
 };
 
@@ -1243,6 +1279,13 @@ u64 x86_read_arch_cap_msr(void)
        return ia32_cap;
 }
 
+static bool arch_cap_mmio_immune(u64 ia32_cap)
+{
+       return (ia32_cap & ARCH_CAP_FBSDP_NO &&
+               ia32_cap & ARCH_CAP_PSDP_NO &&
+               ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
+}
+
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
        u64 ia32_cap = x86_read_arch_cap_msr();
@@ -1296,12 +1339,32 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
        /*
         * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
         * in the vulnerability blacklist.
+        *
+        * Some of the implications and mitigation of Shared Buffers Data
+        * Sampling (SBDS) are similar to SRBDS. Give SBDS the same
+        * treatment as SRBDS.
         */
        if ((cpu_has(c, X86_FEATURE_RDRAND) ||
             cpu_has(c, X86_FEATURE_RDSEED)) &&
-           cpu_matches(cpu_vuln_blacklist, SRBDS))
+           cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
                    setup_force_cpu_bug(X86_BUG_SRBDS);
 
+       /*
+        * Processor MMIO Stale Data bug enumeration
+        *
+        * The affected CPU list is generally enough to enumerate the
+        * vulnerability, but in the virtualization case also check the
+        * ARCH_CAP MSR bits: the VMM may not want the guest to enumerate
+        * the bug.
+        */
+       if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
+           !arch_cap_mmio_immune(ia32_cap))
+               setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+
+       if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
+               if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+                       setup_force_cpu_bug(X86_BUG_RETBLEED);
+       }
+
        if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
                return;
 
index 2a8e584..7c9b589 100644 (file)
@@ -61,6 +61,8 @@ static inline void tsx_init(void) { }
 static inline void tsx_ap_init(void) { }
 #endif /* CONFIG_CPU_SUP_INTEL */
 
+extern void init_spectral_chicken(struct cpuinfo_x86 *c);
+
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
index 3fcdda4..21fd425 100644 (file)
@@ -302,6 +302,12 @@ static void init_hygon(struct cpuinfo_x86 *c)
        /* get apicid instead of initial apic id from cpuid */
        c->apicid = hard_smp_processor_id();
 
+       /*
+        * XXX someone from Hygon needs to confirm this DTRT
+        *
+       init_spectral_chicken(c);
+        */
+
        set_cpu_cap(c, X86_FEATURE_ZEN);
        set_cpu_cap(c, X86_FEATURE_CPB);
 
index fd5dead..663f6e6 100644 (file)
@@ -682,9 +682,9 @@ static void init_intel(struct cpuinfo_x86 *c)
                unsigned int l1, l2;
 
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
-               if (!(l1 & (1<<11)))
+               if (!(l1 & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL))
                        set_cpu_cap(c, X86_FEATURE_BTS);
-               if (!(l1 & (1<<12)))
+               if (!(l1 & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
                        set_cpu_cap(c, X86_FEATURE_PEBS);
        }
 
index 5fbd7ff..12cf2e7 100644 (file)
@@ -33,6 +33,8 @@
 
 #include "internal.h"
 
+static bool hw_injection_possible = true;
+
 /*
  * Collect all the MCi_XXX settings
  */
@@ -339,6 +341,8 @@ static int __set_inj(const char *buf)
 
        for (i = 0; i < N_INJ_TYPES; i++) {
                if (!strncmp(flags_options[i], buf, strlen(flags_options[i]))) {
+                       if (i > SW_INJ && !hw_injection_possible)
+                               continue;
                        inj_type = i;
                        return 0;
                }
@@ -717,11 +721,54 @@ static void __init debugfs_init(void)
                                    &i_mce, dfs_fls[i].fops);
 }
 
+static void check_hw_inj_possible(void)
+{
+       int cpu;
+       u8 bank;
+
+       /*
+        * This behavior exists only on SMCA systems, though it's not directly
+        * related to SMCA.
+        */
+       if (!cpu_feature_enabled(X86_FEATURE_SMCA))
+               return;
+
+       cpu = get_cpu();
+
+       for (bank = 0; bank < MAX_NR_BANKS; ++bank) {
+               u64 status = MCI_STATUS_VAL, ipid;
+
+               /* Check whether bank is populated */
+               rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), ipid);
+               if (!ipid)
+                       continue;
+
+               toggle_hw_mce_inject(cpu, true);
+
+               wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), status);
+               rdmsrl_safe(mca_msr_reg(bank, MCA_STATUS), &status);
+
+               if (!status) {
+                       hw_injection_possible = false;
+                       pr_warn("Platform does not allow *hardware* error injection."
+                               "Try using APEI EINJ instead.\n");
+               }
+
+               toggle_hw_mce_inject(cpu, false);
+
+               break;
+       }
+
+       put_cpu();
+}
+
 static int __init inject_init(void)
 {
        if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
                return -ENOMEM;
 
+       check_hw_inj_possible();
+
        debugfs_init();
 
        register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0, "mce_notify");
index 4ae0e60..7e03f5b 100644 (file)
@@ -211,7 +211,7 @@ noinstr u64 mce_rdmsrl(u32 msr);
 
 static __always_inline u32 mca_msr_reg(int bank, enum mca_msr reg)
 {
-       if (mce_flags.smca) {
+       if (cpu_feature_enabled(X86_FEATURE_SMCA)) {
                switch (reg) {
                case MCA_CTL:    return MSR_AMD64_SMCA_MCx_CTL(bank);
                case MCA_ADDR:   return MSR_AMD64_SMCA_MCx_ADDR(bank);
index dbaa832..fd44b54 100644 (file)
@@ -27,6 +27,7 @@ static const struct cpuid_bit cpuid_bits[] = {
        { X86_FEATURE_APERFMPERF,       CPUID_ECX,  0, 0x00000006, 0 },
        { X86_FEATURE_EPB,              CPUID_ECX,  3, 0x00000006, 0 },
        { X86_FEATURE_INTEL_PPIN,       CPUID_EBX,  0, 0x00000007, 1 },
+       { X86_FEATURE_RRSBA_CTRL,       CPUID_EDX,  2, 0x00000007, 2 },
        { X86_FEATURE_CQM_LLC,          CPUID_EDX,  1, 0x0000000f, 0 },
        { X86_FEATURE_CQM_OCCUP_LLC,    CPUID_EDX,  0, 0x0000000f, 1 },
        { X86_FEATURE_CQM_MBM_TOTAL,    CPUID_EDX,  1, 0x0000000f, 1 },
index c04b933..02039ec 100644 (file)
@@ -476,8 +476,8 @@ static bool __init vmware_legacy_x2apic_available(void)
 {
        uint32_t eax, ebx, ecx, edx;
        VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx);
-       return (eax & (1 << VMWARE_CMD_VCPU_RESERVED)) == 0 &&
-              (eax & (1 << VMWARE_CMD_LEGACY_X2APIC)) != 0;
+       return !(eax & BIT(VMWARE_CMD_VCPU_RESERVED)) &&
+               (eax & BIT(VMWARE_CMD_LEGACY_X2APIC));
 }
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
index f267205..9dac246 100644 (file)
@@ -1017,10 +1017,10 @@ void __init e820__reserve_setup_data(void)
                e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
 
                /*
-                * SETUP_EFI is supplied by kexec and does not need to be
-                * reserved.
+                * SETUP_EFI and SETUP_IMA are supplied by kexec and do not need
+                * to be reserved.
                 */
-               if (data->type != SETUP_EFI)
+               if (data->type != SETUP_EFI && data->type != SETUP_IMA)
                        e820__range_update_kexec(pa_data,
                                                 sizeof(*data) + data->len,
                                                 E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
index 0531d6a..3b28c5b 100644 (file)
@@ -851,3 +851,17 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
         */
        return 0;
 }
+
+/*
+ * Initialize register state that may prevent the CPU from entering
+ * low-power idle. This function will be invoked from the cpuidle driver
+ * only when needed.
+ */
+void fpu_idle_fpregs(void)
+{
+       /* Note: AMX_TILE being enabled implies XGETBV1 support */
+       if (cpu_feature_enabled(X86_FEATURE_AMX_TILE) &&
+           (xfeatures_in_use() & XFEATURE_MASK_XTILE)) {
+               tile_release();
+               fpregs_deactivate(&current->thread.fpu);
+       }
+}
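
For context, a hedged sketch of the kind of cpuidle callsite this helper is written for. CPUIDLE_FLAG_INIT_XSTATE is the state flag intel_idle grew in the same series; the rest of the function (name, hint values) is pseudo-driver scaffolding, not the real driver:

```c
/*
 * Hypothetical cpuidle enter path. An AMX tile left "in use" can keep
 * the core from reaching deep C-states, so release it before MWAIT.
 */
static __cpuidle int example_idle_enter(struct cpuidle_device *dev,
					struct cpuidle_driver *drv, int index)
{
	if (drv->states[index].flags & CPUIDLE_FLAG_INIT_XSTATE)
		fpu_idle_fpregs();	/* drops AMX tile state if in use */

	mwait_idle_with_hints(0, 0);	/* hint values illustrative only */
	return index;
}
```
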
index 5b4efc9..24b9fa8 100644 (file)
@@ -301,7 +301,7 @@ union ftrace_op_code_union {
        } __attribute__((packed));
 };
 
-#define RET_SIZE               1 + IS_ENABLED(CONFIG_SLS)
+#define RET_SIZE               (IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))
 
 static unsigned long
 create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
@@ -357,7 +357,10 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
                goto fail;
 
        ip = trampoline + size;
-       memcpy(ip, retq, RET_SIZE);
+       if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+               __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, &__x86_return_thunk, JMP32_INSN_SIZE);
+       else
+               memcpy(ip, retq, sizeof(retq));
 
        /* No need to test direct calls on created trampolines */
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
index 4ec1360..dfeb227 100644 (file)
@@ -175,6 +175,7 @@ SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL)
 
        jmp ftrace_epilogue
 SYM_FUNC_END(ftrace_caller);
+STACK_FRAME_NON_STANDARD_FP(ftrace_caller)
 
 SYM_FUNC_START(ftrace_epilogue)
 /*
@@ -282,6 +283,7 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
        jmp     ftrace_epilogue
 
 SYM_FUNC_END(ftrace_regs_caller)
+STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)
 
 
 #else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -311,10 +313,14 @@ trace:
        jmp ftrace_stub
 SYM_FUNC_END(__fentry__)
 EXPORT_SYMBOL(__fentry__)
+STACK_FRAME_NON_STANDARD_FP(__fentry__)
+
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-SYM_FUNC_START(return_to_handler)
+SYM_CODE_START(return_to_handler)
+       UNWIND_HINT_EMPTY
+       ANNOTATE_NOENDBR
        subq  $16, %rsp
 
        /* Save the return values */
@@ -339,7 +345,6 @@ SYM_FUNC_START(return_to_handler)
        int3
 .Ldo_rop:
        mov %rdi, (%rsp)
-       UNWIND_HINT_FUNC
        RET
-SYM_FUNC_END(return_to_handler)
+SYM_CODE_END(return_to_handler)
 #endif
index bd4a341..6a3cfaf 100644 (file)
@@ -426,10 +426,12 @@ void __init do_early_exception(struct pt_regs *regs, int trapnr)
 
 /* Don't add a printk in there. printk relies on the PDA which is not initialized 
    yet. */
-static void __init clear_bss(void)
+void __init clear_bss(void)
 {
        memset(__bss_start, 0,
               (unsigned long) __bss_stop - (unsigned long) __bss_start);
+       memset(__brk_base, 0,
+              (unsigned long) __brk_limit - (unsigned long) __brk_base);
 }
 
 static unsigned long get_cmd_line_ptr(void)
index eb8656b..9b7acc9 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/cpufeatures.h>
 #include <asm/percpu.h>
 #include <asm/nops.h>
+#include <asm/nospec-branch.h>
 #include <asm/bootparam.h>
 #include <asm/export.h>
 #include <asm/pgtable_32.h>
index 92c4afa..d860d43 100644 (file)
@@ -389,6 +389,8 @@ SYM_CODE_START_NOALIGN(vc_boot_ghcb)
        UNWIND_HINT_IRET_REGS offset=8
        ENDBR
 
+       ANNOTATE_UNRET_END
+
        /* Build pt_regs */
        PUSH_AND_CLEAR_REGS
 
@@ -448,6 +450,7 @@ SYM_CODE_END(early_idt_handler_array)
 
 SYM_CODE_START_LOCAL(early_idt_handler_common)
        UNWIND_HINT_IRET_REGS offset=16
+       ANNOTATE_UNRET_END
        /*
         * The stack is the hardware frame, an error code or zero, and the
         * vector number.
@@ -497,6 +500,8 @@ SYM_CODE_START_NOALIGN(vc_no_ghcb)
        UNWIND_HINT_IRET_REGS offset=8
        ENDBR
 
+       ANNOTATE_UNRET_END
+
        /* Build pt_regs */
        PUSH_AND_CLEAR_REGS
 
index 170d0fd..b9bdb40 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/mm.h>
 #include <linux/efi.h>
 #include <linux/verification.h>
+#include <linux/random.h>
 
 #include <asm/bootparam.h>
 #include <asm/setup.h>
@@ -110,6 +111,26 @@ static int setup_e820_entries(struct boot_params *params)
        return 0;
 }
 
+enum { RNG_SEED_LENGTH = 32 };
+
+static void
+setup_rng_seed(struct boot_params *params, unsigned long params_load_addr,
+              unsigned int rng_seed_setup_data_offset)
+{
+       struct setup_data *sd = (void *)params + rng_seed_setup_data_offset;
+       unsigned long setup_data_phys;
+
+       if (!rng_is_initialized())
+               return;
+
+       sd->type = SETUP_RNG_SEED;
+       sd->len = RNG_SEED_LENGTH;
+       get_random_bytes(sd->data, RNG_SEED_LENGTH);
+       setup_data_phys = params_load_addr + rng_seed_setup_data_offset;
+       sd->next = params->hdr.setup_data;
+       params->hdr.setup_data = setup_data_phys;
+}
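
The new node is simply pushed onto the singly linked setup_data list that the booted kernel walks early in setup. A sketch of that walk, mirroring parse_setup_data() further down in this series (kernel context assumed, error handling omitted):

```c
/* inside an __init routine; boot_params.hdr.setup_data is a phys addr */
u64 pa_data = boot_params.hdr.setup_data;

while (pa_data) {
	struct setup_data *data = early_memremap(pa_data, sizeof(*data));
	u64 pa_next = data->next;	/* save before unmapping */

	if (data->type == SETUP_RNG_SEED)
		pr_info("setup: %u byte RNG seed from bootloader/kexec\n",
			data->len);

	early_memunmap(data, sizeof(*data));
	pa_data = pa_next;
}
```
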
+
 #ifdef CONFIG_EFI
 static int setup_efi_info_memmap(struct boot_params *params,
                                  unsigned long params_load_addr,
@@ -186,11 +207,38 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
 }
 #endif /* CONFIG_EFI */
 
+static void
+setup_ima_state(const struct kimage *image, struct boot_params *params,
+               unsigned long params_load_addr,
+               unsigned int ima_setup_data_offset)
+{
+#ifdef CONFIG_IMA_KEXEC
+       struct setup_data *sd = (void *)params + ima_setup_data_offset;
+       unsigned long setup_data_phys;
+       struct ima_setup_data *ima;
+
+       if (!image->ima_buffer_size)
+               return;
+
+       sd->type = SETUP_IMA;
+       sd->len = sizeof(*ima);
+
+       ima = (void *)sd + sizeof(struct setup_data);
+       ima->addr = image->ima_buffer_addr;
+       ima->size = image->ima_buffer_size;
+
+       /* Add setup data */
+       setup_data_phys = params_load_addr + ima_setup_data_offset;
+       sd->next = params->hdr.setup_data;
+       params->hdr.setup_data = setup_data_phys;
+#endif /* CONFIG_IMA_KEXEC */
+}
+
 static int
 setup_boot_parameters(struct kimage *image, struct boot_params *params,
                      unsigned long params_load_addr,
                      unsigned int efi_map_offset, unsigned int efi_map_sz,
-                     unsigned int efi_setup_data_offset)
+                     unsigned int setup_data_offset)
 {
        unsigned int nr_e820_entries;
        unsigned long long mem_k, start, end;
@@ -245,8 +293,22 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
 #ifdef CONFIG_EFI
        /* Setup EFI state */
        setup_efi_state(params, params_load_addr, efi_map_offset, efi_map_sz,
-                       efi_setup_data_offset);
+                       setup_data_offset);
+       setup_data_offset += sizeof(struct setup_data) +
+                       sizeof(struct efi_setup_data);
 #endif
+
+       if (IS_ENABLED(CONFIG_IMA_KEXEC)) {
+               /* Setup IMA log buffer state */
+               setup_ima_state(image, params, params_load_addr,
+                               setup_data_offset);
+               setup_data_offset += sizeof(struct setup_data) +
+                                    sizeof(struct ima_setup_data);
+       }
+
+       /* Setup RNG seed */
+       setup_rng_seed(params, params_load_addr, setup_data_offset);
+
        /* Setup EDD info */
        memcpy(params->eddbuf, boot_params.eddbuf,
                                EDDMAXNR * sizeof(struct edd_info));
@@ -401,7 +463,13 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
        params_cmdline_sz = ALIGN(params_cmdline_sz, 16);
        kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) +
                                sizeof(struct setup_data) +
-                               sizeof(struct efi_setup_data);
+                               sizeof(struct efi_setup_data) +
+                               sizeof(struct setup_data) +
+                               RNG_SEED_LENGTH;
+
+       if (IS_ENABLED(CONFIG_IMA_KEXEC))
+               kbuf.bufsz += sizeof(struct setup_data) +
+                             sizeof(struct ima_setup_data);
 
        params = kzalloc(kbuf.bufsz, GFP_KERNEL);
        if (!params)
index b98ffcf..67828d9 100644 (file)
@@ -253,7 +253,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 {
        const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
                *para = NULL, *orc = NULL, *orc_ip = NULL,
-               *retpolines = NULL, *ibt_endbr = NULL;
+               *retpolines = NULL, *returns = NULL, *ibt_endbr = NULL;
        char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
        for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
@@ -271,6 +271,8 @@ int module_finalize(const Elf_Ehdr *hdr,
                        orc_ip = s;
                if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
                        retpolines = s;
+               if (!strcmp(".return_sites", secstrings + s->sh_name))
+                       returns = s;
                if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name))
                        ibt_endbr = s;
        }
@@ -287,6 +289,10 @@ int module_finalize(const Elf_Ehdr *hdr,
                void *rseg = (void *)retpolines->sh_addr;
                apply_retpolines(rseg, rseg + retpolines->sh_size);
        }
+       if (returns) {
+               void *rseg = (void *)returns->sh_addr;
+               apply_returns(rseg, rseg + returns->sh_size);
+       }
        if (alt) {
                /* patch .altinstructions */
                void *aseg = (void *)alt->sh_addr;
index 6b07faa..23154d2 100644 (file)
@@ -27,6 +27,11 @@ static __init int register_e820_pmem(void)
         * simply here to trigger the module to load on demand.
         */
        pdev = platform_device_alloc("e820_pmem", -1);
-       return platform_device_add(pdev);
+
+       rc = platform_device_add(pdev);
+       if (rc)
+               platform_device_put(pdev);
+
+       return rc;
 }
 device_initcall(register_e820_pmem);
index 9b2772b..58a6ea4 100644 (file)
@@ -600,7 +600,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
        }
 
        if (updmsr)
-               wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+               write_spec_ctrl_current(msr, false);
 }
 
 static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
@@ -810,24 +810,43 @@ static void amd_e400_idle(void)
 }
 
 /*
- * Intel Core2 and older machines prefer MWAIT over HALT for C1.
- * We can't rely on cpuidle installing MWAIT, because it will not load
- * on systems that support only C1 -- so the boot default must be MWAIT.
+ * Prefer MWAIT over HALT if MWAIT is supported, the MWAIT CPUID leaf
+ * exists and, whenever the MONITOR/MWAIT extensions are present, there
+ * is at least one C1 substate.
  *
- * Some AMD machines are the opposite, they depend on using HALT.
- *
- * So for default C1, which is used during boot until cpuidle loads,
- * use MWAIT-C1 on Intel HW that has it, else use HALT.
+ * Do not prefer MWAIT if the MONITOR instruction has a bug or if
+ * idle=nomwait is passed on the kernel command line.
  */
 static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 {
-       if (c->x86_vendor != X86_VENDOR_INTEL)
+       u32 eax, ebx, ecx, edx;
+
+       /* User has disallowed the use of MWAIT. Fall back to HALT */
+       if (boot_option_idle_override == IDLE_NOMWAIT)
                return 0;
 
-       if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
+       /* MWAIT is not supported on this platform. Fall back to HALT */
+       if (!cpu_has(c, X86_FEATURE_MWAIT))
                return 0;
 
-       return 1;
+       /* MONITOR has a bug. Fall back to HALT */
+       if (boot_cpu_has_bug(X86_BUG_MONITOR))
+               return 0;
+
+       cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+       /*
+        * If MWAIT extensions are not available, it is safe to use MWAIT
+        * with EAX=0, ECX=0.
+        */
+       if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED))
+               return 1;
+
+       /*
+        * If MWAIT extensions are available, there should be at least one
+        * MWAIT C1 substate present.
+        */
+       return (edx & MWAIT_C1_SUBSTATE_MASK);
 }
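
The C1 substate count lives in a 4-bit field of CPUID.05H:EDX. A small sketch of the decode that the return statement relies on (mask and shift names as in asm/mwait.h, kernel context assumed):

```c
u32 eax, ebx, ecx, edx;

cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

/* EDX packs per-C-state substate counts in 4-bit fields; bits 7:4 are C1 */
if (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)
	pr_info("MWAIT C1 substates: %u\n",
		(edx & MWAIT_C1_SUBSTATE_MASK) >> MWAIT_SUBSTATE_SIZE);
```
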
 
 /*
@@ -932,9 +951,8 @@ static int __init idle_setup(char *str)
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * If the boot option of "idle=nomwait" is added,
-                * it means that mwait will be disabled for CPU C2/C3
-                * states. In such case it won't touch the variable
-                * of boot_option_idle_override.
+                * it means that MWAIT will be disabled for the CPU C1/C2/C3
+                * states.
                 */
                boot_option_idle_override = IDLE_NOMWAIT;
        } else
index fcc8a76..c7c4b19 100644 (file)
@@ -7,10 +7,12 @@
 #include <linux/linkage.h>
 #include <asm/page_types.h>
 #include <asm/kexec.h>
+#include <asm/nospec-branch.h>
 #include <asm/processor-flags.h>
 
 /*
- * Must be relocatable PIC code callable as a C function
+ * Must be relocatable PIC code callable as a C function, in particular
+ * there must be a plain RET and not a jump to the return thunk.
  */
 
 #define PTR(x) (x << 2)
@@ -91,7 +93,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
        movl    %edi, %eax
        addl    $(identity_mapped - relocate_kernel), %eax
        pushl   %eax
-       RET
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
 SYM_CODE_END(relocate_kernel)
 
 SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
@@ -159,12 +163,15 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
        xorl    %edx, %edx
        xorl    %esi, %esi
        xorl    %ebp, %ebp
-       RET
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
 1:
        popl    %edx
        movl    CP_PA_SWAP_PAGE(%edi), %esp
        addl    $PAGE_SIZE, %esp
 2:
+       ANNOTATE_RETPOLINE_SAFE
        call    *%edx
 
        /* get the re-entry point of the peer system */
@@ -190,7 +197,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
        movl    %edi, %eax
        addl    $(virtual_mapped - relocate_kernel), %eax
        pushl   %eax
-       RET
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
 SYM_CODE_END(identity_mapped)
 
 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
@@ -208,7 +217,9 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
        popl    %edi
        popl    %esi
        popl    %ebx
-       RET
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
 SYM_CODE_END(virtual_mapped)
 
        /* Do the copies */
@@ -271,7 +282,9 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
        popl    %edi
        popl    %ebx
        popl    %ebp
-       RET
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
 SYM_CODE_END(swap_pages)
 
        .globl kexec_control_code_size
index c1d8626..4809c0d 100644 (file)
@@ -13,7 +13,8 @@
 #include <asm/unwind_hints.h>
 
 /*
- * Must be relocatable PIC code callable as a C function
+ * Must be relocatable PIC code callable as a C function, in particular
+ * there must be a plain RET and not a jump to the return thunk.
  */
 
 #define PTR(x) (x << 3)
@@ -105,7 +106,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
        /* jump to identity mapped page */
        addq    $(identity_mapped - relocate_kernel), %r8
        pushq   %r8
-       RET
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
 SYM_CODE_END(relocate_kernel)
 
 SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
@@ -200,7 +203,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
        xorl    %r14d, %r14d
        xorl    %r15d, %r15d
 
-       RET
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
 
 1:
        popq    %rdx
@@ -219,7 +224,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
        call    swap_pages
        movq    $virtual_mapped, %rax
        pushq   %rax
-       RET
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
 SYM_CODE_END(identity_mapped)
 
 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
@@ -241,7 +248,9 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
        popq    %r12
        popq    %rbp
        popq    %rbx
-       RET
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
 SYM_CODE_END(virtual_mapped)
 
        /* Do the copies */
@@ -298,7 +307,9 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
        lea     PAGE_SIZE(%rax), %rsi
        jmp     0b
 3:
-       RET
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
 SYM_CODE_END(swap_pages)
 
        .globl kexec_control_code_size
index db2b350..bba1abd 100644 (file)
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
-#include <linux/dev_printk.h>
 #include <linux/ioport.h>
+#include <linux/printk.h>
 #include <asm/e820/api.h>
+#include <asm/pci_x86.h>
 
 static void resource_clip(struct resource *res, resource_size_t start,
                          resource_size_t end)
@@ -24,14 +25,14 @@ static void resource_clip(struct resource *res, resource_size_t start,
                res->start = end + 1;
 }
 
-void remove_e820_regions(struct device *dev, struct resource *avail)
+static void remove_e820_regions(struct resource *avail)
 {
        int i;
        struct e820_entry *entry;
        u64 e820_start, e820_end;
        struct resource orig = *avail;
 
-       if (!(avail->flags & IORESOURCE_MEM))
+       if (!pci_use_e820)
                return;
 
        for (i = 0; i < e820_table->nr_entries; i++) {
@@ -41,7 +42,7 @@ void remove_e820_regions(struct device *dev, struct resource *avail)
 
                resource_clip(avail, e820_start, e820_end);
                if (orig.start != avail->start || orig.end != avail->end) {
-                       dev_info(dev, "clipped %pR to %pR for e820 entry [mem %#010Lx-%#010Lx]\n",
+                       pr_info("clipped %pR to %pR for e820 entry [mem %#010Lx-%#010Lx]\n",
                                 &orig, avail, e820_start, e820_end);
                        orig = *avail;
                }
@@ -55,6 +56,9 @@ void arch_remove_reservations(struct resource *avail)
         * the low 1MB unconditionally, as this area is needed for some ISA
         * cards requiring a memory range, e.g. the i82365 PCMCIA controller.
         */
-       if (avail->flags & IORESOURCE_MEM)
+       if (avail->flags & IORESOURCE_MEM) {
                resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);
+
+               remove_e820_regions(avail);
+       }
 }
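
resource_clip() keeps whichever side of the conflict is larger. A user-space restatement with a worked example (the helper below is a hypothetical illustration, not the kernel function):

```c
#include <stdint.h>
#include <stdio.h>

/* shrink [*s, *e] so it no longer overlaps the reservation [rs, re] */
static void clip(uint64_t *s, uint64_t *e, uint64_t rs, uint64_t re)
{
	uint64_t low = 0, high = 0;

	if (*e < rs || *s > re)
		return;			/* no conflict */
	if (*s < rs)
		low = rs - *s;		/* room below the conflict */
	if (*e > re)
		high = *e - re;		/* room above the conflict */

	if (low > high)
		*e = rs - 1;		/* keep the larger, lower part */
	else
		*s = re + 1;		/* keep the upper part */
}

int main(void)
{
	uint64_t s = 0xA0000000, e = 0xBFFFFFFF;

	/* e820-reserved range eats the top half of the window */
	clip(&s, &e, 0xB0000000, 0xCFFFFFFF);
	printf("[%#llx-%#llx]\n",
	       (unsigned long long)s, (unsigned long long)e);	/* low half */
	return 0;
}
```
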
index 3ebb853..216fee7 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/dma-map-ops.h>
 #include <linux/dmi.h>
 #include <linux/efi.h>
+#include <linux/ima.h>
 #include <linux/init_ohci1394_dma.h>
 #include <linux/initrd.h>
 #include <linux/iscsi_ibft.h>
@@ -23,6 +24,7 @@
 #include <linux/usb/xhci-dbgp.h>
 #include <linux/static_call.h>
 #include <linux/swiotlb.h>
+#include <linux/random.h>
 
 #include <uapi/linux/mount.h>
 
@@ -67,11 +69,6 @@ RESERVE_BRK(dmi_alloc, 65536);
 #endif
 
 
-/*
- * Range of the BSS area. The size of the BSS area is determined
- * at link time, with RESERVE_BRK() facility reserving additional
- * chunks.
- */
 unsigned long _brk_start = (unsigned long)__brk_base;
 unsigned long _brk_end   = (unsigned long)__brk_base;
 
@@ -145,6 +142,11 @@ __visible unsigned long mmu_cr4_features __ro_after_init;
 __visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE;
 #endif
 
+#ifdef CONFIG_IMA
+static phys_addr_t ima_kexec_buffer_phys;
+static size_t ima_kexec_buffer_size;
+#endif
+
 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
 int bootloader_type, bootloader_version;
 
@@ -335,6 +337,60 @@ static void __init reserve_initrd(void)
 }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
+static void __init add_early_ima_buffer(u64 phys_addr)
+{
+#ifdef CONFIG_IMA
+       struct ima_setup_data *data;
+
+       data = early_memremap(phys_addr + sizeof(struct setup_data), sizeof(*data));
+       if (!data) {
+               pr_warn("setup: failed to memremap ima_setup_data entry\n");
+               return;
+       }
+
+       if (data->size) {
+               memblock_reserve(data->addr, data->size);
+               ima_kexec_buffer_phys = data->addr;
+               ima_kexec_buffer_size = data->size;
+       }
+
+       early_memunmap(data, sizeof(*data));
+#else
+       pr_warn("Passed IMA kexec data, but CONFIG_IMA not set. Ignoring.\n");
+#endif
+}
+
+#if defined(CONFIG_HAVE_IMA_KEXEC) && !defined(CONFIG_OF_FLATTREE)
+int __init ima_free_kexec_buffer(void)
+{
+       int rc;
+
+       if (!ima_kexec_buffer_size)
+               return -ENOENT;
+
+       rc = memblock_phys_free(ima_kexec_buffer_phys,
+                               ima_kexec_buffer_size);
+       if (rc)
+               return rc;
+
+       ima_kexec_buffer_phys = 0;
+       ima_kexec_buffer_size = 0;
+
+       return 0;
+}
+
+int __init ima_get_kexec_buffer(void **addr, size_t *size)
+{
+       if (!ima_kexec_buffer_size)
+               return -ENOENT;
+
+       *addr = __va(ima_kexec_buffer_phys);
+       *size = ima_kexec_buffer_size;
+
+       return 0;
+}
+#endif
+
 static void __init parse_setup_data(void)
 {
        struct setup_data *data;
@@ -360,6 +416,18 @@ static void __init parse_setup_data(void)
                case SETUP_EFI:
                        parse_efi_setup(pa_data, data_len);
                        break;
+               case SETUP_IMA:
+                       add_early_ima_buffer(pa_data);
+                       break;
+               case SETUP_RNG_SEED:
+                       data = early_memremap(pa_data, data_len);
+                       add_bootloader_randomness(data->data, data->len);
+                       /* Zero seed for forward secrecy. */
+                       memzero_explicit(data->data, data->len);
+                       /* Zero length in case we find ourselves back here by accident. */
+                       memzero_explicit(&data->len, sizeof(data->len));
+                       early_memunmap(data, data_len);
+                       break;
                default:
                        break;
                }
index b478edf..3a5b0c9 100644 (file)
@@ -219,9 +219,10 @@ static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt
        return ES_VMM_ERROR;
 }
 
-enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, bool set_ghcb_msr,
-                                  struct es_em_ctxt *ctxt, u64 exit_code,
-                                  u64 exit_info_1, u64 exit_info_2)
+static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
+                                         struct es_em_ctxt *ctxt,
+                                         u64 exit_code, u64 exit_info_1,
+                                         u64 exit_info_2)
 {
        /* Fill in protocol and format specifiers */
        ghcb->protocol_version = ghcb_version;
@@ -231,14 +232,7 @@ enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, bool set_ghcb_msr,
        ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
        ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
 
-       /*
-        * Hyper-V unenlightened guests use a paravisor for communicating and
-        * GHCB pages are being allocated and set up by that paravisor. Linux
-        * should not change the GHCB page's physical address.
-        */
-       if (set_ghcb_msr)
-               sev_es_wr_ghcb_msr(__pa(ghcb));
-
+       sev_es_wr_ghcb_msr(__pa(ghcb));
        VMGEXIT();
 
        return verify_exception_info(ghcb, ctxt);
@@ -795,7 +789,7 @@ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
                 */
                sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
                ghcb_set_sw_scratch(ghcb, sw_scratch);
-               ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_IOIO,
+               ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
                                          exit_info_1, exit_info_2);
                if (ret != ES_OK)
                        return ret;
@@ -837,8 +831,7 @@ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
 
                ghcb_set_rax(ghcb, rax);
 
-               ret = sev_es_ghcb_hv_call(ghcb, true, ctxt,
-                                         SVM_EXIT_IOIO, exit_info_1, 0);
+               ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
                if (ret != ES_OK)
                        return ret;
 
@@ -894,7 +887,7 @@ static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
                /* xgetbv will cause #GP - use reset value for xcr0 */
                ghcb_set_xcr0(ghcb, 1);
 
-       ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_CPUID, 0, 0);
+       ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
        if (ret != ES_OK)
                return ret;
 
@@ -919,7 +912,7 @@ static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
        bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
        enum es_result ret;
 
-       ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, 0, 0);
+       ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
        if (ret != ES_OK)
                return ret;
 
index c05f012..63dc626 100644 (file)
@@ -786,7 +786,7 @@ static int vmgexit_psc(struct snp_psc_desc *desc)
                ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
 
                /* This will advance the shared buffer data points to. */
-               ret = sev_es_ghcb_hv_call(ghcb, true, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
+               ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
 
                /*
                 * Page State Change VMGEXIT can pass error code through
@@ -1212,8 +1212,7 @@ static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
                ghcb_set_rdx(ghcb, regs->dx);
        }
 
-       ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_MSR,
-                                 exit_info_1, 0);
+       ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
 
        if ((ret == ES_OK) && (!exit_info_1)) {
                regs->ax = ghcb->save.rax;
@@ -1452,7 +1451,7 @@ static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
 
        ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
 
-       return sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, exit_info_1, exit_info_2);
+       return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
 }
 
 /*
@@ -1628,7 +1627,7 @@ static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
 
        /* Using a value of 0 for ExitInfo1 means RAX holds the value */
        ghcb_set_rax(ghcb, val);
-       ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
+       ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
        if (ret != ES_OK)
                return ret;
 
@@ -1658,7 +1657,7 @@ static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
 static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
                                       struct es_em_ctxt *ctxt)
 {
-       return sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WBINVD, 0, 0);
+       return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
 }
 
 static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
@@ -1667,7 +1666,7 @@ static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt
 
        ghcb_set_rcx(ghcb, ctxt->regs->cx);
 
-       ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_RDPMC, 0, 0);
+       ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
        if (ret != ES_OK)
                return ret;
 
@@ -1708,7 +1707,7 @@ static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
        if (x86_platform.hyper.sev_es_hcall_prepare)
                x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
 
-       ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_VMMCALL, 0, 0);
+       ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
        if (ret != ES_OK)
                return ret;
 
@@ -2197,7 +2196,7 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
                ghcb_set_rbx(ghcb, input->data_npages);
        }
 
-       ret = sev_es_ghcb_hv_call(ghcb, true, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
+       ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
        if (ret)
                goto e_put;
 
index aa72cef..aaaba85 100644 (file)
@@ -12,13 +12,21 @@ enum insn_type {
 };
 
 /*
+ * ud1 %esp, %ecx - a 3 byte #UD that is unique to trampolines, chosen such
+ * that there is no false-positive trampoline identification while also being a
+ * speculation stop.
+ */
+static const u8 tramp_ud[] = { 0x0f, 0xb9, 0xcc };
+
+/*
  * cs cs cs xorl %eax, %eax - a single 5 byte instruction that clears %[er]ax
  */
 static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };
 
 static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };
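
The signature sits at byte 5 of every trampoline, right behind the 5-byte instruction slot, so validation is a plain memcmp. A stand-alone sketch with the byte values from above (the trampoline bytes are a made-up NULL-trampoline example):

```c
#include <stdio.h>
#include <string.h>

static const unsigned char tramp_ud[] = { 0x0f, 0xb9, 0xcc };

int main(void)
{
	/* ret + int3 padding in the insn slot, then the ud1 signature */
	const unsigned char tramp[8] = { 0xc3, 0xcc, 0xcc, 0xcc, 0xcc,
					 0x0f, 0xb9, 0xcc };

	printf("trampoline signature ok: %d\n",
	       memcmp(tramp + 5, tramp_ud, 3) == 0);	/* 1 */
	return 0;
}
```
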
 
-static void __ref __static_call_transform(void *insn, enum insn_type type, void *func)
+static void __ref __static_call_transform(void *insn, enum insn_type type,
+                                         void *func, bool modinit)
 {
        const void *emulate = NULL;
        int size = CALL_INSN_SIZE;
@@ -43,14 +51,17 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, void
                break;
 
        case RET:
-               code = &retinsn;
+               if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+                       code = text_gen_insn(JMP32_INSN_OPCODE, insn, &__x86_return_thunk);
+               else
+                       code = &retinsn;
                break;
        }
 
        if (memcmp(insn, code, size) == 0)
                return;
 
-       if (unlikely(system_state == SYSTEM_BOOTING))
+       if (system_state == SYSTEM_BOOTING || modinit)
                return text_poke_early(insn, code, size);
 
        text_poke_bp(insn, code, size, emulate);
@@ -60,7 +71,7 @@ static void __static_call_validate(void *insn, bool tail, bool tramp)
 {
        u8 opcode = *(u8 *)insn;
 
-       if (tramp && memcmp(insn+5, "SCT", 3)) {
+       if (tramp && memcmp(insn+5, tramp_ud, 3)) {
                pr_err("trampoline signature fail");
                BUG();
        }
@@ -104,14 +115,42 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
 
        if (tramp) {
                __static_call_validate(tramp, true, true);
-               __static_call_transform(tramp, __sc_insn(!func, true), func);
+               __static_call_transform(tramp, __sc_insn(!func, true), func, false);
        }
 
        if (IS_ENABLED(CONFIG_HAVE_STATIC_CALL_INLINE) && site) {
                __static_call_validate(site, tail, false);
-               __static_call_transform(site, __sc_insn(!func, tail), func);
+               __static_call_transform(site, __sc_insn(!func, tail), func, false);
        }
 
        mutex_unlock(&text_mutex);
 }
 EXPORT_SYMBOL_GPL(arch_static_call_transform);
+
+#ifdef CONFIG_RETHUNK
+/*
+ * This is called by apply_returns() to fix up static call trampolines,
+ * specifically ARCH_DEFINE_STATIC_CALL_NULL_TRAMP which is recorded as
+ * having a return trampoline.
+ *
+ * The problem is that static_call() is available before determining
+ * X86_FEATURE_RETHUNK and, by implication, running alternatives.
+ *
+ * This means that __static_call_transform() above can have overwritten the
+ * return trampoline and we now need to fix things up to be consistent.
+ */
+bool __static_call_fixup(void *tramp, u8 op, void *dest)
+{
+       if (memcmp(tramp+5, tramp_ud, 3)) {
+               /* Not a trampoline site, not our problem. */
+               return false;
+       }
+
+       mutex_lock(&text_mutex);
+       if (op == RET_INSN_OPCODE || dest == &__x86_return_thunk)
+               __static_call_transform(tramp, RET, NULL, true);
+       mutex_unlock(&text_mutex);
+
+       return true;
+}
+#endif
index f5f6dc2..15f2905 100644 (file)
@@ -141,7 +141,7 @@ SECTIONS
 
 #ifdef CONFIG_RETPOLINE
                __indirect_thunk_start = .;
-               *(.text.__x86.indirect_thunk)
+               *(.text.__x86.*)
                __indirect_thunk_end = .;
 #endif
        } :text =0xcccc
@@ -283,6 +283,13 @@ SECTIONS
                *(.retpoline_sites)
                __retpoline_sites_end = .;
        }
+
+       . = ALIGN(8);
+       .return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
+               __return_sites = .;
+               *(.return_sites)
+               __return_sites_end = .;
+       }
 #endif
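
The boundary symbols emitted here are consumed once at boot by apply_returns(); the callsite in arch/x86/kernel/alternative.c is essentially the following (the wrapper function name is hypothetical):

```c
/* sketch of the boot-time consumer of .return_sites */
extern s32 __return_sites[], __return_sites_end[];

static void __init patch_rets(void)
{
	/* rewrite every compiler-emitted RET recorded in .return_sites */
	apply_returns(__return_sites, __return_sites_end);
}
```
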
 
 #ifdef CONFIG_X86_KERNEL_IBT
@@ -388,7 +395,7 @@ SECTIONS
        .brk : AT(ADDR(.brk) - LOAD_OFFSET) {
                __brk_base = .;
                . += 64 * 1024;         /* 64k alignment slop space */
-               *(.brk_reservation)     /* areas brk users have reserved */
+               *(.bss..brk)            /* areas brk users have reserved */
                __brk_limit = .;
        }
 
index 89b11e7..f8382ab 100644 (file)
 #define X8(x...) X4(x), X4(x)
 #define X16(x...) X8(x), X8(x)
 
-#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
-#define FASTOP_SIZE (8 * (1 + HAS_KERNEL_IBT))
-
 struct opcode {
        u64 flags;
        u8 intercept;
@@ -306,9 +303,15 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
  * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
  * different operand sizes can be reached by calculation, rather than a jump
  * table (which would be bigger than the code).
+ *
+ * The 16 byte alignment, considering 5 bytes for the RET thunk, 4 for ENDBR
+ * and 1 for the straight line speculation INT3, leaves 6 bytes for the
+ * body of the function.  Currently none is larger than 4.
  */
 static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
 
+#define FASTOP_SIZE    16
+
 #define __FOP_FUNC(name) \
        ".align " __stringify(FASTOP_SIZE) " \n\t" \
        ".type " name ", @function \n\t" \
@@ -325,13 +328,15 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
 #define FOP_RET(name) \
        __FOP_RET(#name)
 
-#define FOP_START(op) \
+#define __FOP_START(op, align) \
        extern void em_##op(struct fastop *fake); \
        asm(".pushsection .text, \"ax\" \n\t" \
            ".global em_" #op " \n\t" \
-           ".align " __stringify(FASTOP_SIZE) " \n\t" \
+           ".align " __stringify(align) " \n\t" \
            "em_" #op ":\n\t"
 
+#define FOP_START(op) __FOP_START(op, FASTOP_SIZE)
+
 #define FOP_END \
            ".popsection")
 
@@ -435,17 +440,12 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
 /*
  * Depending on .config the SETcc functions look like:
  *
- * ENDBR       [4 bytes; CONFIG_X86_KERNEL_IBT]
- * SETcc %al   [3 bytes]
- * RET         [1 byte]
- * INT3        [1 byte; CONFIG_SLS]
- *
- * Which gives possible sizes 4, 5, 8 or 9.  When rounded up to the
- * next power-of-two alignment they become 4, 8 or 16 resp.
+ * ENDBR                       [4 bytes; CONFIG_X86_KERNEL_IBT]
+ * SETcc %al                   [3 bytes]
+ * RET | JMP __x86_return_thunk        [1,5 bytes; CONFIG_RETHUNK]
+ * INT3                                [1 byte; CONFIG_SLS]
  */
-#define SETCC_LENGTH   (ENDBR_INSN_SIZE + 4 + IS_ENABLED(CONFIG_SLS))
-#define SETCC_ALIGN    (4 << IS_ENABLED(CONFIG_SLS) << HAS_KERNEL_IBT)
-static_assert(SETCC_LENGTH <= SETCC_ALIGN);
+#define SETCC_ALIGN    16
 
 #define FOP_SETCC(op) \
        ".align " __stringify(SETCC_ALIGN) " \n\t" \
@@ -453,9 +453,10 @@ static_assert(SETCC_LENGTH <= SETCC_ALIGN);
        #op ": \n\t" \
        ASM_ENDBR \
        #op " %al \n\t" \
-       __FOP_RET(#op)
+       __FOP_RET(#op) \
+       ".skip " __stringify(SETCC_ALIGN) " - (.-" #op "), 0xcc \n\t"
 
-FOP_START(setcc)
+__FOP_START(setcc, SETCC_ALIGN)
 FOP_SETCC(seto)
 FOP_SETCC(setno)
 FOP_SETCC(setc)
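
Padding every stub to exactly SETCC_ALIGN bytes is what lets the emulator reach the right stub by arithmetic rather than a jump table; test_cc() in this file computes the target roughly as:

```c
/* each SETcc stub occupies exactly SETCC_ALIGN (16) bytes, so the stub
 * for a given x86 condition code is found by offset, not by lookup */
void (*fop)(void) = (void *)em_setcc + SETCC_ALIGN * (condition & 0xf);
```
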
index f1bdac3..0e68b4c 100644 (file)
@@ -2039,6 +2039,19 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
        }
 }
 
+static void kvm_lapic_xapic_id_updated(struct kvm_lapic *apic)
+{
+       struct kvm *kvm = apic->vcpu->kvm;
+
+       if (KVM_BUG_ON(apic_x2apic_mode(apic), kvm))
+               return;
+
+       if (kvm_xapic_id(apic) == apic->vcpu->vcpu_id)
+               return;
+
+       kvm_set_apicv_inhibit(apic->vcpu->kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
+}
+
 static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 {
        int ret = 0;
@@ -2047,10 +2060,12 @@ static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 
        switch (reg) {
        case APIC_ID:           /* Local APIC ID */
-               if (!apic_x2apic_mode(apic))
+               if (!apic_x2apic_mode(apic)) {
                        kvm_apic_set_xapic_id(apic, val >> 24);
-               else
+                       kvm_lapic_xapic_id_updated(apic);
+               } else {
                        ret = 1;
+               }
                break;
 
        case APIC_TASKPRI:
@@ -2336,8 +2351,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
                             MSR_IA32_APICBASE_BASE;
 
        if ((value & MSR_IA32_APICBASE_ENABLE) &&
-            apic->base_address != APIC_DEFAULT_PHYS_BASE)
-               pr_warn_once("APIC base relocation is unsupported by KVM");
+            apic->base_address != APIC_DEFAULT_PHYS_BASE) {
+               kvm_set_apicv_inhibit(apic->vcpu->kvm,
+                                     APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
+       }
 }
 
 void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
@@ -2648,6 +2665,8 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
                        icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
                        __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
                }
+       } else {
+               kvm_lapic_xapic_id_updated(vcpu->arch.apic);
        }
 
        return 0;
index e826ee9..17252f3 100644 (file)
@@ -3411,7 +3411,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                        root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
                                              i << 30, PT32_ROOT_LEVEL, true);
                        mmu->pae_root[i] = root | PT_PRESENT_MASK |
-                                          shadow_me_mask;
+                                          shadow_me_value;
                }
                mmu->root.hpa = __pa(mmu->pae_root);
        } else {
index 54fe037..d1bc582 100644 (file)
@@ -291,58 +291,91 @@ void avic_ring_doorbell(struct kvm_vcpu *vcpu)
 static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source,
                                       u32 icrl, u32 icrh, u32 index)
 {
-       u32 dest, apic_id;
-       struct kvm_vcpu *vcpu;
+       u32 l1_physical_id, dest;
+       struct kvm_vcpu *target_vcpu;
        int dest_mode = icrl & APIC_DEST_MASK;
        int shorthand = icrl & APIC_SHORT_MASK;
        struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
-       u32 *avic_logical_id_table = page_address(kvm_svm->avic_logical_id_table_page);
 
        if (shorthand != APIC_DEST_NOSHORT)
                return -EINVAL;
 
-       /*
-        * The AVIC incomplete IPI #vmexit info provides index into
-        * the physical APIC ID table, which can be used to derive
-        * guest physical APIC ID.
-        */
+       if (apic_x2apic_mode(source))
+               dest = icrh;
+       else
+               dest = GET_APIC_DEST_FIELD(icrh);
+
        if (dest_mode == APIC_DEST_PHYSICAL) {
-               apic_id = index;
+               /* broadcast destination, use slow path */
+               if (apic_x2apic_mode(source) && dest == X2APIC_BROADCAST)
+                       return -EINVAL;
+               if (!apic_x2apic_mode(source) && dest == APIC_BROADCAST)
+                       return -EINVAL;
+
+               l1_physical_id = dest;
+
+               if (WARN_ON_ONCE(l1_physical_id != index))
+                       return -EINVAL;
+
        } else {
-               if (!apic_x2apic_mode(source)) {
-                       /* For xAPIC logical mode, the index is for logical APIC table. */
-                       apic_id = avic_logical_id_table[index] & 0x1ff;
+               u32 bitmap, cluster;
+               int logid_index;
+
+               if (apic_x2apic_mode(source)) {
+                       /* 16 bit dest mask, 16 bit cluster id */
+                       bitmap = dest & 0xFFFF0000;
+                       cluster = (dest >> 16) << 4;
+               } else if (kvm_lapic_get_reg(source, APIC_DFR) == APIC_DFR_FLAT) {
+                       /* 8 bit dest mask */
+                       bitmap = dest;
+                       cluster = 0;
                } else {
-                       return -EINVAL;
+                       /* 4 bit dest mask, 4 bit cluster id */
+                       bitmap = dest & 0xF;
+                       cluster = (dest >> 4) << 2;
                }
-       }
 
-       /*
-        * Assuming vcpu ID is the same as physical apic ID,
-        * and use it to retrieve the target vCPU.
-        */
-       vcpu = kvm_get_vcpu_by_id(kvm, apic_id);
-       if (!vcpu)
-               return -EINVAL;
+               if (unlikely(!bitmap))
+                       /* guest bug: nobody to send the logical interrupt to */
+                       return 0;
 
-       if (apic_x2apic_mode(vcpu->arch.apic))
-               dest = icrh;
-       else
-               dest = GET_APIC_DEST_FIELD(icrh);
+               if (!is_power_of_2(bitmap))
+                       /* multiple logical destinations, use slow path */
+                       return -EINVAL;
 
-       /*
-        * Try matching the destination APIC ID with the vCPU.
-        */
-       if (kvm_apic_match_dest(vcpu, source, shorthand, dest, dest_mode)) {
-               vcpu->arch.apic->irr_pending = true;
-               svm_complete_interrupt_delivery(vcpu,
-                                               icrl & APIC_MODE_MASK,
-                                               icrl & APIC_INT_LEVELTRIG,
-                                               icrl & APIC_VECTOR_MASK);
-               return 0;
+               logid_index = cluster + __ffs(bitmap);
+
+               if (apic_x2apic_mode(source)) {
+                       l1_physical_id = logid_index;
+               } else {
+                       u32 *avic_logical_id_table =
+                               page_address(kvm_svm->avic_logical_id_table_page);
+
+                       u32 logid_entry = avic_logical_id_table[logid_index];
+
+                       if (WARN_ON_ONCE(index != logid_index))
+                               return -EINVAL;
+
+                       /* guest bug: non-existing/reserved logical destination */
+                       if (unlikely(!(logid_entry & AVIC_LOGICAL_ID_ENTRY_VALID_MASK)))
+                               return 0;
+
+                       l1_physical_id = logid_entry &
+                                        AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
+               }
        }
 
-       return -EINVAL;
+       target_vcpu = kvm_get_vcpu_by_id(kvm, l1_physical_id);
+       if (unlikely(!target_vcpu))
+               /* guest bug: non-existing vCPU is a target of this IPI */
+               return 0;
+
+       target_vcpu->arch.apic->irr_pending = true;
+       svm_complete_interrupt_delivery(target_vcpu,
+                                       icrl & APIC_MODE_MASK,
+                                       icrl & APIC_INT_LEVELTRIG,
+                                       icrl & APIC_VECTOR_MASK);
+       return 0;
 }
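
A user-space illustration of the xAPIC cluster-mode arithmetic above, with a concrete destination byte (the values are invented for the example; the kernel's __ffs() is 0-based, unlike __builtin_ffs()):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dest = 0x21;			/* cluster 2, logical bit 0 */
	uint32_t bitmap = dest & 0xF;		/* 4-bit destination mask */
	uint32_t cluster = (dest >> 4) << 2;	/* 4 logical IDs per cluster */
	int logid_index = cluster + __builtin_ffs(bitmap) - 1;

	/* a multi-bit mask (e.g. dest = 0x23) would take the slow path */
	printf("logid_index = %d\n", logid_index);	/* 8 */
	return 0;
}
```
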
 
 static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
@@ -508,35 +541,6 @@ static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
        return ret;
 }
 
-static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
-{
-       u64 *old, *new;
-       struct vcpu_svm *svm = to_svm(vcpu);
-       u32 id = kvm_xapic_id(vcpu->arch.apic);
-
-       if (vcpu->vcpu_id == id)
-               return 0;
-
-       old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
-       new = avic_get_physical_id_entry(vcpu, id);
-       if (!new || !old)
-               return 1;
-
-       /* We need to move physical_id_entry to new offset */
-       *new = *old;
-       *old = 0ULL;
-       to_svm(vcpu)->avic_physical_id_cache = new;
-
-       /*
-        * Also update the guest physical APIC ID in the logical
-        * APIC ID table entry if already setup the LDR.
-        */
-       if (svm->ldr_reg)
-               avic_handle_ldr_update(vcpu);
-
-       return 0;
-}
-
 static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -555,10 +559,6 @@ static int avic_unaccel_trap_write(struct kvm_vcpu *vcpu)
                                AVIC_UNACCEL_ACCESS_OFFSET_MASK;
 
        switch (offset) {
-       case APIC_ID:
-               if (avic_handle_apic_id_update(vcpu))
-                       return 0;
-               break;
        case APIC_LDR:
                if (avic_handle_ldr_update(vcpu))
                        return 0;
@@ -650,8 +650,6 @@ int avic_init_vcpu(struct vcpu_svm *svm)
 
 void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 {
-       if (avic_handle_apic_id_update(vcpu) != 0)
-               return;
        avic_handle_dfr_update(vcpu);
        avic_handle_ldr_update(vcpu);
 }
@@ -910,7 +908,9 @@ bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
                          BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
                          BIT(APICV_INHIBIT_REASON_X2APIC) |
                          BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |
-                         BIT(APICV_INHIBIT_REASON_SEV);
+                         BIT(APICV_INHIBIT_REASON_SEV)      |
+                         BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |
+                         BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
 
        return supported & BIT(reason);
 }
@@ -946,7 +946,7 @@ out:
        return ret;
 }
 
-void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        u64 entry;
        int h_physical_id = kvm_cpu_get_apicid(cpu);
@@ -978,7 +978,7 @@ void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
 }
 
-void __avic_vcpu_put(struct kvm_vcpu *vcpu)
+void avic_vcpu_put(struct kvm_vcpu *vcpu)
 {
        u64 entry;
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -997,25 +997,6 @@ void __avic_vcpu_put(struct kvm_vcpu *vcpu)
        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
 }
 
-static void avic_vcpu_load(struct kvm_vcpu *vcpu)
-{
-       int cpu = get_cpu();
-
-       WARN_ON(cpu != vcpu->cpu);
-
-       __avic_vcpu_load(vcpu, cpu);
-
-       put_cpu();
-}
-
-static void avic_vcpu_put(struct kvm_vcpu *vcpu)
-{
-       preempt_disable();
-
-       __avic_vcpu_put(vcpu);
-
-       preempt_enable();
-}
 
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
@@ -1042,7 +1023,7 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
        vmcb_mark_dirty(vmcb, VMCB_AVIC);
 
        if (activated)
-               avic_vcpu_load(vcpu);
+               avic_vcpu_load(vcpu, vcpu->cpu);
        else
                avic_vcpu_put(vcpu);
 
@@ -1075,5 +1056,5 @@ void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
        if (!kvm_vcpu_apicv_active(vcpu))
                return;
 
-       avic_vcpu_load(vcpu);
+       avic_vcpu_load(vcpu, vcpu->cpu);
 }
index 3361258..ba7cd26 100644 (file)
@@ -616,6 +616,8 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
        struct kvm_vcpu *vcpu = &svm->vcpu;
        struct vmcb *vmcb01 = svm->vmcb01.ptr;
        struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
+       u32 pause_count12;
+       u32 pause_thresh12;
 
        /*
         * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
@@ -671,27 +673,25 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
        if (!nested_vmcb_needs_vls_intercept(svm))
                vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
 
+       pause_count12 = svm->pause_filter_enabled ? svm->nested.ctl.pause_filter_count : 0;
+       pause_thresh12 = svm->pause_threshold_enabled ? svm->nested.ctl.pause_filter_thresh : 0;
        if (kvm_pause_in_guest(svm->vcpu.kvm)) {
-               /* use guest values since host doesn't use them */
-               vmcb02->control.pause_filter_count =
-                               svm->pause_filter_enabled ?
-                               svm->nested.ctl.pause_filter_count : 0;
+               /* use guest values since host doesn't intercept PAUSE */
+               vmcb02->control.pause_filter_count = pause_count12;
+               vmcb02->control.pause_filter_thresh = pause_thresh12;
 
-               vmcb02->control.pause_filter_thresh =
-                               svm->pause_threshold_enabled ?
-                               svm->nested.ctl.pause_filter_thresh : 0;
-
-       } else if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
-               /* use host values when guest doesn't use them */
+       } else {
+               /* start from host values otherwise */
                vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
                vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;
-       } else {
-               /*
-                * Intercept every PAUSE otherwise and
-                * ignore both host and guest values
-                */
-               vmcb02->control.pause_filter_count = 0;
-               vmcb02->control.pause_filter_thresh = 0;
+
+               /* ... but ensure filtering is disabled if so requested.  */
+               if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
+                       if (!pause_count12)
+                               vmcb02->control.pause_filter_count = 0;
+                       if (!pause_thresh12)
+                               vmcb02->control.pause_filter_thresh = 0;
+               }
        }
 
        nested_svm_transition_tlb_flush(vcpu);
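
The merged PAUSE-filter selection above reduces to a small decision function. The sketch below restates it under simplified assumptions; pick_l2_pause_filter, the struct, and the int parameters are illustrative names, not the kernel's:

    struct pause_filter { unsigned int count, thresh; };

    /* Condensed restatement of the vmcb02 pause-filter policy. */
    static struct pause_filter
    pick_l2_pause_filter(int l0_pause_in_guest, int l1_intercepts_pause,
                         struct pause_filter l1, struct pause_filter l0)
    {
            struct pause_filter out = l0;

            if (l0_pause_in_guest)          /* L0 doesn't intercept PAUSE */
                    return l1;              /* run L2 with L1's filter    */

            /* Otherwise start from L0's values, but honor an L1 that
             * intercepts PAUSE itself and asked for no filtering. */
            if (l1_intercepts_pause) {
                    if (!l1.count)
                            out.count = 0;
                    if (!l1.thresh)
                            out.thresh = 0;
            }
            return out;
    }
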
@@ -951,8 +951,11 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
        vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;
 
-       if (!kvm_pause_in_guest(vcpu->kvm) && vmcb02->control.pause_filter_count)
+       if (!kvm_pause_in_guest(vcpu->kvm)) {
                vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
+               vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
+       }
 
        nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
 
index 51fd985..0c240ed 100644 (file)
@@ -844,7 +844,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
 
        /* If source buffer is not aligned then use an intermediate buffer */
        if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
-               src_tpage = alloc_page(GFP_KERNEL);
+               src_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
                if (!src_tpage)
                        return -ENOMEM;
 
@@ -865,7 +865,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
        if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
                int dst_offset;
 
-               dst_tpage = alloc_page(GFP_KERNEL);
+               dst_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
                if (!dst_tpage) {
                        ret = -ENOMEM;
                        goto e_free;
@@ -1665,19 +1665,24 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
        struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
        struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
+       struct kvm_vcpu *dst_vcpu, *src_vcpu;
+       struct vcpu_svm *dst_svm, *src_svm;
        struct kvm_sev_info *mirror;
+       unsigned long i;
 
        dst->active = true;
        dst->asid = src->asid;
        dst->handle = src->handle;
        dst->pages_locked = src->pages_locked;
        dst->enc_context_owner = src->enc_context_owner;
+       dst->es_active = src->es_active;
 
        src->asid = 0;
        src->active = false;
        src->handle = 0;
        src->pages_locked = 0;
        src->enc_context_owner = NULL;
+       src->es_active = false;
 
        list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
 
@@ -1704,26 +1709,21 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
                list_del(&src->mirror_entry);
                list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms);
        }
-}
 
-static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
-{
-       unsigned long i;
-       struct kvm_vcpu *dst_vcpu, *src_vcpu;
-       struct vcpu_svm *dst_svm, *src_svm;
+       kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
+               dst_svm = to_svm(dst_vcpu);
 
-       if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
-               return -EINVAL;
+               sev_init_vmcb(dst_svm);
 
-       kvm_for_each_vcpu(i, src_vcpu, src) {
-               if (!src_vcpu->arch.guest_state_protected)
-                       return -EINVAL;
-       }
+               if (!dst->es_active)
+                       continue;
 
-       kvm_for_each_vcpu(i, src_vcpu, src) {
+               /*
+                * Note, the source is not required to have the same number of
+                * vCPUs as the destination when migrating a vanilla SEV VM.
+                */
+               src_vcpu = kvm_get_vcpu(dst_kvm, i);
                src_svm = to_svm(src_vcpu);
-               dst_vcpu = kvm_get_vcpu(dst, i);
-               dst_svm = to_svm(dst_vcpu);
 
                /*
                 * Transfer VMSA and GHCB state to the destination.  Nullify and
@@ -1740,8 +1740,23 @@ static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
                src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
                src_vcpu->arch.guest_state_protected = false;
        }
-       to_kvm_svm(src)->sev_info.es_active = false;
-       to_kvm_svm(dst)->sev_info.es_active = true;
+}
+
+static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
+{
+       struct kvm_vcpu *src_vcpu;
+       unsigned long i;
+
+       if (!sev_es_guest(src))
+               return 0;
+
+       if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
+               return -EINVAL;
+
+       kvm_for_each_vcpu(i, src_vcpu, src) {
+               if (!src_vcpu->arch.guest_state_protected)
+                       return -EINVAL;
+       }
 
        return 0;
 }
@@ -1789,11 +1804,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
        if (ret)
                goto out_dst_vcpu;
 
-       if (sev_es_guest(source_kvm)) {
-               ret = sev_es_migrate_from(kvm, source_kvm);
-               if (ret)
-                       goto out_source_vcpu;
-       }
+       ret = sev_check_source_vcpus(kvm, source_kvm);
+       if (ret)
+               goto out_source_vcpu;
 
        sev_migrate_from(kvm, source_kvm);
        kvm_vm_dead(source_kvm);
@@ -2914,7 +2927,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
                                    count, in);
 }
 
-void sev_es_init_vmcb(struct vcpu_svm *svm)
+static void sev_es_init_vmcb(struct vcpu_svm *svm)
 {
        struct kvm_vcpu *vcpu = &svm->vcpu;
 
@@ -2967,6 +2980,15 @@ void sev_es_init_vmcb(struct vcpu_svm *svm)
        }
 }
 
+void sev_init_vmcb(struct vcpu_svm *svm)
+{
+       svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
+       clr_exception_intercept(svm, UD_VECTOR);
+
+       if (sev_es_guest(svm->vcpu.kvm))
+               sev_es_init_vmcb(svm);
+}
+
 void sev_es_vcpu_reset(struct vcpu_svm *svm)
 {
        /*
index 1dc02cd..44bbf25 100644 (file)
@@ -921,7 +921,7 @@ static void grow_ple_window(struct kvm_vcpu *vcpu)
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;
 
-       if (kvm_pause_in_guest(vcpu->kvm) || !old)
+       if (kvm_pause_in_guest(vcpu->kvm))
                return;
 
        control->pause_filter_count = __grow_ple_window(old,
@@ -942,7 +942,7 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;
 
-       if (kvm_pause_in_guest(vcpu->kvm) || !old)
+       if (kvm_pause_in_guest(vcpu->kvm))
                return;
 
        control->pause_filter_count =
@@ -1212,15 +1212,8 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
                svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
        }
 
-       if (sev_guest(vcpu->kvm)) {
-               svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
-               clr_exception_intercept(svm, UD_VECTOR);
-
-               if (sev_es_guest(vcpu->kvm)) {
-                       /* Perform SEV-ES specific VMCB updates */
-                       sev_es_init_vmcb(svm);
-               }
-       }
+       if (sev_guest(vcpu->kvm))
+               sev_init_vmcb(svm);
 
        svm_hv_init_vmcb(vmcb);
        init_vmcb_after_set_cpuid(vcpu);
@@ -1400,13 +1393,13 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                indirect_branch_prediction_barrier();
        }
        if (kvm_vcpu_apicv_active(vcpu))
-               __avic_vcpu_load(vcpu, cpu);
+               avic_vcpu_load(vcpu, cpu);
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 {
        if (kvm_vcpu_apicv_active(vcpu))
-               __avic_vcpu_put(vcpu);
+               avic_vcpu_put(vcpu);
 
        svm_prepare_host_switch(vcpu);
 
index 500348c..9223ac1 100644 (file)
@@ -610,8 +610,8 @@ void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
 int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
 int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
 int avic_init_vcpu(struct vcpu_svm *svm);
-void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
-void __avic_vcpu_put(struct kvm_vcpu *vcpu);
+void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void avic_vcpu_put(struct kvm_vcpu *vcpu);
 void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
 void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
@@ -649,10 +649,10 @@ void __init sev_set_cpu_caps(void);
 void __init sev_hardware_setup(void);
 void sev_hardware_unsetup(void);
 int sev_cpu_init(struct svm_cpu_data *sd);
+void sev_init_vmcb(struct vcpu_svm *svm);
 void sev_free_vcpu(struct kvm_vcpu *vcpu);
 int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
-void sev_es_init_vmcb(struct vcpu_svm *svm);
 void sev_es_vcpu_reset(struct vcpu_svm *svm);
 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
index dfaeb47..723f853 100644 (file)
@@ -111,6 +111,15 @@ SYM_FUNC_START(__svm_vcpu_run)
 #endif
 
        /*
+        * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
+        * untrained as soon as we exit the VM and are back to the
+        * kernel. This should be done before re-enabling interrupts
+        * because interrupt handlers won't sanitize RET if the return is
+        * from the kernel.
+        */
+       UNTRAIN_RET
+
+       /*
         * Clear all general purpose registers except RSP and RAX to prevent
         * speculative use of the guest's values, even those that are reloaded
         * via the stack.  In theory, an L1 cache miss when restoring registers
@@ -190,6 +199,15 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif
 
+       /*
+        * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
+        * untrained as soon as we exit the VM and are back to the
+        * kernel. This should be done before re-enabling interrupts
+        * because interrupt handlers won't sanitize RET if the return is
+        * from the kernel.
+        */
+       UNTRAIN_RET
+
        pop %_ASM_BX
 
 #ifdef CONFIG_X86_64
index 3f430e2..c0e2482 100644 (file)
@@ -4,8 +4,8 @@
 
 #include <asm/vmx.h>
 
-#include "lapic.h"
-#include "x86.h"
+#include "../lapic.h"
+#include "../x86.h"
 
 extern bool __read_mostly enable_vpid;
 extern bool __read_mostly flexpriority_enabled;
index f5cb18e..ab135f9 100644 (file)
@@ -2278,7 +2278,6 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
                                  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
                                  SECONDARY_EXEC_APIC_REGISTER_VIRT |
                                  SECONDARY_EXEC_ENABLE_VMFUNC |
-                                 SECONDARY_EXEC_TSC_SCALING |
                                  SECONDARY_EXEC_DESC);
 
                if (nested_cpu_has(vmcs12,
@@ -3087,7 +3086,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
        }
 
        vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
-                                vmx->loaded_vmcs->launched);
+                                __vmx_vcpu_run_flags(vmx));
 
        if (vmx->msr_autoload.host.nr)
                vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h
new file mode 100644 (file)
index 0000000..edc3f16
--- /dev/null
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_RUN_FLAGS_H
+#define __KVM_X86_VMX_RUN_FLAGS_H
+
+#define VMX_RUN_VMRESUME       (1 << 0)
+#define VMX_RUN_SAVE_SPEC_CTRL (1 << 1)
+
+#endif /* __KVM_X86_VMX_RUN_FLAGS_H */
index 435c187..4182c7f 100644 (file)
@@ -1,10 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/linkage.h>
 #include <asm/asm.h>
+#include <asm/asm-offsets.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
+#include <asm/percpu.h>
 #include <asm/segment.h>
+#include "run_flags.h"
 
 #define WORD_SIZE (BITS_PER_LONG / 8)
 
 .section .noinstr.text, "ax"
 
 /**
- * vmx_vmenter - VM-Enter the current loaded VMCS
- *
- * %RFLAGS.ZF: !VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
- *
- * Returns:
- *     %RFLAGS.CF is set on VM-Fail Invalid
- *     %RFLAGS.ZF is set on VM-Fail Valid
- *     %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
- *
- * Note that VMRESUME/VMLAUNCH fall-through and return directly if
- * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
- * to vmx_vmexit.
- */
-SYM_FUNC_START_LOCAL(vmx_vmenter)
-       /* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
-       je 2f
-
-1:     vmresume
-       RET
-
-2:     vmlaunch
-       RET
-
-3:     cmpb $0, kvm_rebooting
-       je 4f
-       RET
-4:     ud2
-
-       _ASM_EXTABLE(1b, 3b)
-       _ASM_EXTABLE(2b, 3b)
-
-SYM_FUNC_END(vmx_vmenter)
-
-/**
- * vmx_vmexit - Handle a VMX VM-Exit
- *
- * Returns:
- *     %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
- *
- * This is vmx_vmenter's partner in crime.  On a VM-Exit, control will jump
- * here after hardware loads the host's state, i.e. this is the destination
- * referred to by VMCS.HOST_RIP.
- */
-SYM_FUNC_START(vmx_vmexit)
-#ifdef CONFIG_RETPOLINE
-       ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE
-       /* Preserve guest's RAX, it's used to stuff the RSB. */
-       push %_ASM_AX
-
-       /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-       FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
-
-       /* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
-       or $1, %_ASM_AX
-
-       pop %_ASM_AX
-.Lvmexit_skip_rsb:
-#endif
-       RET
-SYM_FUNC_END(vmx_vmexit)
-
-/**
  * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
- * @vmx:       struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
+ * @vmx:       struct vcpu_vmx *
  * @regs:      unsigned long * (to guest registers)
- * @launched:  %true if the VMCS has been launched
+ * @flags:     VMX_RUN_VMRESUME:       use VMRESUME instead of VMLAUNCH
+ *             VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl
  *
  * Returns:
  *     0 on VM-Exit, 1 on VM-Fail
@@ -115,24 +57,56 @@ SYM_FUNC_START(__vmx_vcpu_run)
 #endif
        push %_ASM_BX
 
+       /* Save @vmx for SPEC_CTRL handling */
+       push %_ASM_ARG1
+
+       /* Save @flags for SPEC_CTRL handling */
+       push %_ASM_ARG3
+
        /*
         * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
         * @regs is needed after VM-Exit to save the guest's register values.
         */
        push %_ASM_ARG2
 
-       /* Copy @launched to BL, _ASM_ARG3 is volatile. */
+       /* Copy @flags to BL, _ASM_ARG3 is volatile. */
        mov %_ASM_ARG3B, %bl
 
-       /* Adjust RSP to account for the CALL to vmx_vmenter(). */
-       lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
+       lea (%_ASM_SP), %_ASM_ARG2
        call vmx_update_host_rsp
 
+       ALTERNATIVE "jmp .Lspec_ctrl_done", "", X86_FEATURE_MSR_SPEC_CTRL
+
+       /*
+        * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
+        * host's, write the MSR.
+        *
+        * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
+        * there must not be any returns or indirect branches between this code
+        * and vmentry.
+        */
+       mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
+       movl VMX_spec_ctrl(%_ASM_DI), %edi
+       movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
+       cmp %edi, %esi
+       je .Lspec_ctrl_done
+       mov $MSR_IA32_SPEC_CTRL, %ecx
+       xor %edx, %edx
+       mov %edi, %eax
+       wrmsr
+
+.Lspec_ctrl_done:
+
+       /*
+        * Since vmentry is serializing on affected CPUs, there's no need for
+        * an LFENCE to stop speculation from skipping the wrmsr.
+        */
+
        /* Load @regs to RAX. */
        mov (%_ASM_SP), %_ASM_AX
 
        /* Check if vmlaunch or vmresume is needed */
-       testb %bl, %bl
+       testb $VMX_RUN_VMRESUME, %bl
 
        /* Load guest registers.  Don't clobber flags. */
        mov VCPU_RCX(%_ASM_AX), %_ASM_CX
@@ -154,11 +128,37 @@ SYM_FUNC_START(__vmx_vcpu_run)
        /* Load guest RAX.  This kills the @regs pointer! */
        mov VCPU_RAX(%_ASM_AX), %_ASM_AX
 
-       /* Enter guest mode */
-       call vmx_vmenter
+       /* Check EFLAGS.ZF from 'testb' above */
+       jz .Lvmlaunch
+
+       /*
+        * After a successful VMRESUME/VMLAUNCH, control flow "magically"
+        * resumes below at 'vmx_vmexit' due to the VMCS HOST_RIP setting.
+        * So this isn't a typical function and objtool needs to be told to
+        * save the unwind state here and restore it below.
+        */
+       UNWIND_HINT_SAVE
+
+/*
+ * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution resumes at
+ * the 'vmx_vmexit' label below.
+ */
+.Lvmresume:
+       vmresume
+       jmp .Lvmfail
+
+.Lvmlaunch:
+       vmlaunch
+       jmp .Lvmfail
 
-       /* Jump on VM-Fail. */
-       jbe 2f
+       _ASM_EXTABLE(.Lvmresume, .Lfixup)
+       _ASM_EXTABLE(.Lvmlaunch, .Lfixup)
+
+SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
+
+       /* Restore unwind state from before the VMRESUME/VMLAUNCH. */
+       UNWIND_HINT_RESTORE
+       ENDBR
 
        /* Temporarily save guest's RAX. */
        push %_ASM_AX
@@ -185,21 +185,23 @@ SYM_FUNC_START(__vmx_vcpu_run)
        mov %r15, VCPU_R15(%_ASM_AX)
 #endif
 
-       /* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
-       xor %eax, %eax
+       /* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
+       xor %ebx, %ebx
 
+.Lclear_regs:
        /*
-        * Clear all general purpose registers except RSP and RAX to prevent
+        * Clear all general purpose registers except RSP and RBX to prevent
         * speculative use of the guest's values, even those that are reloaded
         * via the stack.  In theory, an L1 cache miss when restoring registers
         * could lead to speculative execution with the guest's values.
         * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
         * free.  RSP and RBX are exempt as RSP is restored by hardware during
-        * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
+        * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
+        * value.
         */
-1:     xor %ecx, %ecx
+       xor %eax, %eax
+       xor %ecx, %ecx
        xor %edx, %edx
-       xor %ebx, %ebx
        xor %ebp, %ebp
        xor %esi, %esi
        xor %edi, %edi
@@ -216,8 +218,30 @@ SYM_FUNC_START(__vmx_vcpu_run)
 
        /* "POP" @regs. */
        add $WORD_SIZE, %_ASM_SP
-       pop %_ASM_BX
 
+       /*
+        * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
+        * the first unbalanced RET after vmexit!
+        *
+        * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
+        * entries and (in some cases) RSB underflow.
+        *
+        * eIBRS has its own protection against poisoned RSB, so it doesn't
+        * need the RSB filling sequence.  But it does need to be enabled
+        * before the first unbalanced RET.
+        */
+
+       FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
+
+       pop %_ASM_ARG2  /* @flags */
+       pop %_ASM_ARG1  /* @vmx */
+
+       call vmx_spec_ctrl_restore_host
+
+       /* Put return value in AX */
+       mov %_ASM_BX, %_ASM_AX
+
+       pop %_ASM_BX
 #ifdef CONFIG_X86_64
        pop %r12
        pop %r13
@@ -230,9 +254,15 @@ SYM_FUNC_START(__vmx_vcpu_run)
        pop %_ASM_BP
        RET
 
-       /* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
-2:     mov $1, %eax
-       jmp 1b
+.Lfixup:
+       cmpb $0, kvm_rebooting
+       jne .Lvmfail
+       ud2
+.Lvmfail:
+       /* VM-Fail: set return value to 1 */
+       mov $1, %_ASM_BX
+       jmp .Lclear_regs
+
 SYM_FUNC_END(__vmx_vcpu_run)
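
At the C level, the new contract replaces the old @launched bool with a flags word. A condensed sketch of how the pieces fit together, mirroring the vmx.c hunks below (error handling elided):

    /* Sketch of the call sequence; see __vmx_vcpu_run_flags() below. */
    unsigned int flags = __vmx_vcpu_run_flags(vmx);

    vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, flags);
    /* By the time __vmx_vcpu_run() returns, the asm above has already
     * saved the guest's SPEC_CTRL (if VMX_RUN_SAVE_SPEC_CTRL was set)
     * and restored the host value, before any unbalanced RET ran. */
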
 
 
index 9bd86ec..be7c193 100644 (file)
@@ -229,6 +229,9 @@ static const struct {
 #define L1D_CACHE_ORDER 4
 static void *vmx_l1d_flush_pages;
 
+/* Control for disabling CPU Fill buffer clear */
+static bool __read_mostly vmx_fb_clear_ctrl_available;
+
 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 {
        struct page *page;
@@ -360,6 +363,60 @@ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
        return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
 }
 
+static void vmx_setup_fb_clear_ctrl(void)
+{
+       u64 msr;
+
+       if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) &&
+           !boot_cpu_has_bug(X86_BUG_MDS) &&
+           !boot_cpu_has_bug(X86_BUG_TAA)) {
+               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
+               if (msr & ARCH_CAP_FB_CLEAR_CTRL)
+                       vmx_fb_clear_ctrl_available = true;
+       }
+}
+
+static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
+{
+       u64 msr;
+
+       if (!vmx->disable_fb_clear)
+               return;
+
+       msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
+       msr |= FB_CLEAR_DIS;
+       native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
+       /* Cache the MSR value to avoid reading it later */
+       vmx->msr_ia32_mcu_opt_ctrl = msr;
+}
+
+static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
+{
+       if (!vmx->disable_fb_clear)
+               return;
+
+       vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
+       native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
+}
+
+static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
+{
+       vmx->disable_fb_clear = vmx_fb_clear_ctrl_available;
+
+       /*
+        * If the guest will not execute VERW, there is no need to set
+        * FB_CLEAR_DIS at VM-entry. Skip the MSR read/write when the guest
+        * has no reason to execute VERW.
+        */
+       if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
+          ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
+           (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
+           (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
+           (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
+           (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
+               vmx->disable_fb_clear = false;
+}
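
The condition above can be read as a predicate: FB_CLEAR_DIS is only worth setting when the guest might execute VERW yet must not depend on it. A hedged restatement (want_fb_clear_dis and guest_caps are illustrative; guest_caps stands in for vcpu->arch.arch_capabilities):

    /* Illustrative predicate, not the kernel's own helper. */
    static bool want_fb_clear_dis(u64 guest_caps, bool ctrl_available)
    {
            u64 all_no = ARCH_CAP_MDS_NO | ARCH_CAP_TAA_NO |
                         ARCH_CAP_PSDP_NO | ARCH_CAP_FBSDP_NO |
                         ARCH_CAP_SBDR_SSDP_NO;

            if (!ctrl_available)
                    return false;   /* no FB_CLEAR_CTRL on this host */
            if (guest_caps & ARCH_CAP_FB_CLEAR)
                    return false;   /* guest's VERW must really clear */
            if ((guest_caps & all_no) == all_no)
                    return false;   /* guest won't bother with VERW */
            return true;
    }
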
+
 static const struct kernel_param_ops vmentry_l1d_flush_ops = {
        .set = vmentry_l1d_flush_set,
        .get = vmentry_l1d_flush_get,
@@ -782,6 +839,24 @@ static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
                                         MSR_IA32_SPEC_CTRL);
 }
 
+unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
+{
+       unsigned int flags = 0;
+
+       if (vmx->loaded_vmcs->launched)
+               flags |= VMX_RUN_VMRESUME;
+
+       /*
+        * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free
+        * to change it directly without causing a vmexit.  In that case read
+        * it after vmexit and store it in vmx->spec_ctrl.
+        */
+       if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL)))
+               flags |= VMX_RUN_SAVE_SPEC_CTRL;
+
+       return flags;
+}
+
 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
                unsigned long entry, unsigned long exit)
 {
@@ -2252,6 +2327,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        ret = kvm_set_msr_common(vcpu, msr_info);
        }
 
+       /* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */
+       if (msr_index == MSR_IA32_ARCH_CAPABILITIES)
+               vmx_update_fb_clear_dis(vcpu, vmx);
+
        return ret;
 }
 
@@ -4553,6 +4632,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
        vpid_sync_context(vmx->vpid);
+
+       vmx_update_fb_clear_dis(vcpu, vmx);
 }
 
 static void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
@@ -6750,6 +6831,31 @@ void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
        }
 }
 
+void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
+                                       unsigned int flags)
+{
+       u64 hostval = this_cpu_read(x86_spec_ctrl_current);
+
+       if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
+               return;
+
+       if (flags & VMX_RUN_SAVE_SPEC_CTRL)
+               vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
+
+       /*
+        * If the guest/host SPEC_CTRL values differ, restore the host value.
+        *
+        * For legacy IBRS, the IBRS bit always needs to be written after
+        * transitioning from a less privileged predictor mode, regardless of
+        * whether the guest/host values differ.
+        */
+       if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
+           vmx->spec_ctrl != hostval)
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
+
+       barrier_nospec();
+}
+
 static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 {
        switch (to_vmx(vcpu)->exit_reason.basic) {
@@ -6763,7 +6869,8 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 }
 
 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
-                                       struct vcpu_vmx *vmx)
+                                       struct vcpu_vmx *vmx,
+                                       unsigned long flags)
 {
        guest_state_enter_irqoff();
 
@@ -6772,15 +6879,22 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
                vmx_l1d_flush(vcpu);
        else if (static_branch_unlikely(&mds_user_clear))
                mds_clear_cpu_buffers();
+       else if (static_branch_unlikely(&mmio_stale_data_clear) &&
+                kvm_arch_has_assigned_device(vcpu->kvm))
+               mds_clear_cpu_buffers();
+
+       vmx_disable_fb_clear(vmx);
 
        if (vcpu->arch.cr2 != native_read_cr2())
                native_write_cr2(vcpu->arch.cr2);
 
        vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
-                                  vmx->loaded_vmcs->launched);
+                                  flags);
 
        vcpu->arch.cr2 = native_read_cr2();
 
+       vmx_enable_fb_clear(vmx);
+
        guest_state_exit_irqoff();
 }
 
@@ -6874,36 +6988,8 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
        kvm_wait_lapic_expire(vcpu);
 
-       /*
-        * If this vCPU has touched SPEC_CTRL, restore the guest's value if
-        * it's non-zero. Since vmentry is serialising on affected CPUs, there
-        * is no need to worry about the conditional branch over the wrmsr
-        * being speculatively taken.
-        */
-       x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
-
        /* The actual VMENTER/EXIT is in the .noinstr.text section. */
-       vmx_vcpu_enter_exit(vcpu, vmx);
-
-       /*
-        * We do not use IBRS in the kernel. If this vCPU has used the
-        * SPEC_CTRL MSR it may have left it on; save the value and
-        * turn it off. This is much more efficient than blindly adding
-        * it to the atomic save/restore list. Especially as the former
-        * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
-        *
-        * For non-nested case:
-        * If the L01 MSR bitmap does not intercept the MSR, then we need to
-        * save it.
-        *
-        * For nested case:
-        * If the L02 MSR bitmap does not intercept the MSR, then we need to
-        * save it.
-        */
-       if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL)))
-               vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
-
-       x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
+       vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx));
 
        /* All fields are clean at this point */
        if (static_branch_unlikely(&enable_evmcs)) {
@@ -7709,7 +7795,9 @@ static bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
                          BIT(APICV_INHIBIT_REASON_ABSENT) |
                          BIT(APICV_INHIBIT_REASON_HYPERV) |
-                         BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
+                         BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |
+                         BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |
+                         BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
 
        return supported & BIT(reason);
 }
@@ -8212,6 +8300,8 @@ static int __init vmx_init(void)
                return r;
        }
 
+       vmx_setup_fb_clear_ctrl();
+
        for_each_possible_cpu(cpu) {
                INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
 
index b98c7e9..1e7f945 100644 (file)
@@ -8,11 +8,12 @@
 #include <asm/intel_pt.h>
 
 #include "capabilities.h"
-#include "kvm_cache_regs.h"
+#include "../kvm_cache_regs.h"
 #include "posted_intr.h"
 #include "vmcs.h"
 #include "vmx_ops.h"
-#include "cpuid.h"
+#include "../cpuid.h"
+#include "run_flags.h"
 
 #define MSR_TYPE_R     1
 #define MSR_TYPE_W     2
@@ -348,6 +349,8 @@ struct vcpu_vmx {
        u64 msr_ia32_feature_control_valid_bits;
        /* SGX Launch Control public key hash */
        u64 msr_ia32_sgxlepubkeyhash[4];
+       u64 msr_ia32_mcu_opt_ctrl;
+       bool disable_fb_clear;
 
        struct pt_desc pt_desc;
        struct lbr_desc lbr_desc;
@@ -402,7 +405,10 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
-bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
+void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
+unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
+bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
+                   unsigned int flags);
 int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
 
index 5e7f412..5cfc49d 100644 (file)
@@ -8,7 +8,7 @@
 
 #include "evmcs.h"
 #include "vmcs.h"
-#include "x86.h"
+#include "../x86.h"
 
 asmlinkage void vmread_error(unsigned long field, bool fault);
 __attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
index 03fbfbb..e5fa335 100644 (file)
@@ -298,7 +298,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        STATS_DESC_COUNTER(VCPU, directed_yield_successful),
        STATS_DESC_COUNTER(VCPU, preemption_reported),
        STATS_DESC_COUNTER(VCPU, preemption_other),
-       STATS_DESC_ICOUNTER(VCPU, guest_mode)
+       STATS_DESC_IBOOLEAN(VCPU, guest_mode)
 };
 
 const struct kvm_stats_header kvm_vcpu_stats_header = {
@@ -1617,6 +1617,9 @@ static u64 kvm_get_arch_capabilities(void)
                 */
        }
 
+       /* Guests don't need to know "Fill buffer clear control" exists */
+       data &= ~ARCH_CAP_FB_CLEAR_CTRL;
+
        return data;
 }
 
@@ -6026,6 +6029,11 @@ split_irqchip_unlock:
                r = 0;
                break;
        case KVM_CAP_X86_USER_SPACE_MSR:
+               r = -EINVAL;
+               if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL |
+                                    KVM_MSR_EXIT_REASON_UNKNOWN |
+                                    KVM_MSR_EXIT_REASON_FILTER))
+                       break;
                kvm->arch.user_space_msr_mask = cap->args[0];
                r = 0;
                break;
@@ -6180,6 +6188,9 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
        if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
                return -EFAULT;
 
+       if (filter.flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
+               return -EINVAL;
+
        for (i = 0; i < ARRAY_SIZE(filter.ranges); i++)
                empty &= !filter.ranges[i].nmsrs;
 
@@ -9140,15 +9151,17 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
  */
 static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid)
 {
-       struct kvm_lapic_irq lapic_irq;
-
-       lapic_irq.shorthand = APIC_DEST_NOSHORT;
-       lapic_irq.dest_mode = APIC_DEST_PHYSICAL;
-       lapic_irq.level = 0;
-       lapic_irq.dest_id = apicid;
-       lapic_irq.msi_redir_hint = false;
+       /*
+        * All other fields are unused for APIC_DM_REMRD, but may be consumed by
+        * common code, e.g. for tracing. Defer initialization to the compiler.
+        */
+       struct kvm_lapic_irq lapic_irq = {
+               .delivery_mode = APIC_DM_REMRD,
+               .dest_mode = APIC_DEST_PHYSICAL,
+               .shorthand = APIC_DEST_NOSHORT,
+               .dest_id = apicid,
+       };
 
-       lapic_irq.delivery_mode = APIC_DM_REMRD;
        kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
 }
 
@@ -9850,6 +9863,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
                return;
 
        down_read(&vcpu->kvm->arch.apicv_update_lock);
+       preempt_disable();
 
        activate = kvm_vcpu_apicv_activated(vcpu);
 
@@ -9870,6 +9884,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
                kvm_make_request(KVM_REQ_EVENT, vcpu);
 
 out:
+       preempt_enable();
        up_read(&vcpu->kvm->arch.apicv_update_lock);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
@@ -12626,9 +12641,9 @@ void kvm_arch_end_assignment(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
 
-bool kvm_arch_has_assigned_device(struct kvm *kvm)
+bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
 {
-       return atomic_read(&kvm->arch.assigned_device_count);
+       return arch_atomic_read(&kvm->arch.assigned_device_count);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
 
index d83cba3..724bbf8 100644 (file)
@@ -39,7 +39,7 @@ SYM_FUNC_START(__memmove)
        /* FSRM implies ERMS => no length checks, do the copy directly */
 .Lmemmove_begin_forward:
        ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM
-       ALTERNATIVE "", __stringify(movq %rdx, %rcx; rep movsb; RET), X86_FEATURE_ERMS
+       ALTERNATIVE "", "jmp .Lmemmove_erms", X86_FEATURE_ERMS
 
        /*
         * the movsq instruction has a high startup latency
@@ -205,6 +205,11 @@ SYM_FUNC_START(__memmove)
        movb %r11b, (%rdi)
 13:
        RET
+
+.Lmemmove_erms:
+       movq %rdx, %rcx
+       rep movsb
+       RET
 SYM_FUNC_END(__memmove)
 EXPORT_SYMBOL(__memmove)
 
index b2b2366..073289a 100644 (file)
@@ -33,9 +33,9 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
        UNWIND_HINT_EMPTY
        ANNOTATE_NOENDBR
 
-       ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
-                     __stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \
-                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE
+       ALTERNATIVE_2 __stringify(RETPOLINE \reg), \
+                     __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \
+                     __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE)
 
 .endm
 
@@ -67,3 +67,76 @@ SYM_CODE_END(__x86_indirect_thunk_array)
 #define GEN(reg) EXPORT_THUNK(reg)
 #include <asm/GEN-for-each-reg.h>
 #undef GEN
+
+/*
+ * This function name is magical and is used by -mfunction-return=thunk-extern
+ * for the compiler to generate JMPs to it.
+ */
+#ifdef CONFIG_RETHUNK
+
+       .section .text.__x86.return_thunk
+
+/*
+ * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
+ * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for
+ *    alignment within the BTB.
+ * 2) The instruction at zen_untrain_ret must contain, and not
+ *    end with, the 0xc3 byte of the RET.
+ * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
+ *    from re-poisoning the BTB prediction.
+ */
+       .align 64
+       .skip 63, 0xcc
+SYM_FUNC_START_NOALIGN(zen_untrain_ret)
+
+       /*
+        * As executed from zen_untrain_ret, this is:
+        *
+        *   TEST $0xcc, %bl
+        *   LFENCE
+        *   JMP __x86_return_thunk
+        *
+        * Executing the TEST instruction has a side effect of evicting any BTB
+        * prediction (potentially attacker controlled) attached to the RET, as
+        * __x86_return_thunk + 1 isn't an instruction boundary at the moment.
+        */
+       .byte   0xf6
+
+       /*
+        * As executed from __x86_return_thunk, this is a plain RET.
+        *
+        * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
+        *
+        * We subsequently jump backwards and architecturally execute the RET.
+        * This creates a correct BTB prediction (type=ret), but in the
+        * meantime we suffer Straight Line Speculation (because the predicted
+        * type was not a branch), which is halted by the INT3.
+        *
+        * With SMT enabled and STIBP active, a sibling thread cannot poison
+        * RET's prediction to a type of its choice, but can evict the
+        * prediction due to competitive sharing. If the prediction is
+        * evicted, __x86_return_thunk will suffer Straight Line Speculation
+        * which will be contained safely by the INT3.
+        */
+SYM_INNER_LABEL(__x86_return_thunk, SYM_L_GLOBAL)
+       ret
+       int3
+SYM_CODE_END(__x86_return_thunk)
+
+       /*
+        * Ensure the TEST decoding / BTB invalidation is complete.
+        */
+       lfence
+
+       /*
+        * Jump back and execute the RET in the middle of the TEST instruction.
+        * INT3 is for SLS protection.
+        */
+       jmp __x86_return_thunk
+       int3
+SYM_FUNC_END(zen_untrain_ret)
+__EXPORT_THUNK(zen_untrain_ret)
+
+EXPORT_SYMBOL(__x86_return_thunk)
+
+#endif /* CONFIG_RETHUNK */
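
The overlap described in the comments above is easiest to see at the byte level: TEST $0xcc, %bl encodes as f6 c3 cc, and the last two bytes double as instructions. This byte view is an annotation derived from the comments, not code from the patch:

    zen_untrain_ret:     f6        ; opcode byte of TEST $0xcc, %bl
    __x86_return_thunk:  c3        ; ModRM byte of the TEST == RET
                         cc        ; imm8 of the TEST == INT3

Entered at zen_untrain_ret the CPU decodes the TEST; entered at __x86_return_thunk it decodes ret; int3.
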
index dba2197..331310c 100644 (file)
@@ -94,16 +94,18 @@ static bool ex_handler_copy(const struct exception_table_entry *fixup,
 static bool ex_handler_msr(const struct exception_table_entry *fixup,
                           struct pt_regs *regs, bool wrmsr, bool safe, int reg)
 {
-       if (!safe && wrmsr &&
-           pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
-                        (unsigned int)regs->cx, (unsigned int)regs->dx,
-                        (unsigned int)regs->ax,  regs->ip, (void *)regs->ip))
+       if (__ONCE_LITE_IF(!safe && wrmsr)) {
+               pr_warn("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
+                       (unsigned int)regs->cx, (unsigned int)regs->dx,
+                       (unsigned int)regs->ax,  regs->ip, (void *)regs->ip);
                show_stack_regs(regs);
+       }
 
-       if (!safe && !wrmsr &&
-           pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
-                        (unsigned int)regs->cx, regs->ip, (void *)regs->ip))
+       if (__ONCE_LITE_IF(!safe && !wrmsr)) {
+               pr_warn("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
+                       (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
                show_stack_regs(regs);
+       }
 
        if (!wrmsr) {
                /* Pretend that the read succeeded and returned 0. */
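
__ONCE_LITE_IF() gates a whole block rather than a single printk, which is what lets show_stack_regs() fire together with the first (and only) warning. Roughly, the macro behaves like the sketch below; this is simplified, and the real helper lives in include/linux/once_lite.h and places the flag in the .data.once section:

    /* Rough, simplified shape -- not the kernel's exact macro. */
    #define ONCE_IF_DEMO(cond)                              \
            ({                                              \
                    static int __already_done;              \
                    int __ret = (cond) && !__already_done;  \
                    if (__ret)                              \
                            __already_done = 1;             \
                    __ret;                                  \
            })
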
index d8cfce2..82a042c 100644 (file)
@@ -77,10 +77,20 @@ static uint8_t __pte2cachemode_tbl[8] = {
        [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
 };
 
-/* Check that the write-protect PAT entry is set for write-protect */
+/*
+ * Check that the write-protect PAT entry is set for write-protect.
+ * To do this without making assumptions about how PAT has been set up (Xen
+ * uses a different layout than the kernel), translate the
+ * _PAGE_CACHE_MODE_WP cache mode via __cachemode2pte_tbl[] into protection
+ * bits (those protection bits will select a cache mode of WP or better),
+ * and then translate the protection bits back into the cache mode using
+ * __pte2cm_idx() and the __pte2cachemode_tbl[] array. This yields the
+ * cache mode actually in use.
+ */
 bool x86_has_pat_wp(void)
 {
-       return __pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] == _PAGE_CACHE_MODE_WP;
+       uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP];
+
+       return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP;
 }
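
A concrete way to see why the round trip matters: under a hypothetical Xen-style PAT layout with no WP slot, the forward lookup for _PAGE_CACHE_MODE_WP lands on a stricter mode's bits, and translating those bits back yields that stricter mode rather than WP, so the function now correctly reports false instead of blindly comparing table indices. Sketch with made-up table contents (indices and values are purely illustrative):

    /* Hypothetical tables: WP (index 5) has no PAT slot, falls back
     * to a stricter mode whose index is 6. */
    static const unsigned char cm2prot[8] = { [5] = 7 };
    static const unsigned char prot2cm[8] = { [7] = 6 };

    static int has_pat_wp(void)
    {
            return prot2cm[cm2prot[5]] == 5;    /* 6 != 5 -> false */
    }
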
 
 enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
@@ -846,7 +856,7 @@ int devmem_is_allowed(unsigned long pagenr)
 
        /*
         * This must follow RAM test, since System RAM is considered a
-        * restricted resource under CONFIG_STRICT_IOMEM.
+        * restricted resource under CONFIG_STRICT_DEVMEM.
         */
        if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
                /* Low 1MB bypasses iomem restrictions. */
index 3d1dba0..9de3d90 100644 (file)
@@ -65,7 +65,10 @@ SYM_FUNC_START(sme_encrypt_execute)
        movq    %rbp, %rsp              /* Restore original stack pointer */
        pop     %rbp
 
-       RET
+       /* Offset to __x86_return_thunk would be wrong here */
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
 SYM_FUNC_END(sme_encrypt_execute)
 
 SYM_FUNC_START(__enc_copy)
@@ -151,6 +154,9 @@ SYM_FUNC_START(__enc_copy)
        pop     %r12
        pop     %r15
 
-       RET
+       /* Offset to __x86_return_thunk would be wrong here */
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
 .L__enc_copy_end:
 SYM_FUNC_END(__enc_copy)
index e44e938..7418c36 100644 (file)
@@ -110,7 +110,7 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey
        return vma_pkey(vma);
 }
 
-#define PKRU_AD_KEY(pkey)      (PKRU_AD_BIT << ((pkey) * PKRU_BITS_PER_PKEY))
+#define PKRU_AD_MASK(pkey)     (PKRU_AD_BIT << ((pkey) * PKRU_BITS_PER_PKEY))
 
 /*
  * Make the default PKRU value (at execve() time) as restrictive
@@ -118,11 +118,14 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey
  * in the process's lifetime will not accidentally get access
  * to data which is pkey-protected later on.
  */
-u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
-                     PKRU_AD_KEY( 4) | PKRU_AD_KEY( 5) | PKRU_AD_KEY( 6) |
-                     PKRU_AD_KEY( 7) | PKRU_AD_KEY( 8) | PKRU_AD_KEY( 9) |
-                     PKRU_AD_KEY(10) | PKRU_AD_KEY(11) | PKRU_AD_KEY(12) |
-                     PKRU_AD_KEY(13) | PKRU_AD_KEY(14) | PKRU_AD_KEY(15);
+u32 init_pkru_value = PKRU_AD_MASK( 1) | PKRU_AD_MASK( 2) |
+                     PKRU_AD_MASK( 3) | PKRU_AD_MASK( 4) |
+                     PKRU_AD_MASK( 5) | PKRU_AD_MASK( 6) |
+                     PKRU_AD_MASK( 7) | PKRU_AD_MASK( 8) |
+                     PKRU_AD_MASK( 9) | PKRU_AD_MASK(10) |
+                     PKRU_AD_MASK(11) | PKRU_AD_MASK(12) |
+                     PKRU_AD_MASK(13) | PKRU_AD_MASK(14) |
+                     PKRU_AD_MASK(15);
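
The rename is purely cosmetic (each expansion is a bit mask, not a key number), and the resulting constant is easy to check by hand: with PKRU_AD_BIT == 0x1 and two bits per pkey, OR-ing keys 1..15 gives 0x55555554, i.e. every pkey except key 0 starts access-disabled. A standalone check:

    #include <stdio.h>

    #define PKRU_AD_BIT        0x1u
    #define PKRU_BITS_PER_PKEY 2
    #define PKRU_AD_MASK(pkey) (PKRU_AD_BIT << ((pkey) * PKRU_BITS_PER_PKEY))

    int main(void)
    {
            unsigned int v = 0;

            for (int k = 1; k <= 15; k++)
                    v |= PKRU_AD_MASK(k);
            printf("0x%x\n", v);    /* prints 0x55555554 */
            return 0;
    }
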
 
 static ssize_t init_pkru_read_file(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
index d400b6d..c1e31e9 100644 (file)
@@ -734,10 +734,10 @@ static void flush_tlb_func(void *info)
        const struct flush_tlb_info *f = info;
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
        u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
-       u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
        u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
        bool local = smp_processor_id() == f->initiating_cpu;
        unsigned long nr_invalidate = 0;
+       u64 mm_tlb_gen;
 
        /* This code cannot presently handle being reentered. */
        VM_WARN_ON(!irqs_disabled());
@@ -771,6 +771,23 @@ static void flush_tlb_func(void *info)
                return;
        }
 
+       if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID &&
+                    f->new_tlb_gen <= local_tlb_gen)) {
+               /*
+                * The TLB is already up to date with respect to f->new_tlb_gen.
+                * While the core might still be behind mm_tlb_gen, checking
+                * mm_tlb_gen unnecessarily would have negative caching effects
+                * so avoid it.
+                */
+               return;
+       }
+
+       /*
+        * Defer mm_tlb_gen reading as long as possible to avoid cache
+        * contention.
+        */
+       mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
+
        if (unlikely(local_tlb_gen == mm_tlb_gen)) {
                /*
                 * There's nothing to do: we're already up to date.  This can
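
The point of the reordering above is that f->new_tlb_gen arrives with the flush request and is cheap to test, while mm->context.tlb_gen sits on a hot, contended cacheline; it is now read only when the cheap test fails. Condensed decision-order sketch (the helper name and return convention are illustrative):

    /* Illustrative decision order; not the kernel's exact code. */
    static int flush_still_needed(u64 local_gen, u64 request_gen,
                                  atomic64_t *mm_gen)
    {
            if (request_gen != TLB_GENERATION_INVALID &&
                request_gen <= local_gen)
                    return 0;               /* request already satisfied */

            /* Only now touch the contended mm-wide generation. */
            return atomic64_read(mm_gen) != local_gen;
    }
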
@@ -827,6 +844,12 @@ static void flush_tlb_func(void *info)
                /* Partial flush */
                unsigned long addr = f->start;
 
+               /* Partial flush cannot have invalid generations */
+               VM_WARN_ON(f->new_tlb_gen == TLB_GENERATION_INVALID);
+
+               /* Partial flush must have valid mm */
+               VM_WARN_ON(f->mm == NULL);
+
                nr_invalidate = (f->end - f->start) >> f->stride_shift;
 
                while (addr < f->end) {
@@ -1029,7 +1052,8 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
                struct flush_tlb_info *info;
 
                preempt_disable();
-               info = get_flush_tlb_info(NULL, start, end, 0, false, 0);
+               info = get_flush_tlb_info(NULL, start, end, 0, false,
+                                         TLB_GENERATION_INVALID);
 
                on_each_cpu(do_kernel_range_flush, info, 1);
 
@@ -1198,7 +1222,8 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 
        int cpu = get_cpu();
 
-       info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false, 0);
+       info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false,
+                                 TLB_GENERATION_INVALID);
        /*
         * flush_tlb_multi() is not optimized for the common case in which only
         * a local TLB flush is needed. Optimize this use-case by calling
index f298b18..b808c9a 100644 (file)
@@ -412,16 +412,30 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
 {
        u8 *prog = *pprog;
 
-#ifdef CONFIG_RETPOLINE
        if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
                EMIT_LFENCE();
                EMIT2(0xFF, 0xE0 + reg);
        } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
                OPTIMIZER_HIDE_VAR(reg);
                emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
-       } else
-#endif
-       EMIT2(0xFF, 0xE0 + reg);
+       } else {
+               EMIT2(0xFF, 0xE0 + reg);
+       }
+
+       *pprog = prog;
+}
+
+static void emit_return(u8 **pprog, u8 *ip)
+{
+       u8 *prog = *pprog;
+
+       if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+               emit_jump(&prog, &__x86_return_thunk, ip);
+       } else {
+               EMIT1(0xC3);            /* ret */
+               if (IS_ENABLED(CONFIG_SLS))
+                       EMIT1(0xCC);    /* int3 */
+       }
 
        *pprog = prog;
 }
@@ -1420,8 +1434,9 @@ st:                       if (is_imm8(insn->off))
                case BPF_JMP | BPF_CALL:
                        func = (u8 *) __bpf_call_base + imm32;
                        if (tail_call_reachable) {
+                               /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
                                EMIT3_off32(0x48, 0x8B, 0x85,
-                                           -(bpf_prog->aux->stack_depth + 8));
+                                           -round_up(bpf_prog->aux->stack_depth, 8) - 8);
                                if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
                                        return -EINVAL;
                        } else {
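
The offset fix matters whenever stack_depth is not already a multiple of 8, because the prologue rounds the stack allocation up. A quick worked example (round_up defined locally for illustration):

    #define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

    /* stack_depth = 13: the prologue reserves round_up(13, 8) = 16
     * bytes, so the tail-call counter lives at rbp - 24, not rbp - 21. */
    int old_off = -(13 + 8);               /* -21: wrong slot        */
    int new_off = -(round_up(13, 8) + 8);  /* -24: matches prologue  */
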
@@ -1685,7 +1700,7 @@ emit_jmp:
                        ctx->cleanup_addr = proglen;
                        pop_callee_regs(&prog, callee_regs_used);
                        EMIT1(0xC9);         /* leave */
-                       EMIT1(0xC3);         /* ret */
+                       emit_return(&prog, image + addrs[i - 1] + (prog - temp));
                        break;
 
                default:
@@ -2188,7 +2203,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
        if (flags & BPF_TRAMP_F_SKIP_FRAME)
                /* skip our return address and return to parent */
                EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
-       EMIT1(0xC3); /* ret */
+       emit_return(&prog, prog);
        /* Make sure the trampoline generation logic doesn't overflow */
        if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
                ret = -EFAULT;
index a4f4305..2f82480 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/pci-acpi.h>
 #include <asm/numa.h>
 #include <asm/pci_x86.h>
-#include <asm/e820/api.h>
 
 struct pci_root_info {
        struct acpi_pci_root_info common;
@@ -20,7 +19,7 @@ struct pci_root_info {
 #endif
 };
 
-static bool pci_use_e820 = true;
+bool pci_use_e820 = true;
 static bool pci_use_crs = true;
 static bool pci_ignore_seg;
 
@@ -387,11 +386,6 @@ static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
 
        status = acpi_pci_probe_root_resources(ci);
 
-       if (pci_use_e820) {
-               resource_list_for_each_entry(entry, &ci->resources)
-                       remove_e820_regions(&device->dev, entry->res);
-       }
-
        if (pci_use_crs) {
                resource_list_for_each_entry_safe(entry, tmp, &ci->resources)
                        if (resource_is_pcicfg_ioport(entry->res))
index 9ffe2ba..4e5257a 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/objtool.h>
 #include <asm/page_types.h>
 #include <asm/segment.h>
+#include <asm/nospec-branch.h>
 
        .text
        .code64
@@ -75,7 +76,9 @@ STACK_FRAME_NON_STANDARD __efi64_thunk
 1:     movq    0x20(%rsp), %rsp
        pop     %rbx
        pop     %rbp
-       RET
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
 
        .code32
 2:     pushl   $__KERNEL_CS
index ae53d54..31c634a 100644 (file)
@@ -73,12 +73,6 @@ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
 $(obj)/purgatory.chk: $(obj)/purgatory.ro FORCE
                $(call if_changed,ld)
 
-targets += kexec-purgatory.c
+$(obj)/kexec-purgatory.o: $(obj)/purgatory.ro $(obj)/purgatory.chk
 
-quiet_cmd_bin2c = BIN2C   $@
-      cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
-
-$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro $(obj)/purgatory.chk FORCE
-       $(call if_changed,bin2c)
-
-obj-$(CONFIG_KEXEC_FILE)       += kexec-purgatory.o
+obj-y += kexec-purgatory.o
diff --git a/arch/x86/purgatory/kexec-purgatory.S b/arch/x86/purgatory/kexec-purgatory.S
new file mode 100644 (file)
index 0000000..8530fe9
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+       .section .rodata, "a"
+
+       .align  8
+kexec_purgatory:
+       .globl  kexec_purgatory
+       .incbin "arch/x86/purgatory/purgatory.ro"
+.Lkexec_purgatory_end:
+
+       .align  8
+kexec_purgatory_size:
+       .globl  kexec_purgatory_size
+       .quad   .Lkexec_purgatory_end - kexec_purgatory
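
The .incbin approach replaces the C array that scripts/bin2c used to generate; the consuming C code only needs extern declarations matching the two symbols this file defines. A hedged sketch of that side (the exact declarations and types in the kexec core may differ and are not shown in this diff):

    /* Illustrative consumer-side declarations for the symbols above. */
    extern const char kexec_purgatory[];
    extern unsigned long kexec_purgatory_size;
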
index e3297b1..70fb2ea 100644 (file)
@@ -1183,15 +1183,19 @@ static void __init xen_domu_set_legacy_features(void)
 extern void early_xen_iret_patch(void);
 
 /* First C function to be called on Xen boot */
-asmlinkage __visible void __init xen_start_kernel(void)
+asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
 {
        struct physdev_set_iopl set_iopl;
        unsigned long initrd_start = 0;
        int rc;
 
-       if (!xen_start_info)
+       if (!si)
                return;
 
+       clear_bss();
+
+       xen_start_info = si;
+
        __text_gen_insn(&early_xen_iret_patch,
                        JMP32_INSN_OPCODE, &early_xen_iret_patch, &xen_iret,
                        JMP32_INSN_SIZE);
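
Clearing .bss moves from startup_xen's hand-rolled rep stosq (removed further below) into C, and it must happen before the start_info pointer is stored: xen_start_info itself would be zeroed by clear_bss(), so the pointer is now handed over in %rdi and only assigned after the clear. For reference, clear_bss() is roughly the memset below (a sketch; the real helper sits in arch/x86/kernel/head64.c):

    #include <string.h>

    /* Rough shape of clear_bss(); the symbols come from the linker
     * script. */
    extern char __bss_start[], __bss_stop[];

    static void clear_bss_sketch(void)
    {
            memset(__bss_start, 0, (size_t)(__bss_stop - __bss_start));
    }
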
index 81aa46f..cfa99e8 100644 (file)
@@ -918,7 +918,7 @@ void xen_enable_sysenter(void)
        if (!boot_cpu_has(sysenter_feature))
                return;
 
-       ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
+       ret = register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat);
        if(ret != 0)
                setup_clear_cpu_cap(sysenter_feature);
 }
@@ -927,7 +927,7 @@ void xen_enable_syscall(void)
 {
        int ret;
 
-       ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
+       ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64);
        if (ret != 0) {
                printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
                /* Pretty fatal; 64-bit userspace has no other
@@ -936,7 +936,7 @@ void xen_enable_syscall(void)
 
        if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
                ret = register_callback(CALLBACKTYPE_syscall32,
-                                       xen_syscall32_target);
+                                       xen_entry_SYSCALL_compat);
                if (ret != 0)
                        setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
        }
index caa9bc2..6b4fdf6 100644 (file)
@@ -121,7 +121,7 @@ SYM_FUNC_END(xen_read_cr2_direct);
 
 .macro xen_pv_trap name
 SYM_CODE_START(xen_\name)
-       UNWIND_HINT_EMPTY
+       UNWIND_HINT_ENTRY
        ENDBR
        pop %rcx
        pop %r11
@@ -234,8 +234,8 @@ SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
  */
 
 /* Normal 64-bit system call target */
-SYM_CODE_START(xen_syscall_target)
-       UNWIND_HINT_EMPTY
+SYM_CODE_START(xen_entry_SYSCALL_64)
+       UNWIND_HINT_ENTRY
        ENDBR
        popq %rcx
        popq %r11
@@ -249,13 +249,13 @@ SYM_CODE_START(xen_syscall_target)
        movq $__USER_CS, 1*8(%rsp)
 
        jmp entry_SYSCALL_64_after_hwframe
-SYM_CODE_END(xen_syscall_target)
+SYM_CODE_END(xen_entry_SYSCALL_64)
 
 #ifdef CONFIG_IA32_EMULATION
 
 /* 32-bit compat syscall target */
-SYM_CODE_START(xen_syscall32_target)
-       UNWIND_HINT_EMPTY
+SYM_CODE_START(xen_entry_SYSCALL_compat)
+       UNWIND_HINT_ENTRY
        ENDBR
        popq %rcx
        popq %r11
@@ -269,11 +269,11 @@ SYM_CODE_START(xen_syscall32_target)
        movq $__USER32_CS, 1*8(%rsp)
 
        jmp entry_SYSCALL_compat_after_hwframe
-SYM_CODE_END(xen_syscall32_target)
+SYM_CODE_END(xen_entry_SYSCALL_compat)
 
 /* 32-bit compat sysenter target */
-SYM_CODE_START(xen_sysenter_target)
-       UNWIND_HINT_EMPTY
+SYM_CODE_START(xen_entry_SYSENTER_compat)
+       UNWIND_HINT_ENTRY
        ENDBR
        /*
         * NB: Xen is polite and clears TF from EFLAGS for us.  This means
@@ -291,19 +291,19 @@ SYM_CODE_START(xen_sysenter_target)
        movq $__USER32_CS, 1*8(%rsp)
 
        jmp entry_SYSENTER_compat_after_hwframe
-SYM_CODE_END(xen_sysenter_target)
+SYM_CODE_END(xen_entry_SYSENTER_compat)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-SYM_CODE_START(xen_syscall32_target)
-SYM_CODE_START(xen_sysenter_target)
-       UNWIND_HINT_EMPTY
+SYM_CODE_START(xen_entry_SYSCALL_compat)
+SYM_CODE_START(xen_entry_SYSENTER_compat)
+       UNWIND_HINT_ENTRY
        ENDBR
        lea 16(%rsp), %rsp      /* strip %rcx, %r11 */
        mov $-ENOSYS, %rax
        pushq $0
        jmp hypercall_iret
-SYM_CODE_END(xen_sysenter_target)
-SYM_CODE_END(xen_syscall32_target)
+SYM_CODE_END(xen_entry_SYSENTER_compat)
+SYM_CODE_END(xen_entry_SYSCALL_compat)
 
 #endif /* CONFIG_IA32_EMULATION */
index 3a2cd93..ffaa621 100644 (file)
@@ -26,6 +26,7 @@ SYM_CODE_START(hypercall_page)
        .rept (PAGE_SIZE / 32)
                UNWIND_HINT_FUNC
                ANNOTATE_NOENDBR
+               ANNOTATE_UNRET_SAFE
                ret
                /*
                 * Xen will write the hypercall page, and sort out ENDBR.
@@ -48,15 +49,6 @@ SYM_CODE_START(startup_xen)
        ANNOTATE_NOENDBR
        cld
 
-       /* Clear .bss */
-       xor %eax,%eax
-       mov $__bss_start, %rdi
-       mov $__bss_stop, %rcx
-       sub %rdi, %rcx
-       shr $3, %rcx
-       rep stosq
-
-       mov %rsi, xen_start_info
        mov initial_stack(%rip), %rsp
 
        /* Set up %gs.
@@ -71,6 +63,7 @@ SYM_CODE_START(startup_xen)
        cdq
        wrmsr
 
+       mov     %rsi, %rdi
        call xen_start_kernel
 SYM_CODE_END(startup_xen)
        __FINIT
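
[ Editor's note: two separate things happen in this file. ANNOTATE_UNRET_SAFE exempts the bare ret in the hypercall page from return-thunk checking (Xen overwrites the page with real hypercall stubs anyway), and startup_xen stops clearing .bss and storing xen_start_info itself; the "mov %rsi, %rdi" hands the start-info pointer to C as a regular first argument. A sketch of the C side this implies, with the signature inferred rather than quoted:

    /* Assumed signature: start_info now arrives as a parameter, so it
     * can be saved after the generic startup code has cleared .bss. */
    asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
    {
            if (!si)
                    return;
            xen_start_info = si;
            /* ... existing PV setup continues ... */
    }
]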
index fd0fec6..9a8bb97 100644 (file)
 /* These are code, but not functions.  Defined in entry.S */
 extern const char xen_failsafe_callback[];
 
-void xen_sysenter_target(void);
+void xen_entry_SYSENTER_compat(void);
 #ifdef CONFIG_X86_64
-void xen_syscall_target(void);
-void xen_syscall32_target(void);
+void xen_entry_SYSCALL_64(void);
+void xen_entry_SYSCALL_compat(void);
 #endif
 
 extern void *xen_initial_gdt;
index e3eae64..ab30bcb 100644 (file)
@@ -2173,7 +2173,7 @@ ENDPROC(ret_from_kernel_thread)
 
 #ifdef CONFIG_HIBERNATION
 
-       .bss
+       .section        .bss, "aw"
        .align  4
 .Lsaved_regs:
 #if defined(__XTENSA_WINDOWED_ABI__)
index e8ceb15..16b8a62 100644 (file)
@@ -154,6 +154,7 @@ static void __init calibrate_ccount(void)
        cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
        if (cpu) {
                clk = of_clk_get(cpu, 0);
+               of_node_put(cpu);
                if (!IS_ERR(clk)) {
                        ccount_freq = clk_get_rate(clk);
                        return;
index 538e674..c79c1d0 100644 (file)
@@ -133,6 +133,7 @@ static int __init machine_setup(void)
 
        if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
                update_local_mac(eth);
+       of_node_put(eth);
        return 0;
 }
 arch_initcall(machine_setup);
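
[ Editor's note: both xtensa hunks fix the same leak pattern: of_find_compatible_node() returns a device node with its refcount elevated, and the caller must drop it with of_node_put() once done. Minimal sketch of the corrected pattern, using the node from the first hunk:

    /* Balance the reference taken by of_find_compatible_node(). */
    struct device_node *np = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
    if (np) {
            struct clk *clk = of_clk_get(np, 0);

            of_node_put(np);        /* the node itself is no longer needed */
            /* ... use clk ... */
    }
]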
index 0d46cb7..e6d7e6b 100644 (file)
@@ -7046,6 +7046,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
        spin_unlock_irq(&bfqd->lock);
 #endif
 
+       blk_stat_disable_accounting(bfqd->queue);
        wbt_enable_default(bfqd->queue);
 
        kfree(bfqd);
@@ -7188,7 +7189,12 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
        bfq_init_root_group(bfqd->root_group, bfqd);
        bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
 
+       /* We dispatch from request queue wide instead of hw queue */
+       blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
+
        wbt_disable_default(q);
+       blk_stat_enable_accounting(q);
+
        return 0;
 
 out_free:
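
[ Editor's note: BFQ now flags itself as a queue-wide dispatcher and pairs blk_stat_enable_accounting() at init with blk_stat_disable_accounting() at exit, so accounting does not stay enabled across an elevator switch. A sketch of the symmetric contract; struct my_data and the function names are hypothetical, the calls are the ones in the hunks above:

    static int my_init_queue(struct request_queue *q, struct elevator_type *e)
    {
            /* This scheduler dispatches queue-wide, not per hw queue. */
            blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
            blk_stat_enable_accounting(q);
            return 0;
    }

    static void my_exit_queue(struct elevator_queue *e)
    {
            struct my_data *d = e->elevator_data;

            blk_stat_disable_accounting(d->queue);  /* undo init-time enable */
    }
]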
index 06ff5bb..27fb135 100644 (file)
@@ -322,19 +322,6 @@ void blk_cleanup_queue(struct request_queue *q)
                blk_mq_exit_queue(q);
        }
 
-       /*
-        * In theory, request pool of sched_tags belongs to request queue.
-        * However, the current implementation requires tag_set for freeing
-        * requests, so free the pool now.
-        *
-        * Queue has become frozen, there can't be any in-queue requests, so
-        * it is safe to free requests now.
-        */
-       mutex_lock(&q->sysfs_lock);
-       if (q->elevator)
-               blk_mq_sched_free_rqs(q);
-       mutex_unlock(&q->sysfs_lock);
-
        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
 }
index 56ed48d..47c89e6 100644 (file)
@@ -144,7 +144,6 @@ int disk_register_independent_access_ranges(struct gendisk *disk,
        }
 
        for (i = 0; i < iars->nr_ia_ranges; i++) {
-               iars->ia_range[i].queue = q;
                ret = kobject_init_and_add(&iars->ia_range[i].kobj,
                                           &blk_ia_range_ktype, &iars->kobj,
                                           "%d", i);
index 7771dac..f5e6527 100644 (file)
@@ -345,6 +345,7 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio,
                /* there isn't chance to merge the splitted bio */
                split->bi_opf |= REQ_NOMERGE;
 
+               blkcg_bio_issue_init(split);
                bio_chain(split, *bio);
                trace_block_split(split, (*bio)->bi_iter.bi_sector);
                submit_bio_noacct(*bio);
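
[ Editor's note: the added call stamps the split bio with cgroup issue information. From memory, blkcg_bio_issue_init() is roughly the helper below; without it the split half lacks a valid issue timestamp/size, which blk-iolatency accounting relies on. Treat this as a recollection, not a quote:

    /* Approximate shape of the helper (block/blk-cgroup.h). */
    static inline void blkcg_bio_issue_init(struct bio *bio)
    {
            bio_issue_init(&bio->bi_issue, bio_sectors(bio));
    }
]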
index 7e4136a..4d1ce9e 100644 (file)
@@ -711,11 +711,6 @@ void blk_mq_debugfs_register(struct request_queue *q)
        }
 }
 
-void blk_mq_debugfs_unregister(struct request_queue *q)
-{
-       q->sched_debugfs_dir = NULL;
-}
-
 static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *ctx)
 {
@@ -746,6 +741,8 @@ void blk_mq_debugfs_register_hctx(struct request_queue *q,
 
 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
 {
+       if (!hctx->queue->debugfs_dir)
+               return;
        debugfs_remove_recursive(hctx->debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
        hctx->debugfs_dir = NULL;
@@ -773,6 +770,8 @@ void blk_mq_debugfs_register_sched(struct request_queue *q)
 {
        struct elevator_type *e = q->elevator->type;
 
+       lockdep_assert_held(&q->debugfs_mutex);
+
        /*
         * If the parent directory has not been created yet, return, we will be
         * called again later on and the directory/files will be created then.
@@ -790,6 +789,8 @@ void blk_mq_debugfs_register_sched(struct request_queue *q)
 
 void blk_mq_debugfs_unregister_sched(struct request_queue *q)
 {
+       lockdep_assert_held(&q->debugfs_mutex);
+
        debugfs_remove_recursive(q->sched_debugfs_dir);
        q->sched_debugfs_dir = NULL;
 }
@@ -811,6 +812,10 @@ static const char *rq_qos_id_to_name(enum rq_qos_id id)
 
 void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
 {
+       lockdep_assert_held(&rqos->q->debugfs_mutex);
+
+       if (!rqos->q->debugfs_dir)
+               return;
        debugfs_remove_recursive(rqos->debugfs_dir);
        rqos->debugfs_dir = NULL;
 }
@@ -820,6 +825,8 @@ void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
        struct request_queue *q = rqos->q;
        const char *dir_name = rq_qos_id_to_name(rqos->id);
 
+       lockdep_assert_held(&q->debugfs_mutex);
+
        if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
                return;
 
@@ -833,17 +840,13 @@ void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
        debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
 }
 
-void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
-{
-       debugfs_remove_recursive(q->rqos_debugfs_dir);
-       q->rqos_debugfs_dir = NULL;
-}
-
 void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
                                        struct blk_mq_hw_ctx *hctx)
 {
        struct elevator_type *e = q->elevator->type;
 
+       lockdep_assert_held(&q->debugfs_mutex);
+
        /*
         * If the parent debugfs directory has not been created yet, return;
         * We will be called again later on with appropriate parent debugfs
@@ -863,6 +866,10 @@ void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
 
 void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
 {
+       lockdep_assert_held(&hctx->queue->debugfs_mutex);
+
+       if (!hctx->queue->debugfs_dir)
+               return;
        debugfs_remove_recursive(hctx->sched_debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
 }
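
[ Editor's note: the debugfs changes in this file converge on one rule: every blk-mq debugfs register/unregister helper runs under q->debugfs_mutex, asserts that with lockdep, and returns early if the parent directory is already gone (teardown can now run for a queue whose registration never completed). Condensed sketch of the shared shape; the function name is hypothetical:

    void blk_mq_debugfs_unregister_foo(struct request_queue *q)
    {
            lockdep_assert_held(&q->debugfs_mutex); /* caller holds the lock */

            if (!q->debugfs_dir)    /* parent already removed: nothing to do */
                    return;
            debugfs_remove_recursive(q->sched_debugfs_dir);
            q->sched_debugfs_dir = NULL;
    }
]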
index 69918f4..9c7d4b6 100644 (file)
@@ -21,7 +21,6 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
 int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);
 
 void blk_mq_debugfs_register(struct request_queue *q);
-void blk_mq_debugfs_unregister(struct request_queue *q);
 void blk_mq_debugfs_register_hctx(struct request_queue *q,
                                  struct blk_mq_hw_ctx *hctx);
 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
@@ -36,16 +35,11 @@ void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
 void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos);
-void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q);
 #else
 static inline void blk_mq_debugfs_register(struct request_queue *q)
 {
 }
 
-static inline void blk_mq_debugfs_unregister(struct request_queue *q)
-{
-}
-
 static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
                                                struct blk_mq_hw_ctx *hctx)
 {
@@ -87,10 +81,6 @@ static inline void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
 static inline void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
 {
 }
-
-static inline void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
-{
-}
 #endif
 
 #ifdef CONFIG_BLK_DEBUG_FS_ZONED
index 9e56a69..a4f7c10 100644 (file)
@@ -564,6 +564,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
        int ret;
 
        if (!e) {
+               blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
                q->elevator = NULL;
                q->nr_requests = q->tag_set->queue_depth;
                return 0;
@@ -593,7 +594,9 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
        if (ret)
                goto err_free_map_and_rqs;
 
+       mutex_lock(&q->debugfs_mutex);
        blk_mq_debugfs_register_sched(q);
+       mutex_unlock(&q->debugfs_mutex);
 
        queue_for_each_hw_ctx(q, hctx, i) {
                if (e->ops.init_hctx) {
@@ -606,7 +609,9 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
                                return ret;
                        }
                }
+               mutex_lock(&q->debugfs_mutex);
                blk_mq_debugfs_register_sched_hctx(q, hctx);
+               mutex_unlock(&q->debugfs_mutex);
        }
 
        return 0;
@@ -647,14 +652,21 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
        unsigned int flags = 0;
 
        queue_for_each_hw_ctx(q, hctx, i) {
+               mutex_lock(&q->debugfs_mutex);
                blk_mq_debugfs_unregister_sched_hctx(hctx);
+               mutex_unlock(&q->debugfs_mutex);
+
                if (e->type->ops.exit_hctx && hctx->sched_data) {
                        e->type->ops.exit_hctx(hctx, i);
                        hctx->sched_data = NULL;
                }
                flags = hctx->flags;
        }
+
+       mutex_lock(&q->debugfs_mutex);
        blk_mq_debugfs_unregister_sched(q);
+       mutex_unlock(&q->debugfs_mutex);
+
        if (e->type->ops.exit_sched)
                e->type->ops.exit_sched(e);
        blk_mq_sched_tags_teardown(q, flags);
index e9bf950..93d9d60 100644 (file)
@@ -579,6 +579,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
        if (!blk_mq_hw_queue_mapped(data.hctx))
                goto out_queue_exit;
        cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
+       if (cpu >= nr_cpu_ids)
+               goto out_queue_exit;
        data.ctx = __blk_mq_get_ctx(q, cpu);
 
        if (!q->elevator)
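
[ Editor's note: the bound check above matters because cpumask_first_and() does not return an error code; it returns nr_cpu_ids when the two masks have no bit in common. Using that value as a CPU index would walk off the per-CPU arrays. The general pattern:

    unsigned int cpu = cpumask_first_and(mask_a, mask_b);

    if (cpu >= nr_cpu_ids)
            return -EINVAL;         /* empty intersection: no usable CPU */
]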
@@ -2141,20 +2143,6 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 EXPORT_SYMBOL(blk_mq_run_hw_queue);
 
 /*
- * Is the request queue handled by an IO scheduler that does not respect
- * hardware queues when dispatching?
- */
-static bool blk_mq_has_sqsched(struct request_queue *q)
-{
-       struct elevator_queue *e = q->elevator;
-
-       if (e && e->type->ops.dispatch_request &&
-           !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
-               return true;
-       return false;
-}
-
-/*
  * Return prefered queue to dispatch from (if any) for non-mq aware IO
  * scheduler.
  */
@@ -2186,7 +2174,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
        unsigned long i;
 
        sq_hctx = NULL;
-       if (blk_mq_has_sqsched(q))
+       if (blk_queue_sq_sched(q))
                sq_hctx = blk_mq_get_sq_hctx(q);
        queue_for_each_hw_ctx(q, hctx, i) {
                if (blk_mq_hctx_stopped(hctx))
@@ -2214,7 +2202,7 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
        unsigned long i;
 
        sq_hctx = NULL;
-       if (blk_mq_has_sqsched(q))
+       if (blk_queue_sq_sched(q))
                sq_hctx = blk_mq_get_sq_hctx(q);
        queue_for_each_hw_ctx(q, hctx, i) {
                if (blk_mq_hctx_stopped(hctx))
@@ -2777,15 +2765,20 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
                return NULL;
        }
 
-       rq_qos_throttle(q, *bio);
-
        if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
                return NULL;
        if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
                return NULL;
 
-       rq->cmd_flags = (*bio)->bi_opf;
+       /*
+        * If any qos ->throttle() end up blocking, we will have flushed the
+        * plug and hence killed the cached_rq list as well. Pop this entry
+        * before we throttle.
+        */
        plug->cached_rq = rq_list_next(rq);
+       rq_qos_throttle(q, *bio);
+
+       rq->cmd_flags = (*bio)->bi_opf;
        INIT_LIST_HEAD(&rq->queuelist);
        return rq;
 }
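
[ Editor's note: the reorder above is subtle. rq_qos_throttle() may block, blocking can flush the current plug, and flushing frees plug->cached_rq, so the cached request has to be popped off the list before throttling. The required ordering, condensed:

    /* Detach from the plug before any call that can sleep. */
    plug->cached_rq = rq_list_next(rq);     /* pop first */
    rq_qos_throttle(q, *bio);               /* may sleep -> may flush plug */
    rq->cmd_flags = (*bio)->bi_opf;         /* safe: rq is off the list */
]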
@@ -3443,8 +3436,9 @@ static void blk_mq_exit_hctx(struct request_queue *q,
        if (blk_mq_hw_queue_mapped(hctx))
                blk_mq_tag_idle(hctx);
 
-       blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
-                       set->queue_depth, flush_rq);
+       if (blk_queue_init_done(q))
+               blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
+                               set->queue_depth, flush_rq);
        if (set->ops->exit_request)
                set->ops->exit_request(set, flush_rq, hctx_idx);
 
@@ -4438,12 +4432,14 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
        if (!qe)
                return false;
 
+       /* q->elevator needs protection from ->sysfs_lock */
+       mutex_lock(&q->sysfs_lock);
+
        INIT_LIST_HEAD(&qe->node);
        qe->q = q;
        qe->type = q->elevator->type;
        list_add(&qe->node, head);
 
-       mutex_lock(&q->sysfs_lock);
        /*
         * After elevator_switch_mq, the previous elevator_queue will be
         * released by elevator_release. The reference of the io scheduler
index e83af7b..d3a7569 100644 (file)
@@ -294,8 +294,6 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
 
 void rq_qos_exit(struct request_queue *q)
 {
-       blk_mq_debugfs_unregister_queue_rqos(q);
-
        while (q->rq_qos) {
                struct rq_qos *rqos = q->rq_qos;
                q->rq_qos = rqos->next;
index 6826700..0e46052 100644 (file)
@@ -104,8 +104,11 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
 
        blk_mq_unfreeze_queue(q);
 
-       if (rqos->ops->debugfs_attrs)
+       if (rqos->ops->debugfs_attrs) {
+               mutex_lock(&q->debugfs_mutex);
                blk_mq_debugfs_register_rqos(rqos);
+               mutex_unlock(&q->debugfs_mutex);
+       }
 }
 
 static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
@@ -129,7 +132,9 @@ static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
 
        blk_mq_unfreeze_queue(q);
 
+       mutex_lock(&q->debugfs_mutex);
        blk_mq_debugfs_unregister_rqos(rqos);
+       mutex_unlock(&q->debugfs_mutex);
 }
 
 typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
index 88bd41d..9b905e9 100644 (file)
@@ -779,14 +779,6 @@ static void blk_release_queue(struct kobject *kobj)
        if (queue_is_mq(q))
                blk_mq_release(q);
 
-       blk_trace_shutdown(q);
-       mutex_lock(&q->debugfs_mutex);
-       debugfs_remove_recursive(q->debugfs_dir);
-       mutex_unlock(&q->debugfs_mutex);
-
-       if (queue_is_mq(q))
-               blk_mq_debugfs_unregister(q);
-
        bioset_exit(&q->bio_split);
 
        if (blk_queue_has_srcu(q))
@@ -836,17 +828,16 @@ int blk_register_queue(struct gendisk *disk)
                goto unlock;
        }
 
+       if (queue_is_mq(q))
+               __blk_mq_register_dev(dev, q);
+       mutex_lock(&q->sysfs_lock);
+
        mutex_lock(&q->debugfs_mutex);
        q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
                                            blk_debugfs_root);
-       mutex_unlock(&q->debugfs_mutex);
-
-       if (queue_is_mq(q)) {
-               __blk_mq_register_dev(dev, q);
+       if (queue_is_mq(q))
                blk_mq_debugfs_register(q);
-       }
-
-       mutex_lock(&q->sysfs_lock);
+       mutex_unlock(&q->debugfs_mutex);
 
        ret = disk_register_independent_access_ranges(disk, NULL);
        if (ret)
@@ -948,8 +939,15 @@ void blk_unregister_queue(struct gendisk *disk)
        /* Now that we've deleted all child objects, we can delete the queue. */
        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
-
        mutex_unlock(&q->sysfs_dir_lock);
 
+       mutex_lock(&q->debugfs_mutex);
+       blk_trace_shutdown(q);
+       debugfs_remove_recursive(q->debugfs_dir);
+       q->debugfs_dir = NULL;
+       q->sched_debugfs_dir = NULL;
+       q->rqos_debugfs_dir = NULL;
+       mutex_unlock(&q->debugfs_mutex);
+
        kobject_put(&disk_to_dev(disk)->kobj);
 }
index 27205ae..278227b 100644 (file)
@@ -623,6 +623,7 @@ void del_gendisk(struct gendisk *disk)
         * Prevent new I/O from crossing bio_queue_enter().
         */
        blk_queue_start_drain(q);
+       blk_mq_freeze_queue_wait(q);
 
        if (!(disk->flags & GENHD_FL_HIDDEN)) {
                sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
@@ -646,12 +647,21 @@ void del_gendisk(struct gendisk *disk)
        pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
        device_del(disk_to_dev(disk));
 
-       blk_mq_freeze_queue_wait(q);
-
        blk_throtl_cancel_bios(disk->queue);
 
        blk_sync_queue(q);
        blk_flush_integrity();
+       blk_mq_cancel_work_sync(q);
+
+       blk_mq_quiesce_queue(q);
+       if (q->elevator) {
+               mutex_lock(&q->sysfs_lock);
+               elevator_exit(q);
+               mutex_unlock(&q->sysfs_lock);
+       }
+       rq_qos_exit(q);
+       blk_mq_unquiesce_queue(q);
+
        /*
         * Allow using passthrough request again after the queue is torn down.
         */
@@ -1120,31 +1130,6 @@ static const struct attribute_group *disk_attr_groups[] = {
        NULL
 };
 
-static void disk_release_mq(struct request_queue *q)
-{
-       blk_mq_cancel_work_sync(q);
-
-       /*
-        * There can't be any non non-passthrough bios in flight here, but
-        * requests stay around longer, including passthrough ones so we
-        * still need to freeze the queue here.
-        */
-       blk_mq_freeze_queue(q);
-
-       /*
-        * Since the I/O scheduler exit code may access cgroup information,
-        * perform I/O scheduler exit before disassociating from the block
-        * cgroup controller.
-        */
-       if (q->elevator) {
-               mutex_lock(&q->sysfs_lock);
-               elevator_exit(q);
-               mutex_unlock(&q->sysfs_lock);
-       }
-       rq_qos_exit(q);
-       __blk_mq_unfreeze_queue(q, true);
-}
-
 /**
  * disk_release - releases all allocated resources of the gendisk
  * @dev: the device representing this disk
@@ -1166,9 +1151,6 @@ static void disk_release(struct device *dev)
        might_sleep();
        WARN_ON_ONCE(disk_live(disk));
 
-       if (queue_is_mq(disk->queue))
-               disk_release_mq(disk->queue);
-
        blkcg_exit_queue(disk->queue);
 
        disk_release_events(disk);
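
[ Editor's note: the work formerly done by disk_release_mq() now runs in del_gendisk(), with the queue frozen and quiesced. The rationale from the removed comment still holds: the I/O scheduler exit path may access cgroup information, so elevator_exit() must happen before disk_release() calls blkcg_exit_queue(). The resulting teardown order, as I read the hunks:

    blk_mq_quiesce_queue(q);
    if (q->elevator) {
            mutex_lock(&q->sysfs_lock);
            elevator_exit(q);       /* may touch cgroup state */
            mutex_unlock(&q->sysfs_lock);
    }
    rq_qos_exit(q);
    blk_mq_unquiesce_queue(q);
]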
index 8d75028..5283bc8 100644 (file)
@@ -79,10 +79,6 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
 
        WARN_ON_ONCE(!bdev->bd_holder);
 
-       /* FIXME: remove the following once add_disk() handles errors */
-       if (WARN_ON(!bdev->bd_holder_dir))
-               goto out_unlock;
-
        holder = bd_find_holder_disk(bdev, disk);
        if (holder) {
                holder->refcnt++;
index 70ff2a5..8f7c745 100644 (file)
@@ -421,6 +421,8 @@ static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
 
        blk_stat_enable_accounting(q);
 
+       blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
+
        eq->elevator_data = kqd;
        q->elevator = eq;
 
@@ -1033,7 +1035,6 @@ static struct elevator_type kyber_sched = {
 #endif
        .elevator_attrs = kyber_sched_attrs,
        .elevator_name = "kyber",
-       .elevator_features = ELEVATOR_F_MQ_AWARE,
        .elevator_owner = THIS_MODULE,
 };
 
index 6ed602b..1a9e835 100644 (file)
@@ -642,6 +642,9 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
        spin_lock_init(&dd->lock);
        spin_lock_init(&dd->zone_lock);
 
+       /* We dispatch from request queue wide instead of hw queue */
+       blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
+
        q->elevator = eq;
        return 0;
 
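
[ Editor's note: together with the kyber and BFQ hunks, this replaces the static ELEVATOR_F_MQ_AWARE feature bit with a runtime queue flag. Queue-wide dispatchers (BFQ, mq-deadline) set QUEUE_FLAG_SQ_SCHED at init; kyber and the no-elevator path clear it; the run-queue code then tests the flag instead of poking at elevator ops. Both sides in brief:

    /* Scheduler side: declare single-queue dispatch at init time. */
    blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);

    /* Dispatch side: pick a single hctx to dispatch from when set. */
    sq_hctx = NULL;
    if (blk_queue_sq_sched(q))
            sq_hctx = blk_mq_get_sq_hctx(q);
]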
index 56637ac..cec5465 100644 (file)
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
-/blacklist_hashes_checked
+/blacklist_hash_list
 /extract-cert
 /x509_certificate_list
 /x509_revocation_list
index 4767557..bf9b511 100644 (file)
@@ -43,6 +43,7 @@ config SYSTEM_TRUSTED_KEYRING
        bool "Provide system-wide ring of trusted keys"
        depends on KEYS
        depends on ASYMMETRIC_KEY_TYPE
+       depends on X509_CERTIFICATE_PARSER
        help
          Provide a system keyring to which trusted keys can be added.  Keys in
          the keyring are considered to be trusted.  Keys may be added at will
index cb1a9da..88a73b2 100644 (file)
@@ -3,26 +3,26 @@
 # Makefile for the linux kernel signature checking certificates.
 #
 
-obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o common.o
-obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist.o common.o
+obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o
+obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist.o
 obj-$(CONFIG_SYSTEM_REVOCATION_LIST) += revocation_certificates.o
 ifneq ($(CONFIG_SYSTEM_BLACKLIST_HASH_LIST),)
-quiet_cmd_check_blacklist_hashes = CHECK   $(patsubst "%",%,$(2))
-      cmd_check_blacklist_hashes = $(AWK) -f $(srctree)/scripts/check-blacklist-hashes.awk $(2); touch $@
 
-$(eval $(call config_filename,SYSTEM_BLACKLIST_HASH_LIST))
+$(obj)/blacklist_hashes.o: $(obj)/blacklist_hash_list
+CFLAGS_blacklist_hashes.o := -I $(obj)
 
-$(obj)/blacklist_hashes.o: $(obj)/blacklist_hashes_checked
+quiet_cmd_check_and_copy_blacklist_hash_list = GEN     $@
+      cmd_check_and_copy_blacklist_hash_list = \
+       $(AWK) -f $(srctree)/scripts/check-blacklist-hashes.awk $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) >&2; \
+       cat $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) > $@
 
-CFLAGS_blacklist_hashes.o += -I$(srctree)
-
-targets += blacklist_hashes_checked
-$(obj)/blacklist_hashes_checked: $(SYSTEM_BLACKLIST_HASH_LIST_SRCPREFIX)$(SYSTEM_BLACKLIST_HASH_LIST_FILENAME) scripts/check-blacklist-hashes.awk FORCE
-       $(call if_changed,check_blacklist_hashes,$(SYSTEM_BLACKLIST_HASH_LIST_SRCPREFIX)$(CONFIG_SYSTEM_BLACKLIST_HASH_LIST))
+$(obj)/blacklist_hash_list: $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) FORCE
+       $(call if_changed,check_and_copy_blacklist_hash_list)
 obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_hashes.o
 else
 obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_nohashes.o
 endif
+targets += blacklist_hash_list
 
 quiet_cmd_extract_certs  = CERT    $@
       cmd_extract_certs  = $(obj)/extract-cert $(extract-cert-in) $@
@@ -33,7 +33,7 @@ $(obj)/system_certificates.o: $(obj)/x509_certificate_list
 $(obj)/x509_certificate_list: $(CONFIG_SYSTEM_TRUSTED_KEYS) $(obj)/extract-cert FORCE
        $(call if_changed,extract_certs)
 
-targets += x509_certificate_list blacklist_hashes_checked
+targets += x509_certificate_list
 
 # If module signing is requested, say by allyesconfig, but a key has not been
 # supplied, then one will need to be generated to make sure the build does not
index 25094ea..41f1060 100644 (file)
 #include <linux/err.h>
 #include <linux/seq_file.h>
 #include <linux/uidgid.h>
-#include <linux/verification.h>
+#include <keys/asymmetric-type.h>
 #include <keys/system_keyring.h>
 #include "blacklist.h"
-#include "common.h"
 
 /*
  * According to crypto/asymmetric_keys/x509_cert_parser.c:x509_note_pkey_algo(),
@@ -365,8 +364,9 @@ static __init int load_revocation_certificate_list(void)
        if (revocation_certificate_list_size)
                pr_notice("Loading compiled-in revocation X.509 certificates\n");
 
-       return load_certificate_list(revocation_certificate_list, revocation_certificate_list_size,
-                                    blacklist_keyring);
+       return x509_load_certificate_list(revocation_certificate_list,
+                                         revocation_certificate_list_size,
+                                         blacklist_keyring);
 }
 late_initcall(load_revocation_certificate_list);
 #endif
index 3448923..86d66fe 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "blacklist.h"
 
-const char __initdata *const blacklist_hashes[] = {
-#include CONFIG_SYSTEM_BLACKLIST_HASH_LIST
+const char __initconst *const blacklist_hashes[] = {
+#include "blacklist_hash_list"
        , NULL
 };
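
[ Editor's note: this is a section-placement fix: the array is const, so it belongs in .init.rodata via __initconst; annotating const data __initdata can produce section attribute conflicts. The convention, with a hypothetical entry and the attribute in its common trailing position:

    /* const init-only data takes __initconst, not __initdata. */
    static const char *const example_hashes[] __initconst = {
            "sha256:0123...",
            NULL
    };
]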
diff --git a/certs/common.h b/certs/common.h
deleted file mode 100644 (file)
index abdb579..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-
-#ifndef _CERT_COMMON_H
-#define _CERT_COMMON_H
-
-int load_certificate_list(const u8 cert_list[], const unsigned long list_size,
-                         const struct key *keyring);
-
-#endif
index 05b66ce..5042cc5 100644 (file)
@@ -16,7 +16,6 @@
 #include <keys/asymmetric-type.h>
 #include <keys/system_keyring.h>
 #include <crypto/pkcs7.h>
-#include "common.h"
 
 static struct key *builtin_trusted_keys;
 #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
@@ -183,7 +182,8 @@ __init int load_module_cert(struct key *keyring)
 
        pr_notice("Loading compiled-in module X.509 certificates\n");
 
-       return load_certificate_list(system_certificate_list, module_cert_size, keyring);
+       return x509_load_certificate_list(system_certificate_list,
+                                         module_cert_size, keyring);
 }
 
 /*
@@ -204,7 +204,7 @@ static __init int load_system_certificate_list(void)
        size = system_certificate_list_size - module_cert_size;
 #endif
 
-       return load_certificate_list(p, size, builtin_trusted_keys);
+       return x509_load_certificate_list(p, size, builtin_trusted_keys);
 }
 late_initcall(load_system_certificate_list);
 
index 1919746..7b81685 100644 (file)
@@ -15,6 +15,7 @@ source "crypto/async_tx/Kconfig"
 #
 menuconfig CRYPTO
        tristate "Cryptographic API"
+       select LIB_MEMNEQ
        help
          This option provides the core Cryptographic API.
 
@@ -665,6 +666,18 @@ config CRYPTO_CRC32_MIPS
          CRC32c and CRC32 CRC algorithms implemented using mips crypto
          instructions, when available.
 
+config CRYPTO_CRC32_S390
+       tristate "CRC-32 algorithms"
+       depends on S390
+       select CRYPTO_HASH
+       select CRC32
+       help
+         Select this option if you want to use hardware accelerated
+         implementations of CRC algorithms.  With this option, you
+         can optimize the computation of CRC-32 (IEEE 802.3 Ethernet)
+         and CRC-32C (Castagnoli).
+
+         It is available with IBM z13 or later.
 
 config CRYPTO_XXHASH
        tristate "xxHash hash algorithm"
@@ -897,6 +910,16 @@ config CRYPTO_SHA512_SSSE3
          Extensions version 1 (AVX1), or Advanced Vector Extensions
          version 2 (AVX2) instructions, when available.
 
+config CRYPTO_SHA512_S390
+       tristate "SHA384 and SHA512 digest algorithm"
+       depends on S390
+       select CRYPTO_HASH
+       help
+         This is the s390 hardware accelerated implementation of the
+         SHA512 secure hash standard.
+
+         It is available as of z10.
+
 config CRYPTO_SHA1_OCTEON
        tristate "SHA1 digest algorithm (OCTEON)"
        depends on CPU_CAVIUM_OCTEON
@@ -929,6 +952,16 @@ config CRYPTO_SHA1_PPC_SPE
          SHA-1 secure hash standard (DFIPS 180-4) implemented
          using powerpc SPE SIMD instruction set.
 
+config CRYPTO_SHA1_S390
+       tristate "SHA1 digest algorithm"
+       depends on S390
+       select CRYPTO_HASH
+       help
+         This is the s390 hardware accelerated implementation of the
+         SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
+
+         It is available as of z990.
+
 config CRYPTO_SHA256
        tristate "SHA224 and SHA256 digest algorithm"
        select CRYPTO_HASH
@@ -969,6 +1002,16 @@ config CRYPTO_SHA256_SPARC64
          SHA-256 secure hash standard (DFIPS 180-2) implemented
          using sparc64 crypto instructions, when available.
 
+config CRYPTO_SHA256_S390
+       tristate "SHA256 digest algorithm"
+       depends on S390
+       select CRYPTO_HASH
+       help
+         This is the s390 hardware accelerated implementation of the
+         SHA256 secure hash standard (DFIPS 180-2).
+
+         It is available as of z9.
+
 config CRYPTO_SHA512
        tristate "SHA384 and SHA512 digest algorithms"
        select CRYPTO_HASH
@@ -1009,6 +1052,26 @@ config CRYPTO_SHA3
          References:
          http://keccak.noekeon.org/
 
+config CRYPTO_SHA3_256_S390
+       tristate "SHA3_224 and SHA3_256 digest algorithm"
+       depends on S390
+       select CRYPTO_HASH
+       help
+         This is the s390 hardware accelerated implementation of the
+         SHA3_256 secure hash standard.
+
+         It is available as of z14.
+
+config CRYPTO_SHA3_512_S390
+       tristate "SHA3_384 and SHA3_512 digest algorithm"
+       depends on S390
+       select CRYPTO_HASH
+       help
+         This is the s390 hardware accelerated implementation of the
+         SHA3_512 secure hash standard.
+
+         It is available as of z14.
+
 config CRYPTO_SM3
        tristate
 
@@ -1069,6 +1132,16 @@ config CRYPTO_GHASH_CLMUL_NI_INTEL
          This is the x86_64 CLMUL-NI accelerated implementation of
          GHASH, the hash function used in GCM (Galois/Counter mode).
 
+config CRYPTO_GHASH_S390
+       tristate "GHASH hash function"
+       depends on S390
+       select CRYPTO_HASH
+       help
+         This is the s390 hardware accelerated implementation of GHASH,
+         the hash function used in GCM (Galois/Counter mode).
+
+         It is available as of z196.
+
 comment "Ciphers"
 
 config CRYPTO_AES
@@ -1184,6 +1257,23 @@ config CRYPTO_AES_PPC_SPE
          architecture specific assembler implementations that work on 1KB
          tables or 256 bytes S-boxes.
 
+config CRYPTO_AES_S390
+       tristate "AES cipher algorithms"
+       depends on S390
+       select CRYPTO_ALGAPI
+       select CRYPTO_SKCIPHER
+       help
+         This is the s390 hardware accelerated implementation of the
+         AES cipher algorithms (FIPS-197).
+
+         As of z9 the ECB and CBC modes are hardware accelerated
+         for 128 bit keys.
+         As of z10 the ECB and CBC modes are hardware accelerated
+         for all AES key sizes.
+         As of z196 the CTR mode is hardware accelerated for all AES
+         key sizes and XTS mode is hardware accelerated for 256 and
+         512 bit keys.
+
 config CRYPTO_ANUBIS
        tristate "Anubis cipher algorithm"
        depends on CRYPTO_USER_API_ENABLE_OBSOLETE
@@ -1414,6 +1504,19 @@ config CRYPTO_DES3_EDE_X86_64
          algorithm are provided; regular processing one input block and
          one that processes three blocks parallel.
 
+config CRYPTO_DES_S390
+       tristate "DES and Triple DES cipher algorithms"
+       depends on S390
+       select CRYPTO_ALGAPI
+       select CRYPTO_SKCIPHER
+       select CRYPTO_LIB_DES
+       help
+         This is the s390 hardware accelerated implementation of the
+         DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
+
+         As of z990 the ECB and CBC mode are hardware accelerated.
+         As of z196 the CTR mode is hardware accelerated.
+
 config CRYPTO_FCRYPT
        tristate "FCrypt cipher algorithm"
        select CRYPTO_ALGAPI
@@ -1473,6 +1576,18 @@ config CRYPTO_CHACHA_MIPS
        select CRYPTO_SKCIPHER
        select CRYPTO_ARCH_HAVE_LIB_CHACHA
 
+config CRYPTO_CHACHA_S390
+       tristate "ChaCha20 stream cipher"
+       depends on S390
+       select CRYPTO_SKCIPHER
+       select CRYPTO_LIB_CHACHA_GENERIC
+       select CRYPTO_ARCH_HAVE_LIB_CHACHA
+       help
+         This is the s390 SIMD implementation of the ChaCha20 stream
+         cipher (RFC 7539).
+
+         It is available as of z13.
+
 config CRYPTO_SEED
        tristate "SEED cipher algorithm"
        depends on CRYPTO_USER_API_ENABLE_OBSOLETE
index 43bc33e..ceaaa9f 100644 (file)
@@ -4,7 +4,7 @@
 #
 
 obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-y := api.o cipher.o compress.o memneq.o
+crypto-y := api.o cipher.o compress.o
 
 obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o
 obj-$(CONFIG_CRYPTO_FIPS) += fips.o
index 460bc5d..3df3fe4 100644 (file)
@@ -75,4 +75,14 @@ config SIGNED_PE_FILE_VERIFICATION
          This option provides support for verifying the signature(s) on a
          signed PE binary.
 
+config FIPS_SIGNATURE_SELFTEST
+       bool "Run FIPS selftests on the X.509+PKCS7 signature verification"
+       help
+         This option causes some selftests to be run on the signature
+         verification code, using some built in data.  This is required
+         for FIPS.
+       depends on KEYS
+       depends on ASYMMETRIC_KEY_TYPE
+       depends on PKCS7_MESSAGE_PARSER
+
 endif # ASYMMETRIC_KEY_TYPE
index c38424f..0d1fa1b 100644 (file)
@@ -20,7 +20,9 @@ x509_key_parser-y := \
        x509.asn1.o \
        x509_akid.asn1.o \
        x509_cert_parser.o \
+       x509_loader.o \
        x509_public_key.o
+x509_key_parser-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += selftest.o
 
 $(obj)/x509_cert_parser.o: \
        $(obj)/x509.asn1.h \
diff --git a/crypto/asymmetric_keys/selftest.c b/crypto/asymmetric_keys/selftest.c
new file mode 100644 (file)
index 0000000..fa0bf7f
--- /dev/null
@@ -0,0 +1,224 @@
+/* Self-testing for signature checking.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/cred.h>
+#include <linux/key.h>
+#include <crypto/pkcs7.h>
+#include "x509_parser.h"
+
+struct certs_test {
+       const u8        *data;
+       size_t          data_len;
+       const u8        *pkcs7;
+       size_t          pkcs7_len;
+};
+
+/*
+ * Set of X.509 certificates to provide public keys for the tests.  These will
+ * be loaded into a temporary keyring for the duration of the testing.
+ */
+static const __initconst u8 certs_selftest_keys[] = {
+       "\x30\x82\x05\x55\x30\x82\x03\x3d\xa0\x03\x02\x01\x02\x02\x14\x73"
+       "\x98\xea\x98\x2d\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a"
+       "\xfc\x8c\x0a\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b"
+       "\x05\x00\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29\x43"
+       "\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66"
+       "\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65\x73"
+       "\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32\x32\x30\x35"
+       "\x31\x38\x32\x32\x33\x32\x34\x31\x5a\x18\x0f\x32\x31\x32\x32\x30"
+       "\x34\x32\x34\x32\x32\x33\x32\x34\x31\x5a\x30\x34\x31\x32\x30\x30"
+       "\x06\x03\x55\x04\x03\x0c\x29\x43\x65\x72\x74\x69\x66\x69\x63\x61"
+       "\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x20"
+       "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79"
+       "\x30\x82\x02\x22\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01"
+       "\x01\x05\x00\x03\x82\x02\x0f\x00\x30\x82\x02\x0a\x02\x82\x02\x01"
+       "\x00\xcc\xac\x49\xdd\x3b\xca\xb0\x15\x7e\x84\x6a\xb2\x0a\x69\x5f"
+       "\x1c\x0a\x61\x82\x3b\x4f\x2c\xa3\x95\x2c\x08\x58\x4b\xb1\x5d\x99"
+       "\xe0\xc3\xc1\x79\xc2\xb3\xeb\xc0\x1e\x6d\x3e\x54\x1d\xbd\xb7\x92"
+       "\x7b\x4d\xb5\x95\x58\xb2\x52\x2e\xc6\x24\x4b\x71\x63\x80\x32\x77"
+       "\xa7\x38\x5e\xdb\x72\xae\x6e\x0d\xec\xfb\xb6\x6d\x01\x7f\xe9\x55"
+       "\x66\xdf\xbf\x1d\x76\x78\x02\x31\xe8\xe5\x07\xf8\xb7\x82\x5c\x0d"
+       "\xd4\xbb\xfb\xa2\x59\x0d\x2e\x3a\x78\x95\x3a\x8b\x46\x06\x47\x44"
+       "\x46\xd7\xcd\x06\x6a\x41\x13\xe3\x19\xf6\xbb\x6e\x38\xf4\x83\x01"
+       "\xa3\xbf\x4a\x39\x4f\xd7\x0a\xe9\x38\xb3\xf5\x94\x14\x4e\xdd\xf7"
+       "\x43\xfd\x24\xb2\x49\x3c\xa5\xf7\x7a\x7c\xd4\x45\x3d\x97\x75\x68"
+       "\xf1\xed\x4c\x42\x0b\x70\xca\x85\xf3\xde\xe5\x88\x2c\xc5\xbe\xb6"
+       "\x97\x34\xba\x24\x02\xcd\x8b\x86\x9f\xa9\x73\xca\x73\xcf\x92\x81"
+       "\xee\x75\x55\xbb\x18\x67\x5c\xff\x3f\xb5\xdd\x33\x1b\x0c\xe9\x78"
+       "\xdb\x5c\xcf\xaa\x5c\x43\x42\xdf\x5e\xa9\x6d\xec\xd7\xd7\xff\xe6"
+       "\xa1\x3a\x92\x1a\xda\xae\xf6\x8c\x6f\x7b\xd5\xb4\x6e\x06\xe9\x8f"
+       "\xe8\xde\x09\x31\x89\xed\x0e\x11\xa1\xfa\x8a\xe9\xe9\x64\x59\x62"
+       "\x53\xda\xd1\x70\xbe\x11\xd4\x99\x97\x11\xcf\x99\xde\x0b\x9d\x94"
+       "\x7e\xaa\xb8\x52\xea\x37\xdb\x90\x7e\x35\xbd\xd9\xfe\x6d\x0a\x48"
+       "\x70\x28\xdd\xd5\x0d\x7f\x03\x80\x93\x14\x23\x8f\xb9\x22\xcd\x7c"
+       "\x29\xfe\xf1\x72\xb5\x5c\x0b\x12\xcf\x9c\x15\xf6\x11\x4c\x7a\x45"
+       "\x25\x8c\x45\x0a\x34\xac\x2d\x9a\x81\xca\x0b\x13\x22\xcd\xeb\x1a"
+       "\x38\x88\x18\x97\x96\x08\x81\xaa\xcc\x8f\x0f\x8a\x32\x7b\x76\x68"
+       "\x03\x68\x43\xbf\x11\xba\x55\x60\xfd\x80\x1c\x0d\x9b\x69\xb6\x09"
+       "\x72\xbc\x0f\x41\x2f\x07\x82\xc6\xe3\xb2\x13\x91\xc4\x6d\x14\x95"
+       "\x31\xbe\x19\xbd\xbc\xed\xe1\x4c\x74\xa2\xe0\x78\x0b\xbb\x94\xec"
+       "\x4c\x53\x3a\xa2\xb5\x84\x1d\x4b\x65\x7e\xdc\xf7\xdb\x36\x7d\xbe"
+       "\x9e\x3b\x36\x66\x42\x66\x76\x35\xbf\xbe\xf0\xc1\x3c\x7c\xe9\x42"
+       "\x5c\x24\x53\x03\x05\xa8\x67\x24\x50\x02\x75\xff\x24\x46\x3b\x35"
+       "\x89\x76\xe6\x70\xda\xc5\x51\x8c\x9a\xe5\x05\xb0\x0b\xd0\x2d\xd4"
+       "\x7d\x57\x75\x94\x6b\xf9\x0a\xad\x0e\x41\x00\x15\xd0\x4f\xc0\x7f"
+       "\x90\x2d\x18\x48\x8f\x28\xfe\x5d\xa7\xcd\x99\x9e\xbd\x02\x6c\x8a"
+       "\x31\xf3\x1c\xc7\x4b\xe6\x93\xcd\x42\xa2\xe4\x68\x10\x47\x9d\xfc"
+       "\x21\x02\x03\x01\x00\x01\xa3\x5d\x30\x5b\x30\x0c\x06\x03\x55\x1d"
+       "\x13\x01\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04"
+       "\x04\x03\x02\x07\x80\x30\x1d\x06\x03\x55\x1d\x0e\x04\x16\x04\x14"
+       "\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88\x17"
+       "\x51\x8f\xe3\xdb\x30\x1f\x06\x03\x55\x1d\x23\x04\x18\x30\x16\x80"
+       "\x14\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88"
+       "\x17\x51\x8f\xe3\xdb\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01"
+       "\x01\x0b\x05\x00\x03\x82\x02\x01\x00\xc0\x2e\x12\x41\x7b\x73\x85"
+       "\x16\xc8\xdb\x86\x79\xe8\xf5\xcd\x44\xf4\xc6\xe2\x81\x23\x5e\x47"
+       "\xcb\xab\x25\xf1\x1e\x58\x3e\x31\x7f\x78\xad\x85\xeb\xfe\x14\x88"
+       "\x60\xf7\x7f\xd2\x26\xa2\xf4\x98\x2a\xfd\xba\x05\x0c\x20\x33\x12"
+       "\xcc\x4d\x14\x61\x64\x81\x93\xd3\x33\xed\xc8\xff\xf1\x78\xcc\x5f"
+       "\x51\x9f\x09\xd7\xbe\x0d\x5c\x74\xfd\x9b\xdf\x52\x4a\xc9\xa8\x71"
+       "\x25\x33\x04\x10\x67\x36\xd0\xb3\x0b\xc9\xa1\x40\x72\xae\x41\x7b"
+       "\x68\xe6\xe4\x7b\xd0\x28\xf7\x6d\xe7\x3f\x50\xfc\x91\x7c\x91\x56"
+       "\xd4\xdf\xa6\xbb\xe8\x4d\x1b\x58\xaa\x28\xfa\xc1\x19\xeb\x11\x2f"
+       "\x24\x8b\x7c\xc5\xa9\x86\x26\xaa\x6e\xb7\x9b\xd5\xf8\x06\xfb\x02"
+       "\x52\x7b\x9c\x9e\xa1\xe0\x07\x8b\x5e\xe4\xb8\x55\x29\xf6\x48\x52"
+       "\x1c\x1b\x54\x2d\x46\xd8\xe5\x71\xb9\x60\xd1\x45\xb5\x92\x89\x8a"
+       "\x63\x58\x2a\xb3\xc6\xb2\x76\xe2\x3c\x82\x59\x04\xae\x5a\xc4\x99"
+       "\x7b\x2e\x4b\x46\x57\xb8\x29\x24\xb2\xfd\xee\x2c\x0d\xa4\x83\xfa"
+       "\x65\x2a\x07\x35\x8b\x97\xcf\xbd\x96\x2e\xd1\x7e\x6c\xc2\x1e\x87"
+       "\xb6\x6c\x76\x65\xb5\xb2\x62\xda\x8b\xe9\x73\xe3\xdb\x33\xdd\x13"
+       "\x3a\x17\x63\x6a\x76\xde\x8d\x8f\xe0\x47\x61\x28\x3a\x83\xff\x8f"
+       "\xe7\xc7\xe0\x4a\xa3\xe5\x07\xcf\xe9\x8c\x35\x35\x2e\xe7\x80\x66"
+       "\x31\xbf\x91\x58\x0a\xe1\x25\x3d\x38\xd3\xa4\xf0\x59\x34\x47\x07"
+       "\x62\x0f\xbe\x30\xdd\x81\x88\x58\xf0\x28\xb0\x96\xe5\x82\xf8\x05"
+       "\xb7\x13\x01\xbc\xfa\xc6\x1f\x86\x72\xcc\xf9\xee\x8e\xd9\xd6\x04"
+       "\x8c\x24\x6c\xbf\x0f\x5d\x37\x39\xcf\x45\xc1\x93\x3a\xd2\xed\x5c"
+       "\x58\x79\x74\x86\x62\x30\x7e\x8e\xbb\xdd\x7a\xa9\xed\xca\x40\xcb"
+       "\x62\x47\xf4\xb4\x9f\x52\x7f\x72\x63\xa8\xf0\x2b\xaf\x45\x2a\x48"
+       "\x19\x6d\xe3\xfb\xf9\x19\x66\x69\xc8\xcc\x62\x87\x6c\x53\x2b\x2d"
+       "\x6e\x90\x6c\x54\x3a\x82\x25\x41\xcb\x18\x6a\xa4\x22\xa8\xa1\xc4"
+       "\x47\xd7\x81\x00\x1c\x15\x51\x0f\x1a\xaf\xef\x9f\xa6\x61\x8c\xbd"
+       "\x6b\x8b\xed\xe6\xac\x0e\xb6\x3a\x4c\x92\xe6\x0f\x91\x0a\x0f\x71"
+       "\xc7\xa0\xb9\x0d\x3a\x17\x5a\x6f\x35\xc8\xe7\x50\x4f\x46\xe8\x70"
+       "\x60\x48\x06\x82\x8b\x66\x58\xe6\x73\x91\x9c\x12\x3d\x35\x8e\x46"
+       "\xad\x5a\xf5\xb3\xdb\x69\x21\x04\xfd\xd3\x1c\xdf\x94\x9d\x56\xb0"
+       "\x0a\xd1\x95\x76\x8d\xec\x9e\xdd\x0b\x15\x97\x64\xad\xe5\xf2\x62"
+       "\x02\xfc\x9e\x5f\x56\x42\x39\x05\xb3"
+};
+
+/*
+ * Signed data and detached signature blobs that form the verification tests.
+ */
+static const __initconst u8 certs_selftest_1_data[] = {
+       "\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73"
+       "\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20"
+       "\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72"
+       "\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63"
+       "\x61\x74\x69\x6f\x6e\x2e\x0a"
+};
+
+static const __initconst u8 certs_selftest_1_pkcs7[] = {
+       "\x30\x82\x02\xab\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0"
+       "\x82\x02\x9c\x30\x82\x02\x98\x02\x01\x01\x31\x0d\x30\x0b\x06\x09"
+       "\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0b\x06\x09\x2a\x86\x48"
+       "\x86\xf7\x0d\x01\x07\x01\x31\x82\x02\x75\x30\x82\x02\x71\x02\x01"
+       "\x01\x30\x4c\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29"
+       "\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69"
+       "\x66\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65"
+       "\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x73\x98\xea\x98\x2d"
+       "\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a\xfc\x8c\x0a\x30"
+       "\x0b\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0d\x06\x09"
+       "\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x00\xac"
+       "\xb0\xf2\x07\xd6\x99\x6d\xc0\xc0\xd9\x8d\x31\x0d\x7e\x04\xeb\xc3"
+       "\x88\x90\xc4\x58\x46\xd4\xe2\xa0\xa3\x25\xe3\x04\x50\x37\x85\x8c"
+       "\x91\xc6\xfc\xc5\xd4\x92\xfd\x05\xd8\xb8\xa3\xb8\xba\x89\x13\x00"
+       "\x88\x79\x99\x51\x6b\x5b\x28\x31\xc0\xb3\x1b\x7a\x68\x2c\x00\xdb"
+       "\x4b\x46\x11\xf3\xfa\x50\x8e\x19\x89\xa2\x4c\xda\x4c\x89\x01\x11"
+       "\x89\xee\xd3\xc8\xc1\xe7\xa7\xf6\xb2\xa2\xf8\x65\xb8\x35\x20\x33"
+       "\xba\x12\x62\xd5\xbd\xaa\x71\xe5\x5b\xc0\x6a\x32\xff\x6a\x2e\x23"
+       "\xef\x2b\xb6\x58\xb1\xfb\x5f\x82\x34\x40\x6d\x9f\xbc\x27\xac\x37"
+       "\x23\x99\xcf\x7d\x20\xb2\x39\x01\xc0\x12\xce\xd7\x5d\x2f\xb6\xab"
+       "\xb5\x56\x4f\xef\xf4\x72\x07\x58\x65\xa9\xeb\x1f\x75\x1c\x5f\x0c"
+       "\x88\xe0\xa4\xe2\xcd\x73\x2b\x9e\xb2\x05\x7e\x12\xf8\xd0\x66\x41"
+       "\xcc\x12\x63\xd4\xd6\xac\x9b\x1d\x14\x77\x8d\x1c\x57\xd5\x27\xc6"
+       "\x49\xa2\x41\x43\xf3\x59\x29\xe5\xcb\xd1\x75\xbc\x3a\x97\x2a\x72"
+       "\x22\x66\xc5\x3b\xc1\xba\xfc\x53\x18\x98\xe2\x21\x64\xc6\x52\x87"
+       "\x13\xd5\x7c\x42\xe8\xfb\x9c\x9a\x45\x32\xd5\xa5\x22\x62\x9d\xd4"
+       "\xcb\xa4\xfa\x77\xbb\x50\x24\x0b\x8b\x88\x99\x15\x56\xa9\x1e\x92"
+       "\xbf\x5d\x94\x77\xb6\xf1\x67\x01\x60\x06\x58\x5c\xdf\x18\x52\x79"
+       "\x37\x30\x93\x7d\x87\x04\xf1\xe0\x55\x59\x52\xf3\xc2\xb1\x1c\x5b"
+       "\x12\x7c\x49\x87\xfb\xf7\xed\xdd\x95\x71\xec\x4b\x1a\x85\x08\xb0"
+       "\xa0\x36\xc4\x7b\xab\x40\xe0\xf1\x98\xcc\xaf\x19\x40\x8f\x47\x6f"
+       "\xf0\x6c\x84\x29\x7f\x7f\x04\x46\xcb\x08\x0f\xe0\xc1\xc9\x70\x6e"
+       "\x95\x3b\xa4\xbc\x29\x2b\x53\x67\x45\x1b\x0d\xbc\x13\xa5\x76\x31"
+       "\xaf\xb9\xd0\xe0\x60\x12\xd2\xf4\xb7\x7c\x58\x7e\xf6\x2d\xbb\x24"
+       "\x14\x5a\x20\x24\xa8\x12\xdf\x25\xbd\x42\xce\x96\x7c\x2e\xba\x14"
+       "\x1b\x81\x9f\x18\x45\xa4\xc6\x70\x3e\x0e\xf0\xd3\x7b\x9c\x10\xbe"
+       "\xb8\x7a\x89\xc5\x9e\xd9\x97\xdf\xd7\xe7\xc6\x1d\xc0\x20\x6c\xb8"
+       "\x1e\x3a\x63\xb8\x39\x8e\x8e\x62\xd5\xd2\xb4\xcd\xff\x46\xfc\x8e"
+       "\xec\x07\x35\x0c\xff\xb0\x05\xe6\xf4\xe5\xfe\xa2\xe3\x0a\xe6\x36"
+       "\xa7\x4a\x7e\x62\x1d\xc4\x50\x39\x35\x4e\x28\xcb\x4a\xfb\x9d\xdb"
+       "\xdd\x23\xd6\x53\xb1\x74\x77\x12\xf7\x9c\xf0\x9a\x6b\xf7\xa9\x64"
+       "\x2d\x86\x21\x2a\xcf\xc6\x54\xf5\xc9\xad\xfa\xb5\x12\xb4\xf3\x51"
+       "\x77\x55\x3c\x6f\x0c\x32\xd3\x8c\x44\x39\x71\x25\xfe\x96\xd2"
+};
+
+/*
+ * List of tests to be run.
+ */
+#define TEST(data, pkcs7) { data, sizeof(data) - 1, pkcs7, sizeof(pkcs7) - 1 }
+static const struct certs_test certs_tests[] __initconst = {
+       TEST(certs_selftest_1_data, certs_selftest_1_pkcs7),
+};
+
+int __init fips_signature_selftest(void)
+{
+       struct key *keyring;
+       int ret, i;
+
+       pr_notice("Running certificate verification selftests\n");
+
+       keyring = keyring_alloc(".certs_selftest",
+                               GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
+                               (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+                               KEY_USR_VIEW | KEY_USR_READ |
+                               KEY_USR_SEARCH,
+                               KEY_ALLOC_NOT_IN_QUOTA,
+                               NULL, NULL);
+       if (IS_ERR(keyring))
+               panic("Can't allocate certs selftest keyring: %ld\n",
+                     PTR_ERR(keyring));
+
+       ret = x509_load_certificate_list(certs_selftest_keys,
+                                        sizeof(certs_selftest_keys) - 1, keyring);
+       if (ret < 0)
+               panic("Can't allocate certs selftest keyring: %d\n", ret);
+
+       for (i = 0; i < ARRAY_SIZE(certs_tests); i++) {
+               const struct certs_test *test = &certs_tests[i];
+               struct pkcs7_message *pkcs7;
+
+               pkcs7 = pkcs7_parse_message(test->pkcs7, test->pkcs7_len);
+               if (IS_ERR(pkcs7))
+                       panic("Certs selftest %d: pkcs7_parse_message() = %d\n", i, ret);
+
+               pkcs7_supply_detached_data(pkcs7, test->data, test->data_len);
+
+               ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE);
+               if (ret < 0)
+                       panic("Certs selftest %d: pkcs7_verify() = %d\n", i, ret);
+
+               ret = pkcs7_validate_trust(pkcs7, keyring);
+               if (ret < 0)
+                       panic("Certs selftest %d: pkcs7_validate_trust() = %d\n", i, ret);
+
+               pkcs7_free_message(pkcs7);
+       }
+
+       key_put(keyring);
+       return 0;
+}
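
[ Editor's note: one detail worth flagging in the new selftest: the blobs are written as C string literals, so each array carries a trailing NUL byte that is not part of the DER data, hence the sizeof(x) - 1 in the TEST() macro and in the x509_load_certificate_list() call. In miniature:

    static const u8 blob[] = "\x30\x82";    /* sizeof(blob) == 3 */
    #define BLOB_LEN(b) (sizeof(b) - 1)     /* 2 bytes of actual DER */
]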
similarity index 87%
rename from certs/common.c
rename to crypto/asymmetric_keys/x509_loader.c
index 16a2208..1bc169d 100644 (file)
@@ -2,11 +2,11 @@
 
 #include <linux/kernel.h>
 #include <linux/key.h>
-#include "common.h"
+#include <keys/asymmetric-type.h>
 
-int load_certificate_list(const u8 cert_list[],
-                         const unsigned long list_size,
-                         const struct key *keyring)
+int x509_load_certificate_list(const u8 cert_list[],
+                              const unsigned long list_size,
+                              const struct key *keyring)
 {
        key_ref_t key;
        const u8 *p, *end;
index 97a886c..a299c9c 100644 (file)
@@ -41,6 +41,15 @@ struct x509_certificate {
 };
 
 /*
+ * selftest.c
+ */
+#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST
+extern int __init fips_signature_selftest(void);
+#else
+static inline int fips_signature_selftest(void) { return 0; }
+#endif
+
+/*
  * x509_cert_parser.c
  */
 extern void x509_free_certificate(struct x509_certificate *cert);
index 77ed4e9..0b4943a 100644 (file)
@@ -244,9 +244,15 @@ static struct asymmetric_key_parser x509_key_parser = {
 /*
  * Module stuff
  */
+extern int __init certs_selftest(void);
 static int __init x509_key_init(void)
 {
-       return register_asymmetric_key_parser(&x509_key_parser);
+       int ret;
+
+       ret = register_asymmetric_key_parser(&x509_key_parser);
+       if (ret < 0)
+               return ret;
+       return fips_signature_selftest();
 }
 
 static void __exit x509_key_exit(void)
index e07782b..eaea733 100644 (file)
@@ -73,6 +73,7 @@ module_param(device_id_scheme, bool, 0444);
 static int only_lcd = -1;
 module_param(only_lcd, int, 0444);
 
+static bool may_report_brightness_keys;
 static int register_count;
 static DEFINE_MUTEX(register_count_mutex);
 static DEFINE_MUTEX(video_list_lock);
@@ -1222,6 +1223,9 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
        acpi_video_device_bind(video, data);
        acpi_video_device_find_cap(data);
 
+       if (data->cap._BCM && data->cap._BCL)
+               may_report_brightness_keys = true;
+
        mutex_lock(&video->device_list_lock);
        list_add_tail(&data->entry, &video->video_device_list);
        mutex_unlock(&video->device_list_lock);
@@ -1689,6 +1693,9 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
                break;
        }
 
+       if (keycode)
+               may_report_brightness_keys = true;
+
        acpi_notifier_call_chain(device, event, 0);
 
        if (keycode && (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS)) {
@@ -2249,6 +2256,7 @@ void acpi_video_unregister(void)
        if (register_count) {
                acpi_bus_unregister_driver(&acpi_video_bus);
                register_count = 0;
+               may_report_brightness_keys = false;
        }
        mutex_unlock(&register_count_mutex);
 }
@@ -2270,13 +2278,7 @@ void acpi_video_unregister_backlight(void)
 
 bool acpi_video_handles_brightness_key_presses(void)
 {
-       bool have_video_busses;
-
-       mutex_lock(&video_list_lock);
-       have_video_busses = !list_empty(&video_bus_head);
-       mutex_unlock(&video_list_lock);
-
-       return have_video_busses &&
+       return may_report_brightness_keys &&
               (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS);
 }
 EXPORT_SYMBOL(acpi_video_handles_brightness_key_presses);
index 86fa61a..e2db1bd 100644 (file)
@@ -298,7 +298,7 @@ EXPORT_SYMBOL_GPL(osc_cpc_flexible_adr_space_confirmed);
 bool osc_sb_native_usb4_support_confirmed;
 EXPORT_SYMBOL_GPL(osc_sb_native_usb4_support_confirmed);
 
-bool osc_sb_cppc_not_supported;
+bool osc_sb_cppc2_support_acked;
 
 static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
 static void acpi_bus_osc_negotiate_platform_control(void)
@@ -358,11 +358,6 @@ static void acpi_bus_osc_negotiate_platform_control(void)
                return;
        }
 
-#ifdef CONFIG_ACPI_CPPC_LIB
-       osc_sb_cppc_not_supported = !(capbuf_ret[OSC_SUPPORT_DWORD] &
-                       (OSC_SB_CPC_SUPPORT | OSC_SB_CPCV2_SUPPORT));
-#endif
-
        /*
         * Now run _OSC again with query flag clear and with the caps
         * supported by both the OS and the platform.
@@ -376,6 +371,10 @@ static void acpi_bus_osc_negotiate_platform_control(void)
 
        capbuf_ret = context.ret.pointer;
        if (context.ret.length > OSC_SUPPORT_DWORD) {
+#ifdef CONFIG_ACPI_CPPC_LIB
+               osc_sb_cppc2_support_acked = capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_CPCV2_SUPPORT;
+#endif
+
                osc_sb_apei_support_acked =
                        capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
                osc_pc_lpi_support_confirmed =
index 903528f..3c6d4ef 100644 (file)
@@ -578,6 +578,19 @@ bool __weak cpc_ffh_supported(void)
 }
 
 /**
+ * cpc_supported_by_cpu() - check if CPPC is supported by CPU
+ *
+ * Check if the architectural support for CPPC is present even
+ * if the _OSC hasn't prescribed it
+ *
+ * Return: true for supported, false for not supported
+ */
+bool __weak cpc_supported_by_cpu(void)
+{
+       return false;
+}
+
+/**
  * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
  *
  * Check and allocate the cppc_pcc_data memory.
@@ -684,8 +697,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
        acpi_status status;
        int ret = -ENODATA;
 
-       if (osc_sb_cppc_not_supported)
-               return -ENODEV;
+       if (!osc_sb_cppc2_support_acked) {
+               pr_debug("CPPC v2 _OSC not acked\n");
+               if (!cpc_supported_by_cpu())
+                       return -ENODEV;
+       }
 
        /* Parse the ACPI _CPC table for this CPU. */
        status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
@@ -766,7 +782,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 
                                        if (!osc_cpc_flexible_adr_space_confirmed) {
                                                pr_debug("Flexible address space capability not supported\n");
-                                               goto out_free;
+                                               if (!cpc_supported_by_cpu())
+                                                       goto out_free;
                                        }
 
                                        addr = ioremap(gas_t->address, gas_t->bit_width/8);
@@ -793,7 +810,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                                }
                                if (!osc_cpc_flexible_adr_space_confirmed) {
                                        pr_debug("Flexible address space capability not supported\n");
-                                       goto out_free;
+                                       if (!cpc_supported_by_cpu())
+                                               goto out_free;
                                }
                        } else {
                                if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
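
[ Editor's note: cpc_supported_by_cpu() is a new __weak hook: the default answers "no", and an architecture may override it to claim CPPC support even when the _OSC handshake did not ack CPPC v2, in which case the probe above proceeds instead of bailing out. A hedged sketch of what an x86 override could look like:

    /* Hypothetical arch override; the exact check is arch policy. */
    bool cpc_supported_by_cpu(void)
    {
            return boot_cpu_has(X86_FEATURE_CPPC);
    }
]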
index 0e3ed5e..0cb2032 100644 (file)
@@ -493,13 +493,8 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
                goto skip_probe;
 
        ret = amba_read_periphid(dev);
-       if (ret) {
-               if (ret != -EPROBE_DEFER) {
-                       amba_device_put(dev);
-                       goto err_out;
-               }
+       if (ret)
                goto err_release;
-       }
 
 skip_probe:
        ret = device_add(&dev->dev);
@@ -546,6 +541,7 @@ static int amba_deferred_retry(void)
                        continue;
 
                list_del_init(&ddev->node);
+               amba_device_put(ddev->dev);
                kfree(ddev);
        }
 
index 6725931..c2c3238 100644 (file)
@@ -90,7 +90,7 @@ static void cs5535_set_piomode(struct ata_port *ap, struct ata_device *adev)
        static const u16 pio_cmd_timings[5] = {
                0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131
        };
-       u32 reg, dummy;
+       u32 reg, __maybe_unused dummy;
        struct ata_device *pair = ata_dev_pair(adev);
 
        int mode = adev->pio_mode - XFER_PIO_0;
@@ -129,7 +129,7 @@ static void cs5535_set_dmamode(struct ata_port *ap, struct ata_device *adev)
        static const u32 mwdma_timings[3] = {
                0x7F0FFFF3, 0x7F035352, 0x7F024241
        };
-       u32 reg, dummy;
+       u32 reg, __maybe_unused dummy;
        int mode = adev->dma_mode;
 
        rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
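
[ Editor's note: rdmsr() writes both output halves, but these callers only consume the low word; tagging the high word __maybe_unused silences the unused-but-set-variable warning from newer compilers without changing behavior:

    u32 reg, __maybe_unused dummy;

    rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);  /* dummy unused */
]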
index 7cd789c..460d6f1 100644 (file)
@@ -486,7 +486,18 @@ static void device_link_release_fn(struct work_struct *work)
        /* Ensure that all references to the link object have been dropped. */
        device_link_synchronize_removal();
 
-       pm_runtime_release_supplier(link, true);
+       pm_runtime_release_supplier(link);
+       /*
+        * If supplier_preactivated is set, the link has been dropped between
+        * the pm_runtime_get_suppliers() and pm_runtime_put_suppliers() calls
+        * in __driver_probe_device().  In that case, drop the supplier's
+        * PM-runtime usage counter to remove the reference taken by
+        * pm_runtime_get_suppliers().
+        */
+       if (link->supplier_preactivated)
+               pm_runtime_put_noidle(link->supplier);
+
+       pm_request_idle(link->supplier);
 
        put_device(link->consumer);
        put_device(link->supplier);
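
[ Editor's note: the core.c and runtime.c hunks change the contract of pm_runtime_release_supplier(): it no longer takes a check_idle flag or idles the supplier itself. Callers drop the rpm_active references first and then decide explicitly whether to nudge the supplier toward idle:

    /* New calling convention: release references, then request idle. */
    pm_runtime_release_supplier(link);
    pm_request_idle(link->supplier);
]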
index 2ef23fc..4c98849 100644 (file)
@@ -564,6 +564,18 @@ ssize_t __weak cpu_show_srbds(struct device *dev,
        return sysfs_emit(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
+                                       struct device_attribute *attr, char *buf)
+{
+       return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_retbleed(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       return sysfs_emit(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -573,6 +585,8 @@ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
 static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
 static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
 static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
+static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
+static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_meltdown.attr,
@@ -584,6 +598,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_tsx_async_abort.attr,
        &dev_attr_itlb_multihit.attr,
        &dev_attr_srbds.attr,
+       &dev_attr_mmio_stale_data.attr,
+       &dev_attr_retbleed.attr,
        NULL
 };
 
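The __weak stubs above give every architecture a default "Not affected" answer; an affected architecture overrides the symbol to report its real state. A minimal sketch of an arch-side override (the mitigation string is illustrative, not the exact x86 wording):

#include <linux/cpu.h>
#include <linux/sysfs.h>

/* Arch-side override: the strong definition replaces the weak stub in
 * drivers/base/cpu.c at link time.  Illustrative string only. */
ssize_t cpu_show_retbleed(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "Mitigation: IBRS\n");
}
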
index d8d0fe6..397eb98 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/init.h>
 #include <linux/memory.h>
 #include <linux/of.h>
+#include <linux/backing-dev.h>
 
 #include "base.h"
 
@@ -20,6 +21,7 @@
 void __init driver_init(void)
 {
        /* These are the core pieces */
+       bdi_init(&noop_backing_dev_info);
        devtmpfs_init();
        devices_init();
        buses_init();
index 084d67f..bc60c9c 100644 (file)
@@ -558,7 +558,7 @@ static ssize_t hard_offline_page_store(struct device *dev,
        if (kstrtoull(buf, 0, &pfn) < 0)
                return -EINVAL;
        pfn >>= PAGE_SHIFT;
-       ret = memory_failure(pfn, 0);
+       ret = memory_failure(pfn, MF_SW_SIMULATED);
        if (ret == -EOPNOTSUPP)
                ret = 0;
        return ret ? ret : count;
index 676dc72..949907e 100644 (file)
@@ -308,13 +308,10 @@ static int rpm_get_suppliers(struct device *dev)
 /**
  * pm_runtime_release_supplier - Drop references to device link's supplier.
  * @link: Target device link.
- * @check_idle: Whether or not to check if the supplier device is idle.
  *
- * Drop all runtime PM references associated with @link to its supplier device
- * and if @check_idle is set, check if that device is idle (and so it can be
- * suspended).
+ * Drop all runtime PM references associated with @link to its supplier device.
  */
-void pm_runtime_release_supplier(struct device_link *link, bool check_idle)
+void pm_runtime_release_supplier(struct device_link *link)
 {
        struct device *supplier = link->supplier;
 
@@ -327,9 +324,6 @@ void pm_runtime_release_supplier(struct device_link *link, bool check_idle)
        while (refcount_dec_not_one(&link->rpm_active) &&
               atomic_read(&supplier->power.usage_count) > 0)
                pm_runtime_put_noidle(supplier);
-
-       if (check_idle)
-               pm_request_idle(supplier);
 }
 
 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
@@ -337,8 +331,11 @@ static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
        struct device_link *link;
 
        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
-                               device_links_read_lock_held())
-               pm_runtime_release_supplier(link, try_to_suspend);
+                               device_links_read_lock_held()) {
+               pm_runtime_release_supplier(link);
+               if (try_to_suspend)
+                       pm_request_idle(link->supplier);
+       }
 }
 
 static void rpm_put_suppliers(struct device *dev)
@@ -1771,7 +1768,6 @@ void pm_runtime_get_suppliers(struct device *dev)
                if (link->flags & DL_FLAG_PM_RUNTIME) {
                        link->supplier_preactivated = true;
                        pm_runtime_get_sync(link->supplier);
-                       refcount_inc(&link->rpm_active);
                }
 
        device_links_read_unlock(idx);
@@ -1791,19 +1787,8 @@ void pm_runtime_put_suppliers(struct device *dev)
        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                                device_links_read_lock_held())
                if (link->supplier_preactivated) {
-                       bool put;
-
                        link->supplier_preactivated = false;
-
-                       spin_lock_irq(&dev->power.lock);
-
-                       put = pm_runtime_status_suspended(dev) &&
-                             refcount_dec_not_one(&link->rpm_active);
-
-                       spin_unlock_irq(&dev->power.lock);
-
-                       if (put)
-                               pm_runtime_put(link->supplier);
+                       pm_runtime_put(link->supplier);
                }
 
        device_links_read_unlock(idx);
@@ -1838,7 +1823,8 @@ void pm_runtime_drop_link(struct device_link *link)
                return;
 
        pm_runtime_drop_link_count(link->consumer);
-       pm_runtime_release_supplier(link, true);
+       pm_runtime_release_supplier(link);
+       pm_request_idle(link->supplier);
 }
 
 static bool pm_runtime_need_not_resume(struct device *dev)
index 400c741..a6db605 100644 (file)
@@ -252,6 +252,7 @@ static void regmap_irq_enable(struct irq_data *data)
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+       unsigned int reg = irq_data->reg_offset / map->reg_stride;
        unsigned int mask, type;
 
        type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;
@@ -268,14 +269,14 @@ static void regmap_irq_enable(struct irq_data *data)
         * at the corresponding offset in regmap_irq_set_type().
         */
        if (d->chip->type_in_mask && type)
-               mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
+               mask = d->type_buf[reg] & irq_data->mask;
        else
                mask = irq_data->mask;
 
        if (d->chip->clear_on_unmask)
                d->clear_status = true;
 
-       d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
+       d->mask_buf[reg] &= ~mask;
 }
 
 static void regmap_irq_disable(struct irq_data *data)
@@ -386,6 +387,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
                subreg = &chip->sub_reg_offsets[b];
                for (i = 0; i < subreg->num_regs; i++) {
                        unsigned int offset = subreg->offset[i];
+                       unsigned int index = offset / map->reg_stride;
 
                        if (chip->not_fixed_stride)
                                ret = regmap_read(map,
@@ -394,7 +396,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
                        else
                                ret = regmap_read(map,
                                                chip->status_base + offset,
-                                               &data->status_buf[offset]);
+                                               &data->status_buf[index]);
 
                        if (ret)
                                break;
index 2221d98..c3517cc 100644 (file)
@@ -1880,8 +1880,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
  */
 bool regmap_can_raw_write(struct regmap *map)
 {
-       return map->bus && map->bus->write && map->format.format_val &&
-               map->format.format_reg;
+       return map->write && map->format.format_val && map->format.format_reg;
 }
 EXPORT_SYMBOL_GPL(regmap_can_raw_write);
 
@@ -2155,10 +2154,9 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg,
        size_t write_len;
        int ret;
 
-       if (!map->bus)
-               return -EINVAL;
-       if (!map->bus->write)
+       if (!map->write)
                return -ENOTSUPP;
+
        if (val_len % map->format.val_bytes)
                return -EINVAL;
        if (!IS_ALIGNED(reg, map->reg_stride))
@@ -2278,7 +2276,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
         * Some devices don't support bulk write, for them we have a series of
         * single write operations.
         */
-       if (!map->bus || !map->format.parse_inplace) {
+       if (!map->write || !map->format.parse_inplace) {
                map->lock(map->lock_arg);
                for (i = 0; i < val_count; i++) {
                        unsigned int ival;
@@ -2904,6 +2902,9 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
        size_t read_len;
        int ret;
 
+       if (!map->read)
+               return -ENOTSUPP;
+
        if (val_len % map->format.val_bytes)
                return -EINVAL;
        if (!IS_ALIGNED(reg, map->reg_stride))
@@ -3017,7 +3018,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
        if (val_count == 0)
                return -EINVAL;
 
-       if (map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
+       if (map->read && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
                ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
                if (ret != 0)
                        return ret;
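
These hunks retire map->bus as a capability test: a regmap can now obtain its raw read/write operations from regmap_config as well as from a bus, so the presence of map->read/map->write is the correct gate for raw and no-increment accesses. A hypothetical caller-side sketch of probing the capability first:

#include <linux/errno.h>
#include <linux/regmap.h>

/* Hypothetical helper: raw block writes need map->write underneath,
 * which regmap_can_raw_write() now reports correctly for maps whose
 * ops come from regmap_config rather than a bus. */
static int write_block(struct regmap *map, unsigned int reg,
                       const void *buf, size_t len)
{
        if (!regmap_can_raw_write(map))
                return -ENOTSUPP;

        return regmap_raw_write(map, reg, buf, len);
}
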
index a88ce44..3646c0c 100644 (file)
@@ -152,6 +152,10 @@ static unsigned int xen_blkif_max_ring_order;
 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
 
+static bool __read_mostly xen_blkif_trusted = true;
+module_param_named(trusted, xen_blkif_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
 #define BLK_RING_SIZE(info)    \
        __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
 
@@ -210,6 +214,7 @@ struct blkfront_info
        unsigned int feature_discard:1;
        unsigned int feature_secdiscard:1;
        unsigned int feature_persistent:1;
+       unsigned int bounce:1;
        unsigned int discard_granularity;
        unsigned int discard_alignment;
        /* Number of 4KB segments handled */
@@ -310,8 +315,8 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
                if (!gnt_list_entry)
                        goto out_of_memory;
 
-               if (info->feature_persistent) {
-                       granted_page = alloc_page(GFP_NOIO);
+               if (info->bounce) {
+                       granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
                        if (!granted_page) {
                                kfree(gnt_list_entry);
                                goto out_of_memory;
@@ -330,7 +335,7 @@ out_of_memory:
        list_for_each_entry_safe(gnt_list_entry, n,
                                 &rinfo->grants, node) {
                list_del(&gnt_list_entry->node);
-               if (info->feature_persistent)
+               if (info->bounce)
                        __free_page(gnt_list_entry->page);
                kfree(gnt_list_entry);
                i--;
@@ -376,7 +381,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
        /* Assign a gref to this page */
        gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
        BUG_ON(gnt_list_entry->gref == -ENOSPC);
-       if (info->feature_persistent)
+       if (info->bounce)
                grant_foreign_access(gnt_list_entry, info);
        else {
                /* Grant access to the GFN passed by the caller */
@@ -400,7 +405,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head,
        /* Assign a gref to this page */
        gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
        BUG_ON(gnt_list_entry->gref == -ENOSPC);
-       if (!info->feature_persistent) {
+       if (!info->bounce) {
                struct page *indirect_page;
 
                /* Fetch a pre-allocated page to use for indirect grefs */
@@ -703,7 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
                .grant_idx = 0,
                .segments = NULL,
                .rinfo = rinfo,
-               .need_copy = rq_data_dir(req) && info->feature_persistent,
+               .need_copy = rq_data_dir(req) && info->bounce,
        };
 
        /*
@@ -981,11 +986,12 @@ static void xlvbd_flush(struct blkfront_info *info)
 {
        blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
                              info->feature_fua ? true : false);
-       pr_info("blkfront: %s: %s %s %s %s %s\n",
+       pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
                info->gd->disk_name, flush_info(info),
                "persistent grants:", info->feature_persistent ?
                "enabled;" : "disabled;", "indirect descriptors:",
-               info->max_indirect_segments ? "enabled;" : "disabled;");
+               info->max_indirect_segments ? "enabled;" : "disabled;",
+               "bounce buffer:", info->bounce ? "enabled" : "disabled;");
 }
 
 static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -1207,7 +1213,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
        if (!list_empty(&rinfo->indirect_pages)) {
                struct page *indirect_page, *n;
 
-               BUG_ON(info->feature_persistent);
+               BUG_ON(info->bounce);
                list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
                        list_del(&indirect_page->lru);
                        __free_page(indirect_page);
@@ -1224,7 +1230,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
                                                          NULL);
                                rinfo->persistent_gnts_c--;
                        }
-                       if (info->feature_persistent)
+                       if (info->bounce)
                                __free_page(persistent_gnt->page);
                        kfree(persistent_gnt);
                }
@@ -1245,7 +1251,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
                for (j = 0; j < segs; j++) {
                        persistent_gnt = rinfo->shadow[i].grants_used[j];
                        gnttab_end_foreign_access(persistent_gnt->gref, NULL);
-                       if (info->feature_persistent)
+                       if (info->bounce)
                                __free_page(persistent_gnt->page);
                        kfree(persistent_gnt);
                }
@@ -1428,7 +1434,7 @@ static int blkif_completion(unsigned long *id,
        data.s = s;
        num_sg = s->num_sg;
 
-       if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
+       if (bret->operation == BLKIF_OP_READ && info->bounce) {
                for_each_sg(s->sg, sg, num_sg, i) {
                        BUG_ON(sg->offset + sg->length > PAGE_SIZE);
 
@@ -1487,7 +1493,7 @@ static int blkif_completion(unsigned long *id,
                                 * Add the used indirect page back to the list of
                                 * available pages for indirect grefs.
                                 */
-                               if (!info->feature_persistent) {
+                               if (!info->bounce) {
                                        indirect_page = s->indirect_grants[i]->page;
                                        list_add(&indirect_page->lru, &rinfo->indirect_pages);
                                }
@@ -1764,6 +1770,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
        if (!info)
                return -ENODEV;
 
+       /* Check if backend is trusted. */
+       info->bounce = !xen_blkif_trusted ||
+                      !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
        max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
                                              "max-ring-page-order", 0);
        ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
@@ -2114,9 +2124,11 @@ static void blkfront_closing(struct blkfront_info *info)
                return;
 
        /* No more blkif_request(). */
-       blk_mq_stop_hw_queues(info->rq);
-       blk_mark_disk_dead(info->gd);
-       set_capacity(info->gd, 0);
+       if (info->rq && info->gd) {
+               blk_mq_stop_hw_queues(info->rq);
+               blk_mark_disk_dead(info->gd);
+               set_capacity(info->gd, 0);
+       }
 
        for_each_rinfo(info, rinfo, i) {
                /* No more gnttab callback work. */
@@ -2171,17 +2183,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
        if (err)
                goto out_of_memory;
 
-       if (!info->feature_persistent && info->max_indirect_segments) {
+       if (!info->bounce && info->max_indirect_segments) {
                /*
-                * We are using indirect descriptors but not persistent
-                * grants, we need to allocate a set of pages that can be
+                * We are using indirect descriptors but don't have a bounce
+                * buffer, we need to allocate a set of pages that can be
                 * used for mapping indirect grefs
                 */
                int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
 
                BUG_ON(!list_empty(&rinfo->indirect_pages));
                for (i = 0; i < num; i++) {
-                       struct page *indirect_page = alloc_page(GFP_KERNEL);
+                       struct page *indirect_page = alloc_page(GFP_KERNEL |
+                                                               __GFP_ZERO);
                        if (!indirect_page)
                                goto out_of_memory;
                        list_add(&indirect_page->lru, &rinfo->indirect_pages);
@@ -2274,6 +2287,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
                info->feature_persistent =
                        !!xenbus_read_unsigned(info->xbdev->otherend,
                                               "feature-persistent", 0);
+       if (info->feature_persistent)
+               info->bounce = true;
 
        indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
                                        "feature-max-indirect-segments", 0);
@@ -2457,16 +2472,19 @@ static int blkfront_remove(struct xenbus_device *xbdev)
 
        dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
 
-       del_gendisk(info->gd);
+       if (info->gd)
+               del_gendisk(info->gd);
 
        mutex_lock(&blkfront_mutex);
        list_del(&info->info_list);
        mutex_unlock(&blkfront_mutex);
 
        blkif_free(info, 0);
-       xlbd_release_minors(info->gd->first_minor, info->gd->minors);
-       blk_cleanup_disk(info->gd);
-       blk_mq_free_tag_set(&info->tag_set);
+       if (info->gd) {
+               xlbd_release_minors(info->gd->first_minor, info->gd->minors);
+               blk_cleanup_disk(info->gd);
+               blk_mq_free_tag_set(&info->tag_set);
+       }
 
        kfree(info);
        return 0;
@@ -2542,6 +2560,13 @@ static void blkfront_delay_work(struct work_struct *work)
        struct blkfront_info *info;
        bool need_schedule_work = false;
 
+       /*
+        * Note that when using bounce buffers but not persistent grants
+        * there's no need to run blkfront_delay_work because grants are
+        * revoked in blkif_completion or else an error is reported and the
+        * connection is closed.
+        */
+
        mutex_lock(&blkfront_mutex);
 
        list_for_each_entry(info, &info_list, info_list) {
index b25ff94..63b1b4a 100644 (file)
@@ -175,10 +175,9 @@ static int bt1_apb_request_rst(struct bt1_apb *apb)
        int ret;
 
        apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst");
-       if (IS_ERR(apb->prst)) {
-               dev_warn(apb->dev, "Couldn't get reset control line\n");
-               return PTR_ERR(apb->prst);
-       }
+       if (IS_ERR(apb->prst))
+               return dev_err_probe(apb->dev, PTR_ERR(apb->prst),
+                                    "Couldn't get reset control line\n");
 
        ret = reset_control_deassert(apb->prst);
        if (ret)
@@ -199,10 +198,9 @@ static int bt1_apb_request_clk(struct bt1_apb *apb)
        int ret;
 
        apb->pclk = devm_clk_get(apb->dev, "pclk");
-       if (IS_ERR(apb->pclk)) {
-               dev_err(apb->dev, "Couldn't get APB clock descriptor\n");
-               return PTR_ERR(apb->pclk);
-       }
+       if (IS_ERR(apb->pclk))
+               return dev_err_probe(apb->dev, PTR_ERR(apb->pclk),
+                                    "Couldn't get APB clock descriptor\n");
 
        ret = clk_prepare_enable(apb->pclk);
        if (ret) {
index e7a6744..70e49a6 100644 (file)
@@ -135,10 +135,9 @@ static int bt1_axi_request_rst(struct bt1_axi *axi)
        int ret;
 
        axi->arst = devm_reset_control_get_optional_exclusive(axi->dev, "arst");
-       if (IS_ERR(axi->arst)) {
-               dev_warn(axi->dev, "Couldn't get reset control line\n");
-               return PTR_ERR(axi->arst);
-       }
+       if (IS_ERR(axi->arst))
+               return dev_err_probe(axi->dev, PTR_ERR(axi->arst),
+                                    "Couldn't get reset control line\n");
 
        ret = reset_control_deassert(axi->arst);
        if (ret)
@@ -159,10 +158,9 @@ static int bt1_axi_request_clk(struct bt1_axi *axi)
        int ret;
 
        axi->aclk = devm_clk_get(axi->dev, "aclk");
-       if (IS_ERR(axi->aclk)) {
-               dev_err(axi->dev, "Couldn't get AXI Interconnect clock\n");
-               return PTR_ERR(axi->aclk);
-       }
+       if (IS_ERR(axi->aclk))
+               return dev_err_probe(axi->dev, PTR_ERR(axi->aclk),
+                                    "Couldn't get AXI Interconnect clock\n");
 
        ret = clk_prepare_enable(axi->aclk);
        if (ret) {
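
Both Baikal-T1 drivers switch to dev_err_probe(), which logs the failure and returns the error in one statement while staying quiet for -EPROBE_DEFER, so ordinary deferral no longer spams the log (the old code also inconsistently used dev_warn() for one resource and dev_err() for the other). The shape of the idiom, sketched with an assumed clock name:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Probe-path sketch: one statement logs (except for -EPROBE_DEFER,
 * which is recorded silently) and propagates the error code. */
static int request_pclk(struct device *dev, struct clk **clk)
{
        *clk = devm_clk_get(dev, "pclk");
        if (IS_ERR(*clk))
                return dev_err_probe(dev, PTR_ERR(*clk),
                                     "Couldn't get clock descriptor\n");
        return 0;
}
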
index e81a970..6143dbf 100644 (file)
@@ -1239,14 +1239,14 @@ error_cleanup_mc_io:
 static int fsl_mc_bus_remove(struct platform_device *pdev)
 {
        struct fsl_mc *mc = platform_get_drvdata(pdev);
+       struct fsl_mc_io *mc_io;
 
        if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev))
                return -EINVAL;
 
+       mc_io = mc->root_mc_bus_dev->mc_io;
        fsl_mc_device_remove(mc->root_mc_bus_dev);
-
-       fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
-       mc->root_mc_bus_dev->mc_io = NULL;
+       fsl_destroy_mc_io(mc_io);
 
        bus_unregister_notifier(&fsl_mc_bus_type, &fsl_mc_nb);
 
index 0e22e3b..38aad99 100644 (file)
@@ -1019,7 +1019,7 @@ static struct parport_driver lp_driver = {
 
 static int __init lp_init(void)
 {
-       int i, err = 0;
+       int i, err;
 
        if (parport_nr[0] == LP_PARPORT_OFF)
                return 0;
index 655e327..a1af90b 100644 (file)
@@ -87,7 +87,7 @@ static struct fasync_struct *fasync;
 
 /* Control how we warn userspace. */
 static struct ratelimit_state urandom_warning =
-       RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+       RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
 static int ratelimit_disable __read_mostly =
        IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
 module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
@@ -408,7 +408,7 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
 
        /*
         * Immediately overwrite the ChaCha key at index 4 with random
-        * bytes, in case userspace causes copy_to_user() below to sleep
+        * bytes, in case userspace causes copy_to_iter() below to sleep
         * forever, so that we still retain forward secrecy in that case.
         */
        crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
@@ -1009,7 +1009,7 @@ void add_interrupt_randomness(int irq)
        if (new_count & MIX_INFLIGHT)
                return;
 
-       if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ))
+       if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
                return;
 
        if (unlikely(!fast_pool->mix.func))
@@ -1174,7 +1174,7 @@ static void __cold entropy_timer(struct timer_list *timer)
  */
 static void __cold try_to_generate_entropy(void)
 {
-       enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = 32 };
+       enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 30 };
        struct entropy_timer_state stack;
        unsigned int i, num_different = 0;
        unsigned long last = random_get_entropy();
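
The RATELIMIT_STATE_INIT_FLAGS conversion above is the flags-taking variant of RATELIMIT_STATE_INIT; with RATELIMIT_MSG_ON_RELEASE the periodic "callbacks suppressed" lines are dropped and the accumulated total is reported only if ratelimit_state_exit() is run on the state. A hedged usage sketch with made-up names:

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* Hypothetical user: at most 3 messages per second; suppression counts
 * are saved for ratelimit_state_exit() rather than printed per interval. */
static struct ratelimit_state noisy_rs =
        RATELIMIT_STATE_INIT_FLAGS("noisy_rs", HZ, 3, RATELIMIT_MSG_ON_RELEASE);

static void report_noisy_event(void)
{
        if (__ratelimit(&noisy_rs))
                pr_notice("noisy event occurred\n");
}
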
index d1535ac..81cb909 100644 (file)
@@ -213,7 +213,7 @@ static int lan966x_gate_clk_register(struct device *dev,
 
                hw_data->hws[i] =
                        devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name,
-                                                 "lan966x", 0, base,
+                                                 "lan966x", 0, gate_base,
                                                  clk_gate_desc[idx].bit_idx,
                                                  0, &clk_gate_lock);
 
index 0408701..e893815 100644 (file)
@@ -111,6 +111,7 @@ int stm32_rcc_reset_init(struct device *dev, const struct of_device_id *match,
        if (!reset_data)
                return -ENOMEM;
 
+       spin_lock_init(&reset_data->lock);
        reset_data->membase = base;
        reset_data->rcdev.owner = THIS_MODULE;
        reset_data->rcdev.ops = &stm32_reset_ops;
index 29a8c71..b7962e5 100644 (file)
@@ -138,6 +138,7 @@ static struct ccu_common *sun50i_h6_r_ccu_clks[] = {
        &r_apb2_rsb_clk.common,
        &r_apb1_ir_clk.common,
        &r_apb1_w1_clk.common,
+       &r_apb1_rtc_clk.common,
        &ir_clk.common,
        &w1_clk.common,
 };
index ff188ab..bb47610 100644 (file)
@@ -565,4 +565,3 @@ void __init hv_init_clocksource(void)
        hv_sched_clock_offset = hv_read_reference_counter();
        hv_setup_sched_clock(read_hv_sched_clock_msr);
 }
-EXPORT_SYMBOL_GPL(hv_init_clocksource);
index 46023ad..4536ed4 100644 (file)
@@ -684,7 +684,7 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
        if (!devpriv->usb_rx_buf)
                return -ENOMEM;
 
-       size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE);
+       size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE);
        devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
        if (!devpriv->usb_tx_buf)
                return -ENOMEM;
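
The bug here: the transmit buffer was sized from the receive endpoint's wMaxPacketSize, under-allocating it whenever the two endpoints differ. Each direction must size its buffer from its own endpoint, restated with hypothetical per-direction variables:

/* Size each direction from its own endpoint descriptor. */
int rx_size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE);
int tx_size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE);
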
index 7be38bc..9ac75c1 100644 (file)
@@ -566,6 +566,28 @@ static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
+static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
+{
+       int ret;
+
+       ret = amd_pstate_enable(true);
+       if (ret)
+               pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
+
+       return ret;
+}
+
+static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
+{
+       int ret;
+
+       ret = amd_pstate_enable(false);
+       if (ret)
+               pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
+
+       return ret;
+}
+
 /* Sysfs attributes */
 
 /*
@@ -636,6 +658,8 @@ static struct cpufreq_driver amd_pstate_driver = {
        .target         = amd_pstate_target,
        .init           = amd_pstate_cpu_init,
        .exit           = amd_pstate_cpu_exit,
+       .suspend        = amd_pstate_cpu_suspend,
+       .resume         = amd_pstate_cpu_resume,
        .set_boost      = amd_pstate_set_boost,
        .name           = "amd-pstate",
        .attr           = amd_pstate_attr,
index 96de153..2c96de3 100644 (file)
@@ -127,6 +127,7 @@ static const struct of_device_id blocklist[] __initconst = {
        { .compatible = "mediatek,mt8173", },
        { .compatible = "mediatek,mt8176", },
        { .compatible = "mediatek,mt8183", },
+       { .compatible = "mediatek,mt8186", },
        { .compatible = "mediatek,mt8365", },
        { .compatible = "mediatek,mt8516", },
 
index 37a1eb2..76f6b38 100644 (file)
@@ -439,9 +439,13 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
 
        /* Both presence and absence of sram regulator are valid cases. */
        info->sram_reg = regulator_get_optional(cpu_dev, "sram");
-       if (IS_ERR(info->sram_reg))
+       if (IS_ERR(info->sram_reg)) {
+               ret = PTR_ERR(info->sram_reg);
+               if (ret == -EPROBE_DEFER)
+                       goto out_free_resources;
+
                info->sram_reg = NULL;
-       else {
+       } else {
                ret = regulator_enable(info->sram_reg);
                if (ret) {
                        dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu);
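
The fix separates "supply not ready yet" from "supply genuinely absent": only -EPROBE_DEFER aborts the probe (so it is retried once the regulator appears); any other error means the optional supply is simply not there. The idiom, sketched standalone with assumed names:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Optional-supply idiom: defer if the regulator may still show up,
 * otherwise treat it as absent and carry on without it. */
static struct regulator *get_optional_sram(struct device *dev)
{
        struct regulator *reg = regulator_get_optional(dev, "sram");

        if (IS_ERR(reg)) {
                if (PTR_ERR(reg) == -EPROBE_DEFER)
                        return reg;     /* propagate for a later retry */
                reg = NULL;             /* absent is valid for an optional supply */
        }
        return reg;
}
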
index 20f64a8..4b8ee20 100644 (file)
@@ -470,6 +470,10 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
        if (slew_done_gpio_np)
                slew_done_gpio = read_gpio(slew_done_gpio_np);
 
+       of_node_put(volt_gpio_np);
+       of_node_put(freq_gpio_np);
+       of_node_put(slew_done_gpio_np);
+
        /* If we use the frequency GPIOs, calculate the min/max speeds based
         * on the bus frequencies
         */
index 0253731..36c7958 100644 (file)
@@ -442,6 +442,9 @@ static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
        struct platform_device *pdev = cpufreq_get_driver_data();
        int ret;
 
+       if (data->throttle_irq <= 0)
+               return 0;
+
        ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
        if (ret)
                dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
@@ -469,6 +472,9 @@ static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
 
 static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
 {
+       if (data->throttle_irq <= 0)
+               return;
+
        free_irq(data->throttle_irq, data);
 }
 
index 6b6b20d..573b417 100644 (file)
@@ -275,6 +275,7 @@ static int qoriq_cpufreq_probe(struct platform_device *pdev)
 
        np = of_find_matching_node(NULL, qoriq_cpufreq_blacklist);
        if (np) {
+               of_node_put(np);
                dev_info(&pdev->dev, "Disabling due to erratum A-008083");
                return -ENODEV;
        }
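
Same reference-count rule as the pmac fix above: the of_find_* helpers return a node with its refcount elevated, and the caller must drop it on every path, even when the lookup only answers a yes/no question. Sketched as a hypothetical predicate:

#include <linux/of.h>

/* The put is required even for a pure "is this erratum present" test;
 * of_node_put(NULL) is a no-op, so no NULL check is needed. */
static bool has_blacklisted_node(const struct of_device_id *blacklist)
{
        struct device_node *np = of_find_matching_node(NULL, blacklist);

        of_node_put(np);
        return np != NULL;
}
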
index be7f512..747aa53 100644 (file)
@@ -3,7 +3,8 @@
 # ARM CPU Idle drivers
 #
 config ARM_CPUIDLE
-       bool "Generic ARM/ARM64 CPU idle Driver"
+       bool "Generic ARM CPU idle Driver"
+       depends on ARM
        select DT_IDLE_STATES
        select CPU_IDLE_MULTIPLE_DRIVERS
        help
index ee99c02..3e6aa31 100644 (file)
@@ -133,98 +133,6 @@ config CRYPTO_PAES_S390
          Select this option if you want to use the paes cipher
          for example to use protected key encrypted devices.
 
-config CRYPTO_SHA1_S390
-       tristate "SHA1 digest algorithm"
-       depends on S390
-       select CRYPTO_HASH
-       help
-         This is the s390 hardware accelerated implementation of the
-         SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
-
-         It is available as of z990.
-
-config CRYPTO_SHA256_S390
-       tristate "SHA256 digest algorithm"
-       depends on S390
-       select CRYPTO_HASH
-       help
-         This is the s390 hardware accelerated implementation of the
-         SHA256 secure hash standard (DFIPS 180-2).
-
-         It is available as of z9.
-
-config CRYPTO_SHA512_S390
-       tristate "SHA384 and SHA512 digest algorithm"
-       depends on S390
-       select CRYPTO_HASH
-       help
-         This is the s390 hardware accelerated implementation of the
-         SHA512 secure hash standard.
-
-         It is available as of z10.
-
-config CRYPTO_SHA3_256_S390
-       tristate "SHA3_224 and SHA3_256 digest algorithm"
-       depends on S390
-       select CRYPTO_HASH
-       help
-         This is the s390 hardware accelerated implementation of the
-         SHA3_256 secure hash standard.
-
-         It is available as of z14.
-
-config CRYPTO_SHA3_512_S390
-       tristate "SHA3_384 and SHA3_512 digest algorithm"
-       depends on S390
-       select CRYPTO_HASH
-       help
-         This is the s390 hardware accelerated implementation of the
-         SHA3_512 secure hash standard.
-
-         It is available as of z14.
-
-config CRYPTO_DES_S390
-       tristate "DES and Triple DES cipher algorithms"
-       depends on S390
-       select CRYPTO_ALGAPI
-       select CRYPTO_SKCIPHER
-       select CRYPTO_LIB_DES
-       help
-         This is the s390 hardware accelerated implementation of the
-         DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
-
-         As of z990 the ECB and CBC mode are hardware accelerated.
-         As of z196 the CTR mode is hardware accelerated.
-
-config CRYPTO_AES_S390
-       tristate "AES cipher algorithms"
-       depends on S390
-       select CRYPTO_ALGAPI
-       select CRYPTO_SKCIPHER
-       help
-         This is the s390 hardware accelerated implementation of the
-         AES cipher algorithms (FIPS-197).
-
-         As of z9 the ECB and CBC modes are hardware accelerated
-         for 128 bit keys.
-         As of z10 the ECB and CBC modes are hardware accelerated
-         for all AES key sizes.
-         As of z196 the CTR mode is hardware accelerated for all AES
-         key sizes and XTS mode is hardware accelerated for 256 and
-         512 bit keys.
-
-config CRYPTO_CHACHA_S390
-       tristate "ChaCha20 stream cipher"
-       depends on S390
-       select CRYPTO_SKCIPHER
-       select CRYPTO_LIB_CHACHA_GENERIC
-       select CRYPTO_ARCH_HAVE_LIB_CHACHA
-       help
-         This is the s390 SIMD implementation of the ChaCha20 stream
-         cipher (RFC 7539).
-
-         It is available as of z13.
-
 config S390_PRNG
        tristate "Pseudo random number generator device driver"
        depends on S390
@@ -238,29 +146,6 @@ config S390_PRNG
 
          It is available as of z9.
 
-config CRYPTO_GHASH_S390
-       tristate "GHASH hash function"
-       depends on S390
-       select CRYPTO_HASH
-       help
-         This is the s390 hardware accelerated implementation of GHASH,
-         the hash function used in GCM (Galois/Counter mode).
-
-         It is available as of z196.
-
-config CRYPTO_CRC32_S390
-       tristate "CRC-32 algorithms"
-       depends on S390
-       select CRYPTO_HASH
-       select CRC32
-       help
-         Select this option if you want to use hardware accelerated
-         implementations of CRC algorithms.  With this option, you
-         can optimize the computation of CRC-32 (IEEE 802.3 Ethernet)
-         and CRC-32C (Castagnoli).
-
-         It is available with IBM z13 or later.
-
 config CRYPTO_DEV_NIAGARA2
        tristate "Niagara2 Stream Processing Unit driver"
        select CRYPTO_LIB_DES
index 9dba52f..7d79a87 100644 (file)
@@ -85,17 +85,9 @@ static int sp_get_irqs(struct sp_device *sp)
        struct sp_platform *sp_platform = sp->dev_specific;
        struct device *dev = sp->dev;
        struct platform_device *pdev = to_platform_device(dev);
-       unsigned int i, count;
        int ret;
 
-       for (i = 0, count = 0; i < pdev->num_resources; i++) {
-               struct resource *res = &pdev->resource[i];
-
-               if (resource_type(res) == IORESOURCE_IRQ)
-                       count++;
-       }
-
-       sp_platform->irq_count = count;
+       sp_platform->irq_count = platform_irq_count(pdev);
 
        ret = platform_get_irq(pdev, 0);
        if (ret < 0) {
@@ -104,7 +96,7 @@ static int sp_get_irqs(struct sp_device *sp)
        }
 
        sp->psp_irq = ret;
-       if (count == 1) {
+       if (sp_platform->irq_count == 1) {
                sp->ccp_irq = ret;
        } else {
                ret = platform_get_irq(pdev, 1);
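
platform_irq_count() replaces the open-coded walk over pdev->resource. One behavioral nuance worth noting: unlike the manual count, the helper can return a negative errno (for instance -EPROBE_DEFER while an IRQ domain is still coming up), which a caller may want to propagate. A hedged sketch with a hypothetical wrapper:

#include <linux/platform_device.h>

/* Hypothetical wrapper: count IRQs via the core helper and propagate
 * errors (e.g. -EPROBE_DEFER) instead of treating them as a count. */
static int get_irq_count(struct platform_device *pdev, unsigned int *count)
{
        int nr = platform_irq_count(pdev);

        if (nr < 0)
                return nr;

        *count = nr;
        return 0;
}
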
index 0e89a7a..bfc8ee8 100644 (file)
@@ -197,7 +197,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
        else
                cxld->target_type = CXL_DECODER_ACCELERATOR;
 
-       if (is_cxl_endpoint(to_cxl_port(cxld->dev.parent)))
+       if (is_endpoint_decoder(&cxld->dev))
                return 0;
 
        target_list.value =
index 54f4347..cbf23be 100644 (file)
@@ -355,11 +355,13 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
                return -EBUSY;
 
        /* Check the input buffer is the expected size */
-       if (info->size_in != send_cmd->in.size)
+       if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
+           (info->size_in != send_cmd->in.size))
                return -ENOMEM;
 
        /* Check the output buffer is at least large enough */
-       if (send_cmd->out.size < info->size_out)
+       if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
+           (send_cmd->out.size < info->size_out))
                return -ENOMEM;
 
        *mem_cmd = (struct cxl_mem_command) {
index ea60abd..dbce99b 100644 (file)
@@ -272,7 +272,7 @@ static const struct device_type cxl_decoder_root_type = {
        .groups = cxl_decoder_root_attribute_groups,
 };
 
-static bool is_endpoint_decoder(struct device *dev)
+bool is_endpoint_decoder(struct device *dev)
 {
        return dev->type == &cxl_decoder_endpoint_type;
 }
index 140dc32..6799b27 100644 (file)
@@ -340,6 +340,7 @@ struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
 
 struct cxl_decoder *to_cxl_decoder(struct device *dev);
 bool is_root_decoder(struct device *dev);
+bool is_endpoint_decoder(struct device *dev);
 bool is_cxl_decoder(struct device *dev);
 struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
                                           unsigned int nr_targets);
index 60d10ee..7df0b05 100644 (file)
@@ -300,13 +300,13 @@ struct cxl_mbox_identify {
 } __packed;
 
 struct cxl_mbox_get_lsa {
-       u32 offset;
-       u32 length;
+       __le32 offset;
+       __le32 length;
 } __packed;
 
 struct cxl_mbox_set_lsa {
-       u32 offset;
-       u32 reserved;
+       __le32 offset;
+       __le32 reserved;
        u8 data[];
 } __packed;
 
index c310f1f..a979d0b 100644 (file)
@@ -29,6 +29,7 @@ static int create_endpoint(struct cxl_memdev *cxlmd,
 {
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_port *endpoint;
+       int rc;
 
        endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
                                     cxlds->component_reg_phys, parent_port);
@@ -37,13 +38,17 @@ static int create_endpoint(struct cxl_memdev *cxlmd,
 
        dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));
 
+       rc = cxl_endpoint_autoremove(cxlmd, endpoint);
+       if (rc)
+               return rc;
+
        if (!endpoint->dev.driver) {
                dev_err(&cxlmd->dev, "%s failed probe\n",
                        dev_name(&endpoint->dev));
                return -ENXIO;
        }
 
-       return cxl_endpoint_autoremove(cxlmd, endpoint);
+       return 0;
 }
 
 static void enable_suspend(void *data)
index bbeef91..0aaa70b 100644 (file)
@@ -108,8 +108,8 @@ static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
                return -EINVAL;
 
        get_lsa = (struct cxl_mbox_get_lsa) {
-               .offset = cmd->in_offset,
-               .length = cmd->in_length,
+               .offset = cpu_to_le32(cmd->in_offset),
+               .length = cpu_to_le32(cmd->in_length),
        };
 
        rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa,
@@ -139,7 +139,7 @@ static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
                return -ENOMEM;
 
        *set_lsa = (struct cxl_mbox_set_lsa) {
-               .offset = cmd->in_offset,
+               .offset = cpu_to_le32(cmd->in_offset),
        };
        memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);
 
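CXL mailbox payloads are little-endian by specification, so the structure fields become __le32 and every producer converts explicitly; on big-endian hosts the old plain-u32 code put byte-swapped offsets on the wire. The round-trip, sketched with a hypothetical command struct:

#include <asm/byteorder.h>
#include <linux/types.h>

/* Hypothetical LE payload: store CPU values with cpu_to_le32() and
 * read device-supplied fields back with le32_to_cpu(). */
struct get_lsa_payload {
        __le32 offset;
        __le32 length;
} __packed;

static void fill_payload(struct get_lsa_payload *p, u32 offset, u32 len)
{
        p->offset = cpu_to_le32(offset);
        p->length = cpu_to_le32(len);
}
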
index 01474da..9602141 100644 (file)
@@ -123,7 +123,7 @@ void devfreq_get_freq_range(struct devfreq *devfreq,
                            unsigned long *min_freq,
                            unsigned long *max_freq)
 {
-       unsigned long *freq_table = devfreq->profile->freq_table;
+       unsigned long *freq_table = devfreq->freq_table;
        s32 qos_min_freq, qos_max_freq;
 
        lockdep_assert_held(&devfreq->lock);
@@ -133,11 +133,11 @@ void devfreq_get_freq_range(struct devfreq *devfreq,
         * The devfreq drivers can initialize this in either ascending or
         * descending order and devfreq core supports both.
         */
-       if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
+       if (freq_table[0] < freq_table[devfreq->max_state - 1]) {
                *min_freq = freq_table[0];
-               *max_freq = freq_table[devfreq->profile->max_state - 1];
+               *max_freq = freq_table[devfreq->max_state - 1];
        } else {
-               *min_freq = freq_table[devfreq->profile->max_state - 1];
+               *min_freq = freq_table[devfreq->max_state - 1];
                *max_freq = freq_table[0];
        }
 
@@ -169,8 +169,8 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
 {
        int lev;
 
-       for (lev = 0; lev < devfreq->profile->max_state; lev++)
-               if (freq == devfreq->profile->freq_table[lev])
+       for (lev = 0; lev < devfreq->max_state; lev++)
+               if (freq == devfreq->freq_table[lev])
                        return lev;
 
        return -EINVAL;
@@ -178,7 +178,6 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
 
 static int set_freq_table(struct devfreq *devfreq)
 {
-       struct devfreq_dev_profile *profile = devfreq->profile;
        struct dev_pm_opp *opp;
        unsigned long freq;
        int i, count;
@@ -188,25 +187,22 @@ static int set_freq_table(struct devfreq *devfreq)
        if (count <= 0)
                return -EINVAL;
 
-       profile->max_state = count;
-       profile->freq_table = devm_kcalloc(devfreq->dev.parent,
-                                       profile->max_state,
-                                       sizeof(*profile->freq_table),
-                                       GFP_KERNEL);
-       if (!profile->freq_table) {
-               profile->max_state = 0;
+       devfreq->max_state = count;
+       devfreq->freq_table = devm_kcalloc(devfreq->dev.parent,
+                                          devfreq->max_state,
+                                          sizeof(*devfreq->freq_table),
+                                          GFP_KERNEL);
+       if (!devfreq->freq_table)
                return -ENOMEM;
-       }
 
-       for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
+       for (i = 0, freq = 0; i < devfreq->max_state; i++, freq++) {
                opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
                if (IS_ERR(opp)) {
-                       devm_kfree(devfreq->dev.parent, profile->freq_table);
-                       profile->max_state = 0;
+                       devm_kfree(devfreq->dev.parent, devfreq->freq_table);
                        return PTR_ERR(opp);
                }
                dev_pm_opp_put(opp);
-               profile->freq_table[i] = freq;
+               devfreq->freq_table[i] = freq;
        }
 
        return 0;
@@ -246,7 +242,7 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
 
        if (lev != prev_lev) {
                devfreq->stats.trans_table[
-                       (prev_lev * devfreq->profile->max_state) + lev]++;
+                       (prev_lev * devfreq->max_state) + lev]++;
                devfreq->stats.total_trans++;
        }
 
@@ -835,6 +831,9 @@ struct devfreq *devfreq_add_device(struct device *dev,
                if (err < 0)
                        goto err_dev;
                mutex_lock(&devfreq->lock);
+       } else {
+               devfreq->freq_table = devfreq->profile->freq_table;
+               devfreq->max_state = devfreq->profile->max_state;
        }
 
        devfreq->scaling_min_freq = find_available_min_freq(devfreq);
@@ -870,8 +869,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
 
        devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
                        array3_size(sizeof(unsigned int),
-                                   devfreq->profile->max_state,
-                                   devfreq->profile->max_state),
+                                   devfreq->max_state,
+                                   devfreq->max_state),
                        GFP_KERNEL);
        if (!devfreq->stats.trans_table) {
                mutex_unlock(&devfreq->lock);
@@ -880,7 +879,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
        }
 
        devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
-                       devfreq->profile->max_state,
+                       devfreq->max_state,
                        sizeof(*devfreq->stats.time_in_state),
                        GFP_KERNEL);
        if (!devfreq->stats.time_in_state) {
@@ -932,8 +931,9 @@ struct devfreq *devfreq_add_device(struct device *dev,
        err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
                                                NULL);
        if (err) {
-               dev_err(dev, "%s: Unable to start governor for the device\n",
-                       __func__);
+               dev_err_probe(dev, err,
+                       "%s: Unable to start governor for the device\n",
+                        __func__);
                goto err_init;
        }
        create_sysfs_files(devfreq, devfreq->governor);
@@ -1665,9 +1665,9 @@ static ssize_t available_frequencies_show(struct device *d,
 
        mutex_lock(&df->lock);
 
-       for (i = 0; i < df->profile->max_state; i++)
+       for (i = 0; i < df->max_state; i++)
                count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
-                               "%lu ", df->profile->freq_table[i]);
+                               "%lu ", df->freq_table[i]);
 
        mutex_unlock(&df->lock);
        /* Truncate the trailing space */
@@ -1690,7 +1690,7 @@ static ssize_t trans_stat_show(struct device *dev,
 
        if (!df->profile)
                return -EINVAL;
-       max_state = df->profile->max_state;
+       max_state = df->max_state;
 
        if (max_state == 0)
                return sprintf(buf, "Not Supported.\n");
@@ -1707,19 +1707,17 @@ static ssize_t trans_stat_show(struct device *dev,
        len += sprintf(buf + len, "           :");
        for (i = 0; i < max_state; i++)
                len += sprintf(buf + len, "%10lu",
-                               df->profile->freq_table[i]);
+                               df->freq_table[i]);
 
        len += sprintf(buf + len, "   time(ms)\n");
 
        for (i = 0; i < max_state; i++) {
-               if (df->profile->freq_table[i]
-                                       == df->previous_freq) {
+               if (df->freq_table[i] == df->previous_freq)
                        len += sprintf(buf + len, "*");
-               } else {
+               else
                        len += sprintf(buf + len, " ");
-               }
-               len += sprintf(buf + len, "%10lu:",
-                               df->profile->freq_table[i]);
+
+               len += sprintf(buf + len, "%10lu:", df->freq_table[i]);
                for (j = 0; j < max_state; j++)
                        len += sprintf(buf + len, "%10u",
                                df->stats.trans_table[(i * max_state) + j]);
@@ -1743,7 +1741,7 @@ static ssize_t trans_stat_store(struct device *dev,
        if (!df->profile)
                return -EINVAL;
 
-       if (df->profile->max_state == 0)
+       if (df->max_state == 0)
                return count;
 
        err = kstrtoint(buf, 10, &value);
@@ -1751,11 +1749,11 @@ static ssize_t trans_stat_store(struct device *dev,
                return -EINVAL;
 
        mutex_lock(&df->lock);
-       memset(df->stats.time_in_state, 0, (df->profile->max_state *
+       memset(df->stats.time_in_state, 0, (df->max_state *
                                        sizeof(*df->stats.time_in_state)));
        memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
-                                       df->profile->max_state,
-                                       df->profile->max_state));
+                                       df->max_state,
+                                       df->max_state));
        df->stats.total_trans = 0;
        df->stats.last_update = get_jiffies_64();
        mutex_unlock(&df->lock);
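
After this migration devfreq->freq_table and devfreq->max_state are the authoritative copies, whether the driver supplied a table in its profile or left it empty for the core to build from the device's OPPs. A hypothetical driver profile relying on the OPP-built table (my_target and my_get_status are assumed callbacks, not from this patch):

#include <linux/devfreq.h>

static int my_target(struct device *dev, unsigned long *freq, u32 flags);
static int my_get_status(struct device *dev, struct devfreq_dev_status *stat);

/* Hypothetical profile: freq_table/max_state left unset, so the core
 * derives them from the OPPs and mirrors them into struct devfreq,
 * which governors and sysfs now read directly. */
static struct devfreq_dev_profile my_profile = {
        .polling_ms     = 100,
        .target         = my_target,
        .get_dev_status = my_get_status,
};
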
index 9b849d7..a443e7c 100644 (file)
@@ -519,15 +519,19 @@ static int of_get_devfreq_events(struct device_node *np,
 
        count = of_get_child_count(events_np);
        desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL);
-       if (!desc)
+       if (!desc) {
+               of_node_put(events_np);
                return -ENOMEM;
+       }
        info->num_events = count;
 
        of_id = of_match_device(exynos_ppmu_id_match, dev);
        if (of_id)
                info->ppmu_type = (enum exynos_ppmu_type)of_id->data;
-       else
+       else {
+               of_node_put(events_np);
                return -EINVAL;
+       }
 
        j = 0;
        for_each_child_of_node(events_np, node) {
index e689101..f7dcc44 100644 (file)
@@ -447,9 +447,9 @@ static int exynos_bus_probe(struct platform_device *pdev)
                }
        }
 
-       max_state = bus->devfreq->profile->max_state;
-       min_freq = (bus->devfreq->profile->freq_table[0] / 1000);
-       max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000);
+       max_state = bus->devfreq->max_state;
+       min_freq = (bus->devfreq->freq_table[0] / 1000);
+       max_freq = (bus->devfreq->freq_table[max_state - 1] / 1000);
        pr_info("exynos-bus: new bus device registered: %s (%6ld KHz ~ %6ld KHz)\n",
                        dev_name(dev), min_freq, max_freq);
 
index 72c6797..953cf9a 100644 (file)
@@ -1,4 +1,4 @@
-       // SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/drivers/devfreq/governor_passive.c
  *
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/devfreq.h>
+#include <linux/units.h>
 #include "governor.h"
 
-#define HZ_PER_KHZ     1000
-
 static struct devfreq_cpu_data *
 get_parent_cpu_data(struct devfreq_passive_data *p_data,
                    struct cpufreq_policy *policy)
@@ -34,6 +33,20 @@ get_parent_cpu_data(struct devfreq_passive_data *p_data,
        return NULL;
 }
 
+static void delete_parent_cpu_data(struct devfreq_passive_data *p_data)
+{
+       struct devfreq_cpu_data *parent_cpu_data, *tmp;
+
+       list_for_each_entry_safe(parent_cpu_data, tmp, &p_data->cpu_data_list, node) {
+               list_del(&parent_cpu_data->node);
+
+               if (parent_cpu_data->opp_table)
+                       dev_pm_opp_put_opp_table(parent_cpu_data->opp_table);
+
+               kfree(parent_cpu_data);
+       }
+}
+
 static unsigned long get_target_freq_by_required_opp(struct device *p_dev,
                                                struct opp_table *p_opp_table,
                                                struct opp_table *opp_table,
@@ -131,18 +144,18 @@ static int get_target_freq_with_devfreq(struct devfreq *devfreq,
                goto out;
 
        /* Use interpolation if required opps is not available */
-       for (i = 0; i < parent_devfreq->profile->max_state; i++)
-               if (parent_devfreq->profile->freq_table[i] == *freq)
+       for (i = 0; i < parent_devfreq->max_state; i++)
+               if (parent_devfreq->freq_table[i] == *freq)
                        break;
 
-       if (i == parent_devfreq->profile->max_state)
+       if (i == parent_devfreq->max_state)
                return -EINVAL;
 
-       if (i < devfreq->profile->max_state) {
-               child_freq = devfreq->profile->freq_table[i];
+       if (i < devfreq->max_state) {
+               child_freq = devfreq->freq_table[i];
        } else {
-               count = devfreq->profile->max_state;
-               child_freq = devfreq->profile->freq_table[count - 1];
+               count = devfreq->max_state;
+               child_freq = devfreq->freq_table[count - 1];
        }
 
 out:
@@ -222,8 +235,7 @@ static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq)
 {
        struct devfreq_passive_data *p_data
                        = (struct devfreq_passive_data *)devfreq->data;
-       struct devfreq_cpu_data *parent_cpu_data;
-       int cpu, ret = 0;
+       int ret;
 
        if (p_data->nb.notifier_call) {
                ret = cpufreq_unregister_notifier(&p_data->nb,
@@ -232,27 +244,9 @@ static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq)
                        return ret;
        }
 
-       for_each_possible_cpu(cpu) {
-               struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-               if (!policy) {
-                       ret = -EINVAL;
-                       continue;
-               }
-
-               parent_cpu_data = get_parent_cpu_data(p_data, policy);
-               if (!parent_cpu_data) {
-                       cpufreq_cpu_put(policy);
-                       continue;
-               }
+       delete_parent_cpu_data(p_data);
 
-               list_del(&parent_cpu_data->node);
-               if (parent_cpu_data->opp_table)
-                       dev_pm_opp_put_opp_table(parent_cpu_data->opp_table);
-               kfree(parent_cpu_data);
-               cpufreq_cpu_put(policy);
-       }
-
-       return ret;
+       return 0;
 }
 
 static int cpufreq_passive_register_notifier(struct devfreq *devfreq)
@@ -336,7 +330,6 @@ err_free_cpu_data:
 err_put_policy:
        cpufreq_cpu_put(policy);
 err:
-       WARN_ON(cpufreq_passive_unregister_notifier(devfreq));
 
        return ret;
 }
@@ -407,8 +400,7 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
        if (!p_data)
                return -EINVAL;
 
-       if (!p_data->this)
-               p_data->this = devfreq;
+       p_data->this = devfreq;
 
        switch (event) {
        case DEVFREQ_GOV_START:
index 0cce6e4..205acb2 100644 (file)
@@ -343,7 +343,7 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
                if (old->context != context)
                        continue;
 
-               dma_resv_list_set(list, i, replacement, usage);
+               dma_resv_list_set(list, i, dma_fence_get(replacement), usage);
                dma_fence_put(old);
        }
 }
index e733068..9631f2f 100644 (file)
@@ -32,8 +32,11 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct udmabuf *ubuf = vma->vm_private_data;
+       pgoff_t pgoff = vmf->pgoff;
 
-       vmf->page = ubuf->pages[vmf->pgoff];
+       if (pgoff >= ubuf->pagecount)
+               return VM_FAULT_SIGBUS;
+       vmf->page = ubuf->pages[pgoff];
        get_page(vmf->page);
        return 0;
 }
index 3e9d726..7b3e603 100644 (file)
@@ -1900,6 +1900,11 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
        for (i = 0; i < init_nr_desc_per_channel; i++) {
                desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
                if (!desc) {
+                       if (i == 0) {
+                               dev_warn(chan2dev(chan),
+                                        "can't allocate any descriptors\n");
+                               return -EIO;
+                       }
                        dev_warn(chan2dev(chan),
                                "only %d descriptors have been allocated\n", i);
                        break;
index 0a2168a..f696246 100644 (file)
@@ -675,16 +675,10 @@ static int dmatest_func(void *data)
        /*
         * src and dst buffers are freed by ourselves below
         */
-       if (params->polled) {
+       if (params->polled)
                flags = DMA_CTRL_ACK;
-       } else {
-               if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask)) {
-                       flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
-               } else {
-                       pr_err("Channel does not support interrupt!\n");
-                       goto err_pq_array;
-               }
-       }
+       else
+               flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
        ktime = ktime_get();
        while (!(kthread_should_stop() ||
@@ -912,7 +906,6 @@ error_unmap_continue:
        runtime = ktime_to_us(ktime);
 
        ret = 0;
-err_pq_array:
        kfree(dma_pq);
 err_srcs_array:
        kfree(srcs);
index e9c9bcb..c741da0 100644 (file)
@@ -1164,8 +1164,9 @@ static int dma_chan_pause(struct dma_chan *dchan)
                        BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
                axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
        } else {
-               val = BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
-                     BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
+               val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
+               val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
+                       BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
                axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
        }
 
@@ -1190,12 +1191,13 @@ static inline void axi_chan_resume(struct axi_dma_chan *chan)
 {
        u32 val;
 
-       val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
        if (chan->chip->dw->hdata->reg_map_8_channels) {
+               val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
                val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
                val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
                axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
        } else {
+               val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
                val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
                val |=  (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
                axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
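
[ Note: on the >8-channel register layout DMAC_CHSUSPREG is shared by
  all channels, so pause and resume above switch from blind writes to
  read-modify-write of that register; a blind write could clobber the
  suspend bits of sibling channels. ]
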
index ff0ea60..5a8cc52 100644 (file)
@@ -716,10 +716,7 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
                struct idxd_wq *wq = idxd->wqs[i];
 
                mutex_lock(&wq->wq_lock);
-               if (wq->state == IDXD_WQ_ENABLED) {
-                       idxd_wq_disable_cleanup(wq);
-                       wq->state = IDXD_WQ_DISABLED;
-               }
+               idxd_wq_disable_cleanup(wq);
                idxd_wq_device_reset_cleanup(wq);
                mutex_unlock(&wq->wq_lock);
        }
index 355fb3e..aa34782 100644 (file)
@@ -512,15 +512,16 @@ static int idxd_probe(struct idxd_device *idxd)
        dev_dbg(dev, "IDXD reset complete\n");
 
        if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
-               if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
+               if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
                        dev_warn(dev, "Unable to turn on user SVA feature.\n");
-               else
+               } else {
                        set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
 
-               if (idxd_enable_system_pasid(idxd))
-                       dev_warn(dev, "No in-kernel DMA with PASID.\n");
-               else
-                       set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+                       if (idxd_enable_system_pasid(idxd))
+                               dev_warn(dev, "No in-kernel DMA with PASID.\n");
+                       else
+                               set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+               }
        } else if (!sva) {
                dev_warn(dev, "User forced SVA off via module param.\n");
        }
index 8535018..f37a276 100644 (file)
@@ -891,7 +891,7 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
         * SDMA stops cyclic channel when DMA request triggers a channel and no SDMA
         * owned buffer is available (i.e. BD_DONE was set too late).
         */
-       if (!is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
+       if (sdmac->desc && !is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
                dev_warn(sdmac->sdma->dev, "restart cyclic channel %d\n", sdmac->channel);
                sdma_enable_channel(sdmac->sdma, sdmac->channel);
        }
@@ -2346,7 +2346,7 @@ MODULE_DESCRIPTION("i.MX SDMA driver");
 #if IS_ENABLED(CONFIG_SOC_IMX6Q)
 MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
 #endif
-#if IS_ENABLED(CONFIG_SOC_IMX7D)
+#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M)
 MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
 #endif
 MODULE_LICENSE("GPL");
index efe8bd3..9b9184f 100644 (file)
@@ -1593,11 +1593,12 @@ static int intel_ldma_probe(struct platform_device *pdev)
        d->core_clk = devm_clk_get_optional(dev, NULL);
        if (IS_ERR(d->core_clk))
                return PTR_ERR(d->core_clk);
-       clk_prepare_enable(d->core_clk);
 
        d->rst = devm_reset_control_get_optional(dev, NULL);
        if (IS_ERR(d->rst))
                return PTR_ERR(d->rst);
+
+       clk_prepare_enable(d->core_clk);
        reset_control_deassert(d->rst);
 
        ret = devm_add_action_or_reset(dev, ldma_clk_disable, d);
index 858400e..09915a5 100644 (file)
@@ -2589,7 +2589,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
 
        /* If the DMAC pool is empty, alloc new */
        if (!desc) {
-               DEFINE_SPINLOCK(lock);
+               static DEFINE_SPINLOCK(lock);
                LIST_HEAD(pool);
 
                if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
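
[ Note: the pl330 fix is one keyword but a real bug: DEFINE_SPINLOCK()
  inside a function puts the lock on the stack, so every caller gets a
  freshly initialized private lock and nothing is serialized; `static`
  makes it the single shared lock the code intended. ]
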
index 87f6ca1..2ff787d 100644 (file)
@@ -558,14 +558,6 @@ static int bam_alloc_chan(struct dma_chan *chan)
        return 0;
 }
 
-static int bam_pm_runtime_get_sync(struct device *dev)
-{
-       if (pm_runtime_enabled(dev))
-               return pm_runtime_get_sync(dev);
-
-       return 0;
-}
-
 /**
  * bam_free_chan - Frees dma resources associated with specific channel
  * @chan: specified channel
@@ -581,7 +573,7 @@ static void bam_free_chan(struct dma_chan *chan)
        unsigned long flags;
        int ret;
 
-       ret = bam_pm_runtime_get_sync(bdev->dev);
+       ret = pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return;
 
@@ -784,7 +776,7 @@ static int bam_pause(struct dma_chan *chan)
        unsigned long flag;
        int ret;
 
-       ret = bam_pm_runtime_get_sync(bdev->dev);
+       ret = pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return ret;
 
@@ -810,7 +802,7 @@ static int bam_resume(struct dma_chan *chan)
        unsigned long flag;
        int ret;
 
-       ret = bam_pm_runtime_get_sync(bdev->dev);
+       ret = pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return ret;
 
@@ -919,7 +911,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
        if (srcs & P_IRQ)
                tasklet_schedule(&bdev->task);
 
-       ret = bam_pm_runtime_get_sync(bdev->dev);
+       ret = pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return IRQ_NONE;
 
@@ -1037,7 +1029,7 @@ static void bam_start_dma(struct bam_chan *bchan)
        if (!vd)
                return;
 
-       ret = bam_pm_runtime_get_sync(bdev->dev);
+       ret = pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return;
 
@@ -1374,11 +1366,6 @@ static int bam_dma_probe(struct platform_device *pdev)
        if (ret)
                goto err_unregister_dma;
 
-       if (!bdev->bamclk) {
-               pm_runtime_disable(&pdev->dev);
-               return 0;
-       }
-
        pm_runtime_irq_safe(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
        pm_runtime_use_autosuspend(&pdev->dev);
@@ -1462,10 +1449,8 @@ static int __maybe_unused bam_dma_suspend(struct device *dev)
 {
        struct bam_device *bdev = dev_get_drvdata(dev);
 
-       if (bdev->bamclk) {
-               pm_runtime_force_suspend(dev);
-               clk_unprepare(bdev->bamclk);
-       }
+       pm_runtime_force_suspend(dev);
+       clk_unprepare(bdev->bamclk);
 
        return 0;
 }
@@ -1475,13 +1460,11 @@ static int __maybe_unused bam_dma_resume(struct device *dev)
        struct bam_device *bdev = dev_get_drvdata(dev);
        int ret;
 
-       if (bdev->bamclk) {
-               ret = clk_prepare(bdev->bamclk);
-               if (ret)
-                       return ret;
+       ret = clk_prepare(bdev->bamclk);
+       if (ret)
+               return ret;
 
-               pm_runtime_force_resume(dev);
-       }
+       pm_runtime_force_resume(dev);
 
        return 0;
 }
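
[ Note: the bam_pm_runtime_get_sync() wrapper existed because probe
  used to leave runtime PM disabled when no bamclk was present. With
  that special case gone, plain pm_runtime_get_sync() is safe
  everywhere, and suspend/resume can call clk_prepare() and
  clk_unprepare() unconditionally because the clk API accepts a NULL
  clk as a no-op dummy. ]
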
index 71d24fc..f744ddb 100644 (file)
@@ -245,6 +245,7 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
        if (dma_spec->args[0] >= xbar->xbar_requests) {
                dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
                        dma_spec->args[0]);
+               put_device(&pdev->dev);
                return ERR_PTR(-EINVAL);
        }
 
@@ -252,12 +253,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
        dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
        if (!dma_spec->np) {
                dev_err(&pdev->dev, "Can't get DMA master\n");
+               put_device(&pdev->dev);
                return ERR_PTR(-EINVAL);
        }
 
        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map) {
                of_node_put(dma_spec->np);
+               put_device(&pdev->dev);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -268,6 +271,8 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
                mutex_unlock(&xbar->mutex);
                dev_err(&pdev->dev, "Run out of free DMA requests\n");
                kfree(map);
+               of_node_put(dma_spec->np);
+               put_device(&pdev->dev);
                return ERR_PTR(-ENOMEM);
        }
        set_bit(map->xbar_out, xbar->dma_inuse);
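
[ Note: the crossbar hunks plug reference leaks: the platform-device
  reference held by ti_dra7_xbar_route_allocate() (and, once acquired,
  the dma_spec->np node reference) must be dropped on every early
  return. A goto-based unwind keeps such paths from drifting apart; a
  sketch with hypothetical labels, reusing names from the hunk above: ]

        if (dma_spec->args[0] >= xbar->xbar_requests) {
                ret = -EINVAL;
                goto err_dev;           /* only the device ref is held */
        }

        dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
        if (!dma_spec->np) {
                ret = -EINVAL;
                goto err_dev;
        }

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map) {
                ret = -ENOMEM;
                goto err_node;          /* node ref is now held, too */
        }
        return map;

err_node:
        of_node_put(dma_spec->np);
err_dev:
        put_device(&pdev->dev);
        return ERR_PTR(ret);
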
index 59b0bed..c8fa7dc 100644 (file)
@@ -103,9 +103,14 @@ static void dimm_setup_label(struct dimm_info *dimm, u16 handle)
 
        dmi_memdev_name(handle, &bank, &device);
 
-       /* both strings must be non-zero */
-       if (bank && *bank && device && *device)
-               snprintf(dimm->label, sizeof(dimm->label), "%s %s", bank, device);
+       /*
+        * Set to a NULL string when both bank and device are zero. In this case,
+        * the label assigned by default will be preserved.
+        */
+       snprintf(dimm->label, sizeof(dimm->label), "%s%s%s",
+                (bank && *bank) ? bank : "",
+                (bank && *bank && device && *device) ? " " : "",
+                (device && *device) ? device : "");
 }
 
 static void assign_dmi_dimm_info(struct dimm_info *dimm, struct memdev_dmi_entry *entry)
index 1cee64b..f7d37c2 100644 (file)
@@ -514,6 +514,28 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
        memset(p, 0, sizeof(*p));
 }
 
+static void enable_intr(struct synps_edac_priv *priv)
+{
+       /* Enable UE/CE Interrupts */
+       if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
+               writel(DDR_UE_MASK | DDR_CE_MASK,
+                      priv->baseaddr + ECC_CLR_OFST);
+       else
+               writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
+                      priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
+
+}
+
+static void disable_intr(struct synps_edac_priv *priv)
+{
+       /* Disable UE/CE Interrupts */
+       if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
+               writel(0x0, priv->baseaddr + ECC_CLR_OFST);
+       else
+               writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
+                      priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
+}
+
 /**
  * intr_handler - Interrupt Handler for ECC interrupts.
  * @irq:        IRQ number.
@@ -555,6 +577,9 @@ static irqreturn_t intr_handler(int irq, void *dev_id)
        /* v3.0 of the controller does not have this register */
        if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
                writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
+       else
+               enable_intr(priv);
+
        return IRQ_HANDLED;
 }
 
@@ -837,25 +862,6 @@ static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
        init_csrows(mci);
 }
 
-static void enable_intr(struct synps_edac_priv *priv)
-{
-       /* Enable UE/CE Interrupts */
-       if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
-               writel(DDR_UE_MASK | DDR_CE_MASK,
-                      priv->baseaddr + ECC_CLR_OFST);
-       else
-               writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
-                      priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
-
-}
-
-static void disable_intr(struct synps_edac_priv *priv)
-{
-       /* Disable UE/CE Interrupts */
-       writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
-                       priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
-}
-
 static int setup_irq(struct mem_ctl_info *mci,
                     struct platform_device *pdev)
 {
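
[ Note: controllers with the DDR_ECC_INTR_SELF_CLEAR quirk disarm
  their ECC interrupt after it fires, so intr_handler() now re-arms it
  via enable_intr(); the enable/disable helpers were moved above the
  handler so no forward declarations are needed, and disable_intr()
  gained the self-clear variant it previously lacked. ]
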
index c9fe590..9c89f7d 100644 (file)
@@ -1211,7 +1211,7 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
        struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
        struct fw_card *card = client->device->card;
        struct timespec64 ts = {0, 0};
-       u32 cycle_time;
+       u32 cycle_time = 0;
        int ret = 0;
 
        local_irq_disable();
index 90ed8fd..adddd8c 100644 (file)
@@ -372,8 +372,7 @@ static ssize_t rom_index_show(struct device *dev,
        struct fw_device *device = fw_device(dev->parent);
        struct fw_unit *unit = fw_unit(dev);
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-                       (int)(unit->directory - device->config_rom));
+       return sysfs_emit(buf, "%td\n", unit->directory - device->config_rom);
 }
 
 static struct device_attribute fw_unit_attributes[] = {
@@ -403,8 +402,7 @@ static ssize_t guid_show(struct device *dev,
        int ret;
 
        down_read(&fw_device_rwsem);
-       ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
-                      device->config_rom[3], device->config_rom[4]);
+       ret = sysfs_emit(buf, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]);
        up_read(&fw_device_rwsem);
 
        return ret;
index 20fba73..a52f084 100644 (file)
@@ -36,7 +36,7 @@ struct scmi_msg_resp_base_attributes {
 
 struct scmi_msg_resp_base_discover_agent {
        __le32 agent_id;
-       u8 name[SCMI_MAX_STR_SIZE];
+       u8 name[SCMI_SHORT_NAME_MAX_SIZE];
 };
 
 
@@ -119,7 +119,7 @@ scmi_base_vendor_id_get(const struct scmi_protocol_handle *ph, bool sub_vendor)
 
        ret = ph->xops->do_xfer(ph, t);
        if (!ret)
-               memcpy(vendor_id, t->rx.buf, size);
+               strscpy(vendor_id, t->rx.buf, size);
 
        ph->xops->xfer_put(ph, t);
 
@@ -221,11 +221,17 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
                calc_list_sz = (1 + (loop_num_ret - 1) / sizeof(u32)) *
                                sizeof(u32);
                if (calc_list_sz != real_list_sz) {
-                       dev_err(dev,
-                               "Malformed reply - real_sz:%zd  calc_sz:%u\n",
-                               real_list_sz, calc_list_sz);
-                       ret = -EPROTO;
-                       break;
+                       dev_warn(dev,
+                                "Malformed reply - real_sz:%zd  calc_sz:%u  (loop_num_ret:%d)\n",
+                                real_list_sz, calc_list_sz, loop_num_ret);
+                       /*
+                        * Bail out if the expected list size is bigger than the
+                        * total payload size of the received reply.
+                        */
+                       if (calc_list_sz > real_list_sz) {
+                               ret = -EPROTO;
+                               break;
+                       }
                }
 
                for (loop = 0; loop < loop_num_ret; loop++)
@@ -270,7 +276,7 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph,
        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
                agent_info = t->rx.buf;
-               strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE);
+               strscpy(name, agent_info->name, SCMI_SHORT_NAME_MAX_SIZE);
        }
 
        ph->xops->xfer_put(ph, t);
@@ -369,7 +375,7 @@ static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph)
        int id, ret;
        u8 *prot_imp;
        u32 version;
-       char name[SCMI_MAX_STR_SIZE];
+       char name[SCMI_SHORT_NAME_MAX_SIZE];
        struct device *dev = ph->dev;
        struct scmi_revision_info *rev = scmi_revision_area_get(ph);
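
[ Note: the SCMI hunks here and in the protocol files below size the
  short-name buffers as SCMI_SHORT_NAME_MAX_SIZE (16 bytes, per the
  define visible in the protocols.h hunk further down) instead of
  SCMI_MAX_STR_SIZE, and replace memcpy()/strlcpy() with strscpy().
  A small sketch of the strscpy() semantics, assuming the names from
  the hunk above: ]

        char name[SCMI_SHORT_NAME_MAX_SIZE];

        /* strscpy() copies at most sizeof(name) - 1 bytes, always
         * NUL-terminates the destination, and returns -E2BIG when
         * the source string did not fit. */
        if (strscpy(name, agent_info->name, sizeof(name)) < 0)
                dev_dbg(ph->dev, "agent name truncated\n");
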
 
index f6fe723..d4e2310 100644 (file)
@@ -181,7 +181,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol,
                return NULL;
        }
 
-       id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL);
+       id = ida_alloc_min(&scmi_bus_id, 1, GFP_KERNEL);
        if (id < 0) {
                kfree_const(scmi_dev->name);
                kfree(scmi_dev);
@@ -204,7 +204,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol,
 put_dev:
        kfree_const(scmi_dev->name);
        put_device(&scmi_dev->dev);
-       ida_simple_remove(&scmi_bus_id, id);
+       ida_free(&scmi_bus_id, id);
        return NULL;
 }
 
@@ -212,7 +212,7 @@ void scmi_device_destroy(struct scmi_device *scmi_dev)
 {
        kfree_const(scmi_dev->name);
        scmi_handle_put(scmi_dev->handle);
-       ida_simple_remove(&scmi_bus_id, scmi_dev->id);
+       ida_free(&scmi_bus_id, scmi_dev->id);
        device_unregister(&scmi_dev->dev);
 }
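
[ Note: ida_simple_get()/ida_simple_remove() are the deprecated
  spellings of the IDA API. ida_alloc_min(&ida, 1, GFP_KERNEL) is the
  direct equivalent of ida_simple_get(&ida, 1, 0, GFP_KERNEL), i.e.
  minimum ID 1 with no upper bound, and ida_free() replaces
  ida_simple_remove(). ]
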
 
index 4d36a9a..3ed7ae0 100644 (file)
@@ -153,7 +153,7 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
        if (!ret) {
                u32 latency = 0;
                attributes = le32_to_cpu(attr->attributes);
-               strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
+               strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
                /* clock_enable_latency field is present only since SCMI v3.1 */
                if (PROTOCOL_REV_MAJOR(version) >= 0x2)
                        latency = le32_to_cpu(attr->clock_enable_latency);
@@ -194,6 +194,7 @@ static int rate_cmp_func(const void *_r1, const void *_r2)
 }
 
 struct scmi_clk_ipriv {
+       struct device *dev;
        u32 clk_id;
        struct scmi_clock_info *clk;
 };
@@ -223,6 +224,29 @@ iter_clk_describe_update_state(struct scmi_iterator_state *st,
        st->num_returned = NUM_RETURNED(flags);
        p->clk->rate_discrete = RATE_DISCRETE(flags);
 
+       /* Warn about out of spec replies ... */
+       if (!p->clk->rate_discrete &&
+           (st->num_returned != 3 || st->num_remaining != 0)) {
+               dev_warn(p->dev,
+                        "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n",
+                        p->clk->name, st->num_returned, st->num_remaining,
+                        st->rx_len);
+
+               /*
+                * A known quirk: a triplet is returned but num_returned != 3
+                * Check for a safe payload size and fix.
+                */
+               if (st->num_returned != 3 && st->num_remaining == 0 &&
+                   st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
+                       st->num_returned = 3;
+                       st->num_remaining = 0;
+               } else {
+                       dev_err(p->dev,
+                               "Cannot fix out-of-spec reply !\n");
+                       return -EPROTO;
+               }
+       }
+
        return 0;
 }
 
@@ -255,7 +279,6 @@ iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
 
                *rate = RATE_TO_U64(r->rate[st->loop_idx]);
                p->clk->list.num_rates++;
-               //XXX dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
        }
 
        return ret;
@@ -266,9 +289,7 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
                              struct scmi_clock_info *clk)
 {
        int ret;
-
        void *iter;
-       struct scmi_msg_clock_describe_rates *msg;
        struct scmi_iterator_ops ops = {
                .prepare_message = iter_clk_describe_prepare_message,
                .update_state = iter_clk_describe_update_state,
@@ -277,11 +298,13 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
        struct scmi_clk_ipriv cpriv = {
                .clk_id = clk_id,
                .clk = clk,
+               .dev = ph->dev,
        };
 
        iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
                                            CLOCK_DESCRIBE_RATES,
-                                           sizeof(*msg), &cpriv);
+                                           sizeof(struct scmi_msg_clock_describe_rates),
+                                           &cpriv);
        if (IS_ERR(iter))
                return PTR_ERR(iter);
 
index c1922bd..8b7ac66 100644 (file)
@@ -1223,6 +1223,7 @@ static int scmi_iterator_run(void *iter)
                if (ret)
                        break;
 
+               st->rx_len = i->t->rx.len;
                ret = iops->update_state(st, i->resp, i->priv);
                if (ret)
                        break;
index b503c22..8abace5 100644 (file)
@@ -117,6 +117,7 @@ struct scmi_optee_channel {
        u32 channel_id;
        u32 tee_session;
        u32 caps;
+       u32 rx_len;
        struct mutex mu;
        struct scmi_chan_info *cinfo;
        union {
@@ -302,6 +303,9 @@ static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t
                return -EIO;
        }
 
+       /* Save response size */
+       channel->rx_len = param[2].u.memref.size;
+
        return 0;
 }
 
@@ -353,6 +357,7 @@ static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *ch
        shbuf = tee_shm_get_va(channel->tee_shm, 0);
        memset(shbuf, 0, msg_size);
        channel->req.msg = shbuf;
+       channel->rx_len = msg_size;
 
        return 0;
 }
@@ -508,7 +513,7 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
        struct scmi_optee_channel *channel = cinfo->transport_info;
 
        if (channel->tee_shm)
-               msg_fetch_response(channel->req.msg, SCMI_OPTEE_MAX_MSG_SIZE, xfer);
+               msg_fetch_response(channel->req.msg, channel->rx_len, xfer);
        else
                shmem_fetch_response(channel->req.shmem, xfer);
 }
index 8f4051a..bbb0331 100644 (file)
@@ -252,7 +252,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
                        dom_info->mult_factor =
                                        (dom_info->sustained_freq_khz * 1000) /
                                        dom_info->sustained_perf_level;
-               strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+               strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
        }
 
        ph->xops->xfer_put(ph, t);
@@ -332,7 +332,6 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
 {
        int ret;
        void *iter;
-       struct scmi_msg_perf_describe_levels *msg;
        struct scmi_iterator_ops ops = {
                .prepare_message = iter_perf_levels_prepare_message,
                .update_state = iter_perf_levels_update_state,
@@ -345,7 +344,8 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
 
        iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
                                            PERF_DESCRIBE_LEVELS,
-                                           sizeof(*msg), &ppriv);
+                                           sizeof(struct scmi_msg_perf_describe_levels),
+                                           &ppriv);
        if (IS_ERR(iter))
                return PTR_ERR(iter);
 
index 964882c..356e836 100644 (file)
@@ -122,7 +122,7 @@ scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
                dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags);
                dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags);
                dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags);
-               strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+               strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
        }
        ph->xops->xfer_put(ph, t);
 
index 73304af..51c3137 100644 (file)
@@ -24,8 +24,6 @@
 
 #include <asm/unaligned.h>
 
-#define SCMI_SHORT_NAME_MAX_SIZE       16
-
 #define PROTOCOL_REV_MINOR_MASK        GENMASK(15, 0)
 #define PROTOCOL_REV_MAJOR_MASK        GENMASK(31, 16)
 #define PROTOCOL_REV_MAJOR(x)  ((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))))
@@ -181,6 +179,8 @@ struct scmi_protocol_handle {
  * @max_resources: Maximum acceptable number of items, configured by the caller
  *                depending on the underlying resources that it is querying.
  * @loop_idx: The iterator loop index in the current multi-part reply.
+ * @rx_len: Size in bytes of the currently processed message; it can be used by
+ *         the user of the iterator to verify a reply size.
  * @priv: Optional pointer to some additional state-related private data setup
  *       by the caller during the iterations.
  */
@@ -190,6 +190,7 @@ struct scmi_iterator_state {
        unsigned int num_remaining;
        unsigned int max_resources;
        unsigned int loop_idx;
+       size_t rx_len;
        void *priv;
 };
 
index a420a91..673f3eb 100644 (file)
@@ -116,7 +116,7 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
                dom_info->latency_us = le32_to_cpu(attr->latency);
                if (dom_info->latency_us == U32_MAX)
                        dom_info->latency_us = 0;
-               strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+               strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
        }
 
        ph->xops->xfer_put(ph, t);
index 21e0ce8..7288c61 100644 (file)
@@ -338,7 +338,6 @@ static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph,
                                        struct scmi_sensor_info *s)
 {
        void *iter;
-       struct scmi_msg_sensor_list_update_intervals *msg;
        struct scmi_iterator_ops ops = {
                .prepare_message = iter_intervals_prepare_message,
                .update_state = iter_intervals_update_state,
@@ -351,22 +350,28 @@ static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph,
 
        iter = ph->hops->iter_response_init(ph, &ops, s->intervals.count,
                                            SENSOR_LIST_UPDATE_INTERVALS,
-                                           sizeof(*msg), &upriv);
+                                           sizeof(struct scmi_msg_sensor_list_update_intervals),
+                                           &upriv);
        if (IS_ERR(iter))
                return PTR_ERR(iter);
 
        return ph->hops->iter_response_run(iter);
 }
 
+struct scmi_apriv {
+       bool any_axes_support_extended_names;
+       struct scmi_sensor_info *s;
+};
+
 static void iter_axes_desc_prepare_message(void *message,
                                           const unsigned int desc_index,
                                           const void *priv)
 {
        struct scmi_msg_sensor_axis_description_get *msg = message;
-       const struct scmi_sensor_info *s = priv;
+       const struct scmi_apriv *apriv = priv;
 
        /* Set the number of sensors to be skipped/already read */
-       msg->id = cpu_to_le32(s->id);
+       msg->id = cpu_to_le32(apriv->s->id);
        msg->axis_desc_index = cpu_to_le32(desc_index);
 }
 
@@ -393,19 +398,21 @@ iter_axes_desc_process_response(const struct scmi_protocol_handle *ph,
        u32 attrh, attrl;
        struct scmi_sensor_axis_info *a;
        size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ;
-       struct scmi_sensor_info *s = priv;
+       struct scmi_apriv *apriv = priv;
        const struct scmi_axis_descriptor *adesc = st->priv;
 
        attrl = le32_to_cpu(adesc->attributes_low);
+       if (SUPPORTS_EXTENDED_AXIS_NAMES(attrl))
+               apriv->any_axes_support_extended_names = true;
 
-       a = &s->axis[st->desc_index + st->loop_idx];
+       a = &apriv->s->axis[st->desc_index + st->loop_idx];
        a->id = le32_to_cpu(adesc->id);
        a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
 
        attrh = le32_to_cpu(adesc->attributes_high);
        a->scale = S32_EXT(SENSOR_SCALE(attrh));
        a->type = SENSOR_TYPE(attrh);
-       strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
+       strscpy(a->name, adesc->name, SCMI_SHORT_NAME_MAX_SIZE);
 
        if (a->extended_attrs) {
                unsigned int ares = le32_to_cpu(adesc->resolution);
@@ -444,10 +451,19 @@ iter_axes_extended_name_process_response(const struct scmi_protocol_handle *ph,
                                         void *priv)
 {
        struct scmi_sensor_axis_info *a;
-       const struct scmi_sensor_info *s = priv;
+       const struct scmi_apriv *apriv = priv;
        struct scmi_sensor_axis_name_descriptor *adesc = st->priv;
+       u32 axis_id = le32_to_cpu(adesc->axis_id);
 
-       a = &s->axis[st->desc_index + st->loop_idx];
+       if (axis_id >= st->max_resources)
+               return -EPROTO;
+
+       /*
+        * Pick the corresponding descriptor based on the axis_id embedded
+        * in the reply since the list of axes supporting extended names
+        * can be a subset of all the axes.
+        */
+       a = &apriv->s->axis[axis_id];
        strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
        st->priv = ++adesc;
 
@@ -458,21 +474,36 @@ static int
 scmi_sensor_axis_extended_names_get(const struct scmi_protocol_handle *ph,
                                    struct scmi_sensor_info *s)
 {
+       int ret;
        void *iter;
-       struct scmi_msg_sensor_axis_description_get *msg;
        struct scmi_iterator_ops ops = {
                .prepare_message = iter_axes_desc_prepare_message,
                .update_state = iter_axes_extended_name_update_state,
                .process_response = iter_axes_extended_name_process_response,
        };
+       struct scmi_apriv apriv = {
+               .any_axes_support_extended_names = false,
+               .s = s,
+       };
 
        iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,
                                            SENSOR_AXIS_NAME_GET,
-                                           sizeof(*msg), s);
+                                           sizeof(struct scmi_msg_sensor_axis_description_get),
+                                           &apriv);
        if (IS_ERR(iter))
                return PTR_ERR(iter);
 
-       return ph->hops->iter_response_run(iter);
+       /*
+        * Do not cause whole protocol initialization failure when failing to
+        * get extended names for axes.
+        */
+       ret = ph->hops->iter_response_run(iter);
+       if (ret)
+               dev_warn(ph->dev,
+                        "Failed to get axes extended names for %s (ret:%d).\n",
+                        s->name, ret);
+
+       return 0;
 }
 
 static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
@@ -481,12 +512,15 @@ static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
 {
        int ret;
        void *iter;
-       struct scmi_msg_sensor_axis_description_get *msg;
        struct scmi_iterator_ops ops = {
                .prepare_message = iter_axes_desc_prepare_message,
                .update_state = iter_axes_desc_update_state,
                .process_response = iter_axes_desc_process_response,
        };
+       struct scmi_apriv apriv = {
+               .any_axes_support_extended_names = false,
+               .s = s,
+       };
 
        s->axis = devm_kcalloc(ph->dev, s->num_axis,
                               sizeof(*s->axis), GFP_KERNEL);
@@ -495,7 +529,8 @@ static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
 
        iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,
                                            SENSOR_AXIS_DESCRIPTION_GET,
-                                           sizeof(*msg), s);
+                                           sizeof(struct scmi_msg_sensor_axis_description_get),
+                                           &apriv);
        if (IS_ERR(iter))
                return PTR_ERR(iter);
 
@@ -503,7 +538,8 @@ static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
        if (ret)
                return ret;
 
-       if (PROTOCOL_REV_MAJOR(version) >= 0x3)
+       if (PROTOCOL_REV_MAJOR(version) >= 0x3 &&
+           apriv.any_axes_support_extended_names)
                ret = scmi_sensor_axis_extended_names_get(ph, s);
 
        return ret;
@@ -598,7 +634,7 @@ iter_sens_descr_process_response(const struct scmi_protocol_handle *ph,
                            SUPPORTS_AXIS(attrh) ?
                            SENSOR_AXIS_NUMBER(attrh) : 0,
                            SCMI_MAX_NUM_SENSOR_AXIS);
-       strscpy(s->name, sdesc->name, SCMI_MAX_STR_SIZE);
+       strscpy(s->name, sdesc->name, SCMI_SHORT_NAME_MAX_SIZE);
 
        /*
         * If supported overwrite short name with the extended
index 9d195d8..eaa8d94 100644 (file)
@@ -180,7 +180,6 @@ static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph,
 {
        int ret;
        void *iter;
-       struct scmi_msg_cmd_describe_levels *msg;
        struct scmi_iterator_ops ops = {
                .prepare_message = iter_volt_levels_prepare_message,
                .update_state = iter_volt_levels_update_state,
@@ -193,7 +192,8 @@ static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph,
 
        iter = ph->hops->iter_response_init(ph, &ops, v->num_levels,
                                            VOLTAGE_DESCRIBE_LEVELS,
-                                           sizeof(*msg), &vpriv);
+                                           sizeof(struct scmi_msg_cmd_describe_levels),
+                                           &vpriv);
        if (IS_ERR(iter))
                return PTR_ERR(iter);
 
@@ -225,15 +225,14 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
 
                /* Retrieve domain attributes at first ... */
                put_unaligned_le32(dom, td->tx.buf);
-               ret = ph->xops->do_xfer(ph, td);
                /* Skip domain on comms error */
-               if (ret)
+               if (ph->xops->do_xfer(ph, td))
                        continue;
 
                v = vinfo->domains + dom;
                v->id = dom;
                attributes = le32_to_cpu(resp_dom->attr);
-               strlcpy(v->name, resp_dom->name, SCMI_MAX_STR_SIZE);
+               strscpy(v->name, resp_dom->name, SCMI_SHORT_NAME_MAX_SIZE);
 
                /*
                 * If supported overwrite short name with the extended one;
@@ -249,12 +248,8 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
                                v->async_level_set = true;
                }
 
-               ret = scmi_voltage_levels_get(ph, v);
                /* Skip invalid voltage descriptors */
-               if (ret)
-                       continue;
-
-               ph->xops->reset_rx_to_maxsz(ph, td);
+               scmi_voltage_levels_get(ph, v);
        }
 
        ph->xops->xfer_put(ph, td);
index 73089a2..ceae84c 100644 (file)
@@ -6,7 +6,7 @@
 #include <linux/efi.h>
 #include <linux/reboot.h>
 
-static void (*orig_pm_power_off)(void);
+static struct sys_off_handler *efi_sys_off_handler;
 
 int efi_reboot_quirk_mode = -1;
 
@@ -51,15 +51,11 @@ bool __weak efi_poweroff_required(void)
        return false;
 }
 
-static void efi_power_off(void)
+static int efi_power_off(struct sys_off_data *data)
 {
        efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
-       /*
-        * The above call should not return, if it does fall back to
-        * the original power off method (typically ACPI poweroff).
-        */
-       if (orig_pm_power_off)
-               orig_pm_power_off();
+
+       return NOTIFY_DONE;
 }
 
 static int __init efi_shutdown_init(void)
@@ -68,8 +64,13 @@ static int __init efi_shutdown_init(void)
                return -ENODEV;
 
        if (efi_poweroff_required()) {
-               orig_pm_power_off = pm_power_off;
-               pm_power_off = efi_power_off;
+               /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */
+               efi_sys_off_handler =
+                       register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+                                                SYS_OFF_PRIO_FIRMWARE + 1,
+                                                efi_power_off, NULL);
+               if (IS_ERR(efi_sys_off_handler))
+                       return PTR_ERR(efi_sys_off_handler);
        }
 
        return 0;
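
[ Note: the EFI poweroff path moves from open-coded pm_power_off
  chaining (save the old pointer, call it as a fallback) to the
  sys-off handler API, which keeps a priority-ordered chain in the
  core. Registering at SYS_OFF_PRIO_FIRMWARE + 1 makes EFI run before
  acpi_power_off, and returning NOTIFY_DONE lets lower-priority
  handlers take over should the firmware call ever return. A minimal
  registration sketch with hypothetical names: ]

        #include <linux/reboot.h>

        static struct sys_off_handler *handler;

        static int my_power_off(struct sys_off_data *data)
        {
                /* ... ask the platform to power off; may not return ... */
                return NOTIFY_DONE;     /* fall through to the next handler */
        }

        static int __init my_shutdown_init(void)
        {
                handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
                                                   SYS_OFF_PRIO_FIRMWARE + 1,
                                                   my_power_off, NULL);
                return PTR_ERR_OR_ZERO(handler);
        }
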
index 4c7c9dd..7882d4b 100644 (file)
@@ -26,8 +26,6 @@
 #include <linux/sysfb.h>
 #include <video/vga.h>
 
-#include <asm/efi.h>
-
 enum {
        OVERRIDE_NONE = 0x0,
        OVERRIDE_BASE = 0x1,
index 2bfbb05..1f276f1 100644 (file)
 #include <linux/screen_info.h>
 #include <linux/sysfb.h>
 
+static struct platform_device *pd;
+static DEFINE_MUTEX(disable_lock);
+static bool disabled;
+
+static bool sysfb_unregister(void)
+{
+       if (IS_ERR_OR_NULL(pd))
+               return false;
+
+       platform_device_unregister(pd);
+       pd = NULL;
+
+       return true;
+}
+
+/**
+ * sysfb_disable() - disable the Generic System Framebuffers support
+ *
+ * This disables the registration of system framebuffer devices that match the
+ * generic drivers that make use of the system framebuffer set up by firmware.
+ *
+ * It also unregisters a device if this was already registered by sysfb_init().
+ *
+ * Context: The function can sleep. A @disable_lock mutex is acquired to serialize
+ *          against sysfb_init(), that registers a system framebuffer device.
+ */
+void sysfb_disable(void)
+{
+       mutex_lock(&disable_lock);
+       sysfb_unregister();
+       disabled = true;
+       mutex_unlock(&disable_lock);
+}
+EXPORT_SYMBOL_GPL(sysfb_disable);
+
 static __init int sysfb_init(void)
 {
        struct screen_info *si = &screen_info;
        struct simplefb_platform_data mode;
-       struct platform_device *pd;
        const char *name;
        bool compatible;
-       int ret;
+       int ret = 0;
+
+       mutex_lock(&disable_lock);
+       if (disabled)
+               goto unlock_mutex;
 
        /* try to create a simple-framebuffer device */
        compatible = sysfb_parse_mode(si, &mode);
        if (compatible) {
-               ret = sysfb_create_simplefb(si, &mode);
-               if (!ret)
-                       return 0;
+               pd = sysfb_create_simplefb(si, &mode);
+               if (!IS_ERR(pd))
+                       goto unlock_mutex;
        }
 
        /* if the FB is incompatible, create a legacy framebuffer device */
@@ -60,8 +98,10 @@ static __init int sysfb_init(void)
                name = "platform-framebuffer";
 
        pd = platform_device_alloc(name, 0);
-       if (!pd)
-               return -ENOMEM;
+       if (!pd) {
+               ret = -ENOMEM;
+               goto unlock_mutex;
+       }
 
        sysfb_apply_efi_quirks(pd);
 
@@ -73,9 +113,11 @@ static __init int sysfb_init(void)
        if (ret)
                goto err;
 
-       return 0;
+       goto unlock_mutex;
 err:
        platform_device_put(pd);
+unlock_mutex:
+       mutex_unlock(&disable_lock);
        return ret;
 }
 
index bda8712..a353e27 100644 (file)
@@ -57,8 +57,8 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
        return false;
 }
 
-__init int sysfb_create_simplefb(const struct screen_info *si,
-                                const struct simplefb_platform_data *mode)
+__init struct platform_device *sysfb_create_simplefb(const struct screen_info *si,
+                                                    const struct simplefb_platform_data *mode)
 {
        struct platform_device *pd;
        struct resource res;
@@ -76,7 +76,7 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
                base |= (u64)si->ext_lfb_base << 32;
        if (!base || (u64)(resource_size_t)base != base) {
                printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
        }
 
        /*
@@ -93,7 +93,7 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
        length = mode->height * mode->stride;
        if (length > size) {
                printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
        }
        length = PAGE_ALIGN(length);
 
@@ -104,11 +104,11 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
        res.start = base;
        res.end = res.start + length - 1;
        if (res.end <= res.start)
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
 
        pd = platform_device_alloc("simple-framebuffer", 0);
        if (!pd)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        sysfb_apply_efi_quirks(pd);
 
@@ -124,10 +124,10 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
        if (ret)
                goto err_put_device;
 
-       return 0;
+       return pd;
 
 err_put_device:
        platform_device_put(pd);
 
-       return ret;
+       return ERR_PTR(ret);
 }
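
[ Note: sysfb_create_simplefb() now hands back the platform device
  itself, using the ERR_PTR convention for failures, so sysfb_init()
  can stash it in the file-scope `pd` and the new sysfb_disable() can
  unregister it later under @disable_lock. The convention encodes the
  errno inside the pointer value: ]

        struct platform_device *pd;

        pd = sysfb_create_simplefb(si, &mode);
        if (IS_ERR(pd))                 /* pointer holds -EINVAL, -ENOMEM, ... */
                return PTR_ERR(pd);     /* decode back to a plain errno */
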
index df56361..bea0e32 100644 (file)
@@ -434,25 +434,13 @@ static int grgpio_probe(struct platform_device *ofdev)
 static int grgpio_remove(struct platform_device *ofdev)
 {
        struct grgpio_priv *priv = platform_get_drvdata(ofdev);
-       int i;
-       int ret = 0;
-
-       if (priv->domain) {
-               for (i = 0; i < GRGPIO_MAX_NGPIO; i++) {
-                       if (priv->uirqs[i].refcnt != 0) {
-                               ret = -EBUSY;
-                               goto out;
-                       }
-               }
-       }
 
        gpiochip_remove(&priv->gc);
 
        if (priv->domain)
                irq_domain_remove(priv->domain);
 
-out:
-       return ret;
+       return 0;
 }
 
 static const struct of_device_id grgpio_match[] = {
index c5166cd..7f59e5d 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0+
 //
-// MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
+// MXS GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
 // Copyright 2008 Juergen Beisert, kernel@pengutronix.de
 //
 // Based on code from Freescale,
index 08bc52c..ecd7d16 100644 (file)
@@ -351,6 +351,9 @@ static const struct regmap_config pca953x_i2c_regmap = {
        .reg_bits = 8,
        .val_bits = 8,
 
+       .use_single_read = true,
+       .use_single_write = true,
+
        .readable_reg = pca953x_readable_register,
        .writeable_reg = pca953x_writeable_register,
        .volatile_reg = pca953x_volatile_register,
@@ -906,15 +909,18 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
 static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
 {
        DECLARE_BITMAP(val, MAX_LINE);
+       u8 regaddr;
        int ret;
 
-       ret = regcache_sync_region(chip->regmap, chip->regs->output,
-                                  chip->regs->output + NBANK(chip));
+       regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+       ret = regcache_sync_region(chip->regmap, regaddr,
+                                  regaddr + NBANK(chip) - 1);
        if (ret)
                goto out;
 
-       ret = regcache_sync_region(chip->regmap, chip->regs->direction,
-                                  chip->regs->direction + NBANK(chip));
+       regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+       ret = regcache_sync_region(chip->regmap, regaddr,
+                                  regaddr + NBANK(chip) - 1);
        if (ret)
                goto out;
 
@@ -1127,14 +1133,14 @@ static int pca953x_regcache_sync(struct device *dev)
         * sync these registers first and only then sync the rest.
         */
        regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
-       ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+       ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
        if (ret) {
                dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
                return ret;
        }
 
        regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
-       ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+       ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
        if (ret) {
                dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
                return ret;
@@ -1144,7 +1150,7 @@ static int pca953x_regcache_sync(struct device *dev)
        if (chip->driver_data & PCA_PCAL) {
                regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
                ret = regcache_sync_region(chip->regmap, regaddr,
-                                          regaddr + NBANK(chip));
+                                          regaddr + NBANK(chip) - 1);
                if (ret) {
                        dev_err(dev, "Failed to sync INT latch registers: %d\n",
                                ret);
@@ -1153,7 +1159,7 @@ static int pca953x_regcache_sync(struct device *dev)
 
                regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
                ret = regcache_sync_region(chip->regmap, regaddr,
-                                          regaddr + NBANK(chip));
+                                          regaddr + NBANK(chip) - 1);
                if (ret) {
                        dev_err(dev, "Failed to sync INT mask registers: %d\n",
                                ret);
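
[ Note: regcache_sync_region() takes an inclusive register range, so
  syncing NBANK(chip) registers starting at regaddr must end at
  regaddr + NBANK(chip) - 1; the old bounds synced one register too
  many. The ranges are also recomputed with pca953x_recalc_addr() so
  they match the addresses the regmap actually caches, and the regmap
  itself is switched to single-register reads and writes. ]
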
index c52b2cb..63dcf42 100644 (file)
@@ -172,6 +172,8 @@ static void realtek_gpio_irq_unmask(struct irq_data *data)
        unsigned long flags;
        u16 m;
 
+       gpiochip_enable_irq(&ctrl->gc, line);
+
        raw_spin_lock_irqsave(&ctrl->lock, flags);
        m = ctrl->intr_mask[port];
        m |= realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
@@ -195,6 +197,8 @@ static void realtek_gpio_irq_mask(struct irq_data *data)
        ctrl->intr_mask[port] = m;
        realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
        raw_spin_unlock_irqrestore(&ctrl->lock, flags);
+
+       gpiochip_disable_irq(&ctrl->gc, line);
 }
 
 static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
@@ -315,13 +319,15 @@ static int realtek_gpio_irq_init(struct gpio_chip *gc)
        return 0;
 }
 
-static struct irq_chip realtek_gpio_irq_chip = {
+static const struct irq_chip realtek_gpio_irq_chip = {
        .name = "realtek-otto-gpio",
        .irq_ack = realtek_gpio_irq_ack,
        .irq_mask = realtek_gpio_irq_mask,
        .irq_unmask = realtek_gpio_irq_unmask,
        .irq_set_type = realtek_gpio_irq_set_type,
        .irq_set_affinity = realtek_gpio_irq_set_affinity,
+       .flags = IRQCHIP_IMMUTABLE,
+       GPIOCHIP_IRQ_RESOURCE_HELPERS,
 };
 
 static const struct of_device_id realtek_gpio_of_match[] = {
@@ -404,7 +410,7 @@ static int realtek_gpio_probe(struct platform_device *pdev)
        irq = platform_get_irq_optional(pdev, 0);
        if (!(dev_flags & GPIO_INTERRUPTS_DISABLED) && irq > 0) {
                girq = &ctrl->gc.irq;
-               girq->chip = &realtek_gpio_irq_chip;
+               gpio_irq_chip_set_chip(girq, &realtek_gpio_irq_chip);
                girq->default_type = IRQ_TYPE_NONE;
                girq->handler = handle_bad_irq;
                girq->parent_handler = realtek_gpio_irq_handler;
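
[ Note: this is the standard conversion to an immutable irqchip: the
  struct irq_chip becomes const and gains IRQCHIP_IMMUTABLE plus
  GPIOCHIP_IRQ_RESOURCE_HELPERS, it is installed through
  gpio_irq_chip_set_chip() instead of a bare pointer assignment, and
  the mask/unmask callbacks call gpiochip_disable_irq() and
  gpiochip_enable_irq() themselves, since gpiolib no longer overrides
  the callbacks of an immutable chip. ]
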
index 9810983..1020c2f 100644 (file)
@@ -991,28 +991,22 @@ static struct configfs_attribute *gpio_sim_device_config_attrs[] = {
 };
 
 struct gpio_sim_chip_name_ctx {
-       struct gpio_sim_device *dev;
+       struct fwnode_handle *swnode;
        char *page;
 };
 
 static int gpio_sim_emit_chip_name(struct device *dev, void *data)
 {
        struct gpio_sim_chip_name_ctx *ctx = data;
-       struct fwnode_handle *swnode;
-       struct gpio_sim_bank *bank;
 
        /* This would be the sysfs device exported in /sys/class/gpio. */
        if (dev->class)
                return 0;
 
-       swnode = dev_fwnode(dev);
+       if (device_match_fwnode(dev, ctx->swnode))
+               return sprintf(ctx->page, "%s\n", dev_name(dev));
 
-       list_for_each_entry(bank, &ctx->dev->bank_list, siblings) {
-               if (bank->swnode == swnode)
-                       return sprintf(ctx->page, "%s\n", dev_name(dev));
-       }
-
-       return -ENODATA;
+       return 0;
 }
 
 static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item,
@@ -1020,7 +1014,7 @@ static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item,
 {
        struct gpio_sim_bank *bank = to_gpio_sim_bank(item);
        struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
-       struct gpio_sim_chip_name_ctx ctx = { dev, page };
+       struct gpio_sim_chip_name_ctx ctx = { bank->swnode, page };
        int ret;
 
        mutex_lock(&dev->lock);
index 23cddb2..9db42f6 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
 
 #define VF610_GPIO_PER_PORT            32
 
index 98cd715..8d09b61 100644 (file)
@@ -217,8 +217,6 @@ static int giu_get_irq(unsigned int irq)
        printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
               maskl, pendl, maskh, pendh);
 
-       atomic_inc(&irq_err_count);
-
        return -EINVAL;
 }
 
index 7f8f5b0..4b61d97 100644 (file)
@@ -385,12 +385,13 @@ static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset)
        unsigned long *base = gpiochip_get_data(gc);
        const struct winbond_gpio_info *info;
        bool val;
+       int ret;
 
        winbond_gpio_get_info(&offset, &info);
 
-       val = winbond_sio_enter(*base);
-       if (val)
-               return val;
+       ret = winbond_sio_enter(*base);
+       if (ret)
+               return ret;
 
        winbond_sio_select_logical(*base, info->dev);
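
[ Note: winbond_sio_enter() returns a negative errno, but the old code
  stored it in a bool, where any non-zero value collapses to true (1),
  so callers saw 1 instead of the error code. Keeping the status in an
  int preserves the errno and leaves `val` for the GPIO level. ]
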
 
index b6d3a57..7f8e2fe 100644 (file)
@@ -99,7 +99,7 @@ static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v)
        const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
 
        map[index] &= ~(0xFFFFFFFFul << offset);
-       map[index] |= v << offset;
+       map[index] |= (unsigned long)v << offset;
 }
 
 static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
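
[ Note: the xgpio one-liner fixes an undefined shift: `v` is a u32 and
  `offset` can be 32 on 64-bit builds, and shifting a 32-bit value by
  its full width is undefined behaviour in C. Casting to unsigned long
  widens the operand before the shift, so the upper word of the map
  entry is filled as intended. ]
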
index f5aa5f9..b26e643 100644 (file)
@@ -421,6 +421,10 @@ out_free_lh:
  * @work: the worker that implements software debouncing
  * @sw_debounced: flag indicating if the software debouncer is active
  * @level: the current debounced physical level of the line
+ * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
+ * @raw_level: the line level at the time of event
+ * @total_discard_seq: the running counter of the discarded events
+ * @last_seqno: the last sequence number before debounce period expires
  */
 struct line {
        struct gpio_desc *desc;
@@ -1460,11 +1464,12 @@ static ssize_t linereq_read(struct file *file,
 static void linereq_free(struct linereq *lr)
 {
        unsigned int i;
-       bool hte;
+       bool hte = false;
 
        for (i = 0; i < lr->num_lines; i++) {
-               hte = !!test_bit(FLAG_EVENT_CLOCK_HTE,
-                                &lr->lines[i].desc->flags);
+               if (lr->lines[i].desc)
+                       hte = !!test_bit(FLAG_EVENT_CLOCK_HTE,
+                                        &lr->lines[i].desc->flags);
                edge_detector_stop(&lr->lines[i], hte);
                if (lr->lines[i].desc)
                        gpiod_free(lr->lines[i].desc);
index e88c497..f65656d 100644 (file)
@@ -256,7 +256,6 @@ config DRM_AMDGPU
        select HWMON
        select BACKLIGHT_CLASS_DEVICE
        select INTERVAL_TREE
-       select DRM_BUDDY
        help
          Choose this option if you have a recent AMD Radeon graphics card.
 
index 1f8161c..3b1c675 100644 (file)
@@ -714,7 +714,8 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 {
        bool all_hub = false;
 
-       if (adev->family == AMDGPU_FAMILY_AI)
+       if (adev->family == AMDGPU_FAMILY_AI ||
+           adev->family == AMDGPU_FAMILY_RV)
                all_hub = true;
 
        return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
index 6b6d46e..4608599 100644 (file)
@@ -1364,16 +1364,10 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
 {
        struct amdkfd_process_info *process_info = vm->process_info;
-       struct amdgpu_bo *pd = vm->root.bo;
 
        if (!process_info)
                return;
 
-       /* Release eviction fence from PD */
-       amdgpu_bo_reserve(pd, false);
-       amdgpu_bo_fence(pd, NULL, false);
-       amdgpu_bo_unreserve(pd);
-
        /* Update process info */
        mutex_lock(&process_info->lock);
        process_info->n_vms--;
index 714178f..2168163 100644 (file)
@@ -40,7 +40,7 @@ static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
 {
        struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
                                                   rhead);
-
+       mutex_destroy(&list->bo_list_mutex);
        kvfree(list);
 }
 
@@ -136,6 +136,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
 
        trace_amdgpu_cs_bo_status(list->num_entries, total_size);
 
+       mutex_init(&list->bo_list_mutex);
        *result = list;
        return 0;
 
index 529d52a..9caea16 100644 (file)
@@ -47,6 +47,10 @@ struct amdgpu_bo_list {
        struct amdgpu_bo *oa_obj;
        unsigned first_userptr;
        unsigned num_entries;
+
+       /* Protect access during command submission.
+        */
+       struct mutex bo_list_mutex;
 };
 
 int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
index b28af04..d8f1335 100644 (file)
@@ -519,6 +519,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                        return r;
        }
 
+       mutex_lock(&p->bo_list->bo_list_mutex);
+
        /* One for TTM and one for the CS job */
        amdgpu_bo_list_for_each_entry(e, p->bo_list)
                e->tv.num_shared = 2;
@@ -651,6 +653,7 @@ out_free_user_pages:
                        kvfree(e->user_pages);
                        e->user_pages = NULL;
                }
+               mutex_unlock(&p->bo_list->bo_list_mutex);
        }
        return r;
 }
@@ -690,9 +693,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 {
        unsigned i;
 
-       if (error && backoff)
+       if (error && backoff) {
                ttm_eu_backoff_reservation(&parser->ticket,
                                           &parser->validated);
+               mutex_unlock(&parser->bo_list->bo_list_mutex);
+       }
 
        for (i = 0; i < parser->num_post_deps; i++) {
                drm_syncobj_put(parser->post_deps[i].syncobj);
@@ -832,12 +837,16 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
                        continue;
 
                r = amdgpu_vm_bo_update(adev, bo_va, false);
-               if (r)
+               if (r) {
+                       mutex_unlock(&p->bo_list->bo_list_mutex);
                        return r;
+               }
 
                r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
-               if (r)
+               if (r) {
+                       mutex_unlock(&p->bo_list->bo_list_mutex);
                        return r;
+               }
        }
 
        r = amdgpu_vm_handle_moved(adev, vm);
@@ -1278,6 +1287,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
        mutex_unlock(&p->adev->notifier_lock);
+       mutex_unlock(&p->bo_list->bo_list_mutex);
 
        return 0;
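
[ Note: the amdgpu hunks above add bo_list_mutex to protect the buffer
  list across command submission: it is initialized in
  amdgpu_bo_list_create(), destroyed in the RCU free callback, taken
  in amdgpu_cs_parser_bos(), and released on every exit path, i.e. the
  error unwinds in amdgpu_cs_vm_handling(), the backoff path in
  amdgpu_cs_parser_fini(), and the success path in amdgpu_cs_submit(). ]
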
 
index 625424f..58df107 100644 (file)
@@ -5164,7 +5164,7 @@ int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
                 */
                amdgpu_unregister_gpu_instance(tmp_adev);
 
-               drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
+               drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
 
                /* disable ras on ALL IPs */
                if (!need_emergency_restart &&
index 17c9bbe..4dfd672 100644 (file)
@@ -1528,6 +1528,21 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
                                                  stime, etime, mode);
 }
 
+static bool
+amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
+{
+       struct drm_device *dev = adev_to_drm(adev);
+       struct drm_fb_helper *fb_helper = dev->fb_helper;
+
+       if (!fb_helper || !fb_helper->buffer)
+               return false;
+
+       if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj)
+               return false;
+
+       return true;
+}
+
 int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
 {
        struct drm_device *dev = adev_to_drm(adev);
@@ -1563,10 +1578,12 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
                        continue;
                }
                robj = gem_to_amdgpu_bo(fb->obj[0]);
-               r = amdgpu_bo_reserve(robj, true);
-               if (r == 0) {
-                       amdgpu_bo_unpin(robj);
-                       amdgpu_bo_unreserve(robj);
+               if (!amdgpu_display_robj_is_fb(adev, robj)) {
+                       r = amdgpu_bo_reserve(robj, true);
+                       if (r == 0) {
+                               amdgpu_bo_unpin(robj);
+                               amdgpu_bo_unreserve(robj);
+                       }
                }
        }
        return 0;
index b4cf871..89011ba 100644 (file)
@@ -320,6 +320,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
        if (!amdgpu_device_has_dc_support(adev)) {
                if (!adev->enable_virtual_display)
                        /* Disable vblank IRQs aggressively for power-saving */
+                       /* XXX: can this be enabled for DC? */
                        adev_to_drm(adev)->vblank_disable_immediate = true;
 
                r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
index 801f6fa..6de63ea 100644 (file)
@@ -642,7 +642,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                            atomic64_read(&adev->visible_pin_size),
                            vram_gtt.vram_size);
                vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
-               vram_gtt.gtt_size *= PAGE_SIZE;
                vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
                return copy_to_user(out, &vram_gtt,
                                    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
@@ -675,7 +674,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
 
                mem.gtt.total_heap_size = gtt_man->size;
-               mem.gtt.total_heap_size *= PAGE_SIZE;
                mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
                        atomic64_read(&adev->gart_pin_size);
                mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man);
index 6546552..acfa207 100644 (file)
 #include <drm/ttm/ttm_resource.h>
 #include <drm/ttm/ttm_range_manager.h>
 
-#include "amdgpu_vram_mgr.h"
-
 /* state back for walking over vram_mgr and gtt_mgr allocations */
 struct amdgpu_res_cursor {
        uint64_t                start;
        uint64_t                size;
        uint64_t                remaining;
-       void                    *node;
-       uint32_t                mem_type;
+       struct drm_mm_node      *node;
 };
 
 /**
@@ -55,63 +52,27 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
                                    uint64_t start, uint64_t size,
                                    struct amdgpu_res_cursor *cur)
 {
-       struct drm_buddy_block *block;
-       struct list_head *head, *next;
        struct drm_mm_node *node;
 
-       if (!res)
-               goto fallback;
-
-       BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
-
-       cur->mem_type = res->mem_type;
-
-       switch (cur->mem_type) {
-       case TTM_PL_VRAM:
-               head = &to_amdgpu_vram_mgr_resource(res)->blocks;
-
-               block = list_first_entry_or_null(head,
-                                                struct drm_buddy_block,
-                                                link);
-               if (!block)
-                       goto fallback;
-
-               while (start >= amdgpu_vram_mgr_block_size(block)) {
-                       start -= amdgpu_vram_mgr_block_size(block);
-
-                       next = block->link.next;
-                       if (next != head)
-                               block = list_entry(next, struct drm_buddy_block, link);
-               }
-
-               cur->start = amdgpu_vram_mgr_block_start(block) + start;
-               cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size);
-               cur->remaining = size;
-               cur->node = block;
-               break;
-       case TTM_PL_TT:
-               node = to_ttm_range_mgr_node(res)->mm_nodes;
-               while (start >= node->size << PAGE_SHIFT)
-                       start -= node++->size << PAGE_SHIFT;
-
-               cur->start = (node->start << PAGE_SHIFT) + start;
-               cur->size = min((node->size << PAGE_SHIFT) - start, size);
+       if (!res || res->mem_type == TTM_PL_SYSTEM) {
+               cur->start = start;
+               cur->size = size;
                cur->remaining = size;
-               cur->node = node;
-               break;
-       default:
-               goto fallback;
+               cur->node = NULL;
+               WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
+               return;
        }
 
-       return;
+       BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
 
-fallback:
-       cur->start = start;
-       cur->size = size;
+       node = to_ttm_range_mgr_node(res)->mm_nodes;
+       while (start >= node->size << PAGE_SHIFT)
+               start -= node++->size << PAGE_SHIFT;
+
+       cur->start = (node->start << PAGE_SHIFT) + start;
+       cur->size = min((node->size << PAGE_SHIFT) - start, size);
        cur->remaining = size;
-       cur->node = NULL;
-       WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
-       return;
+       cur->node = node;
 }
 
 /**
@@ -124,9 +85,7 @@ fallback:
  */
 static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
 {
-       struct drm_buddy_block *block;
-       struct drm_mm_node *node;
-       struct list_head *next;
+       struct drm_mm_node *node = cur->node;
 
        BUG_ON(size > cur->remaining);
 
@@ -140,27 +99,9 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
                return;
        }
 
-       switch (cur->mem_type) {
-       case TTM_PL_VRAM:
-               block = cur->node;
-
-               next = block->link.next;
-               block = list_entry(next, struct drm_buddy_block, link);
-
-               cur->node = block;
-               cur->start = amdgpu_vram_mgr_block_start(block);
-               cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining);
-               break;
-       case TTM_PL_TT:
-               node = cur->node;
-
-               cur->node = ++node;
-               cur->start = node->start << PAGE_SHIFT;
-               cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
-               break;
-       default:
-               return;
-       }
+       cur->node = ++node;
+       cur->start = node->start << PAGE_SHIFT;
+       cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
 }
 
 #endif
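
For reference, the reverted cursor is consumed exactly as before: amdgpu_res_first() positions the cursor inside a resource and amdgpu_res_next() advances it within and across drm_mm nodes. A minimal usage sketch, assuming a hypothetical consume_range() callback (SZ_2M comes from linux/sizes.h):

    /* Hedged usage sketch; consume_range() is a hypothetical consumer. */
    static void walk_resource(struct ttm_resource *res, u64 offset, u64 length)
    {
            struct amdgpu_res_cursor cursor;

            amdgpu_res_first(res, offset, length, &cursor);
            while (cursor.remaining) {
                    /* never step past the current node's contiguous span */
                    u64 chunk = min(cursor.size, (u64)SZ_2M);

                    consume_range(cursor.start, chunk);
                    amdgpu_res_next(&cursor, chunk);
            }
    }
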
index be6f76a..3b4c194 100644 (file)
@@ -1798,18 +1798,26 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
                 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
 
-       /* Compute GTT size, either bsaed on 3/4th the size of RAM size
+       /* Compute GTT size, either based on half the size of system RAM
         * or whatever the user passed on module init */
        if (amdgpu_gtt_size == -1) {
                struct sysinfo si;
 
                si_meminfo(&si);
-               gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
-                              adev->gmc.mc_vram_size),
-                              ((uint64_t)si.totalram * si.mem_unit * 3/4));
-       }
-       else
+               /* Certain GL unit tests for large textures can cause problems
+                * with the OOM killer since there is no way to link this memory
+                * to a process.  This was originally mitigated (but not necessarily
+                * eliminated) by limiting the GTT size.  The problem is this limit
+                * is often too low for many modern games, so just make the limit 1/2
+                * of system memory, which aligns with TTM. The OOM accounting needs
+                * to be addressed, but we shouldn't prevent common 3D applications
+                * from being usable just to potentially mitigate that corner case.
+                */
+               gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+                              (u64)si.totalram * si.mem_unit / 2);
+       } else {
                gtt_size = (uint64_t)amdgpu_gtt_size << 20;
+       }
 
        /* Initialize GTT memory pool */
        r = amdgpu_gtt_mgr_init(adev, gtt_size);
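
The resulting default is easy to sanity-check in isolation. A standalone re-computation of the new policy (userspace C, not kernel code; the 3072 MiB value for AMDGPU_DEFAULT_GTT_SIZE_MB is quoted here as an assumption):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed value of AMDGPU_DEFAULT_GTT_SIZE_MB, for illustration only. */
    #define DEFAULT_GTT_SIZE_MB 3072ULL

    int main(void)
    {
            uint64_t totalram = 16ULL << 30;               /* e.g. 16 GiB of RAM */
            uint64_t gtt_size = DEFAULT_GTT_SIZE_MB << 20; /* fixed floor */

            if (totalram / 2 > gtt_size)                   /* max(default, RAM/2) */
                    gtt_size = totalram / 2;

            printf("GTT size: %llu MiB\n", (unsigned long long)(gtt_size >> 20));
            return 0;
    }

On a 16 GiB machine this yields 8192 MiB, i.e. half of system memory wins over the fixed default.
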
index 6a70818..9120ae8 100644 (file)
@@ -26,7 +26,6 @@
 
 #include <linux/dma-direction.h>
 #include <drm/gpu_scheduler.h>
-#include "amdgpu_vram_mgr.h"
 #include "amdgpu.h"
 
 #define AMDGPU_PL_GDS          (TTM_PL_PRIV + 0)
 
 #define AMDGPU_POISON  0xd0bed0be
 
+struct amdgpu_vram_mgr {
+       struct ttm_resource_manager manager;
+       struct drm_mm mm;
+       spinlock_t lock;
+       struct list_head reservations_pending;
+       struct list_head reserved_pages;
+       atomic64_t vis_usage;
+};
+
 struct amdgpu_gtt_mgr {
        struct ttm_resource_manager manager;
        struct drm_mm mm;
index 576849e..108e8e8 100644 (file)
@@ -496,7 +496,8 @@ static int amdgpu_vkms_sw_init(void *handle)
        adev_to_drm(adev)->mode_config.max_height = YRES_MAX;
 
        adev_to_drm(adev)->mode_config.preferred_depth = 24;
-       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+       /* disable prefer shadow for now due to hibernation issues */
+       adev_to_drm(adev)->mode_config.prefer_shadow = 0;
 
        adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
index 49e4092..0a76116 100644 (file)
 #include "atom.h"
 
 struct amdgpu_vram_reservation {
-       u64 start;
-       u64 size;
-       struct list_head allocated;
-       struct list_head blocks;
+       struct list_head node;
+       struct drm_mm_node mm_node;
 };
 
 static inline struct amdgpu_vram_mgr *
@@ -188,18 +186,18 @@ const struct attribute_group amdgpu_vram_mgr_attr_group = {
 };
 
 /**
- * amdgpu_vram_mgr_vis_size - Calculate visible block size
+ * amdgpu_vram_mgr_vis_size - Calculate visible node size
  *
  * @adev: amdgpu_device pointer
- * @block: DRM BUDDY block structure
+ * @node: MM node structure
  *
- * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
+ * Calculate how many bytes of the MM node are inside visible VRAM
  */
 static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
-                                   struct drm_buddy_block *block)
+                                   struct drm_mm_node *node)
 {
-       u64 start = amdgpu_vram_mgr_block_start(block);
-       u64 end = start + amdgpu_vram_mgr_block_size(block);
+       uint64_t start = node->start << PAGE_SHIFT;
+       uint64_t end = (node->size + node->start) << PAGE_SHIFT;
 
        if (start >= adev->gmc.visible_vram_size)
                return 0;
@@ -220,9 +218,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_resource *res = bo->tbo.resource;
-       struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
-       struct drm_buddy_block *block;
-       u64 usage = 0;
+       unsigned pages = res->num_pages;
+       struct drm_mm_node *mm;
+       u64 usage;
 
        if (amdgpu_gmc_vram_full_visible(&adev->gmc))
                return amdgpu_bo_size(bo);
@@ -230,8 +228,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
        if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
                return 0;
 
-       list_for_each_entry(block, &vres->blocks, link)
-               usage += amdgpu_vram_mgr_vis_size(adev, block);
+       mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
+       for (usage = 0; pages; pages -= mm->size, mm++)
+               usage += amdgpu_vram_mgr_vis_size(adev, mm);
 
        return usage;
 }
@@ -241,30 +240,23 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
 {
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = to_amdgpu_device(mgr);
-       struct drm_buddy *mm = &mgr->mm;
+       struct drm_mm *mm = &mgr->mm;
        struct amdgpu_vram_reservation *rsv, *temp;
-       struct drm_buddy_block *block;
        uint64_t vis_usage;
 
-       list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
-               if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
-                                          rsv->size, mm->chunk_size, &rsv->allocated,
-                                          DRM_BUDDY_RANGE_ALLOCATION))
-                       continue;
-
-               block = amdgpu_vram_mgr_first_block(&rsv->allocated);
-               if (!block)
+       list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) {
+               if (drm_mm_reserve_node(mm, &rsv->mm_node))
                        continue;
 
                dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
-                       rsv->start, rsv->size);
+                       rsv->mm_node.start, rsv->mm_node.size);
 
-               vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
+               vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
                atomic64_add(vis_usage, &mgr->vis_usage);
                spin_lock(&man->bdev->lru_lock);
-               man->usage += rsv->size;
+               man->usage += rsv->mm_node.size << PAGE_SHIFT;
                spin_unlock(&man->bdev->lru_lock);
-               list_move(&rsv->blocks, &mgr->reserved_pages);
+               list_move(&rsv->node, &mgr->reserved_pages);
        }
 }
 
@@ -286,16 +278,14 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
        if (!rsv)
                return -ENOMEM;
 
-       INIT_LIST_HEAD(&rsv->allocated);
-       INIT_LIST_HEAD(&rsv->blocks);
+       INIT_LIST_HEAD(&rsv->node);
+       rsv->mm_node.start = start >> PAGE_SHIFT;
+       rsv->mm_node.size = size >> PAGE_SHIFT;
 
-       rsv->start = start;
-       rsv->size = size;
-
-       mutex_lock(&mgr->lock);
-       list_add_tail(&rsv->blocks, &mgr->reservations_pending);
+       spin_lock(&mgr->lock);
+       list_add_tail(&rsv->node, &mgr->reservations_pending);
        amdgpu_vram_mgr_do_reserve(&mgr->manager);
-       mutex_unlock(&mgr->lock);
+       spin_unlock(&mgr->lock);
 
        return 0;
 }
@@ -317,19 +307,19 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
        struct amdgpu_vram_reservation *rsv;
        int ret;
 
-       mutex_lock(&mgr->lock);
+       spin_lock(&mgr->lock);
 
-       list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
-               if (rsv->start <= start &&
-                   (start < (rsv->start + rsv->size))) {
+       list_for_each_entry(rsv, &mgr->reservations_pending, node) {
+               if ((rsv->mm_node.start <= start) &&
+                   (start < (rsv->mm_node.start + rsv->mm_node.size))) {
                        ret = -EBUSY;
                        goto out;
                }
        }
 
-       list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
-               if (rsv->start <= start &&
-                   (start < (rsv->start + rsv->size))) {
+       list_for_each_entry(rsv, &mgr->reserved_pages, node) {
+               if ((rsv->mm_node.start <= start) &&
+                   (start < (rsv->mm_node.start + rsv->mm_node.size))) {
                        ret = 0;
                        goto out;
                }
@@ -337,11 +327,33 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
 
        ret = -ENOENT;
 out:
-       mutex_unlock(&mgr->lock);
+       spin_unlock(&mgr->lock);
        return ret;
 }
 
 /**
+ * amdgpu_vram_mgr_virt_start - update virtual start address
+ *
+ * @mem: ttm_resource to update
+ * @node: just allocated node
+ *
+ * Calculate a virtual BO start address to easily check if everything is CPU
+ * accessible.
+ */
+static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
+                                      struct drm_mm_node *node)
+{
+       unsigned long start;
+
+       start = node->start + node->size;
+       if (start > mem->num_pages)
+               start -= mem->num_pages;
+       else
+               start = 0;
+       mem->start = max(mem->start, start);
+}
+
+/**
  * amdgpu_vram_mgr_new - allocate new ranges
  *
  * @man: TTM memory type manager
@@ -356,44 +368,46 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
                               const struct ttm_place *place,
                               struct ttm_resource **res)
 {
-       u64 vis_usage = 0, max_bytes, cur_size, min_block_size;
+       unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = to_amdgpu_device(mgr);
-       struct amdgpu_vram_mgr_resource *vres;
-       u64 size, remaining_size, lpfn, fpfn;
-       struct drm_buddy *mm = &mgr->mm;
-       struct drm_buddy_block *block;
-       unsigned long pages_per_block;
+       uint64_t vis_usage = 0, mem_bytes, max_bytes;
+       struct ttm_range_mgr_node *node;
+       struct drm_mm *mm = &mgr->mm;
+       enum drm_mm_insert_mode mode;
+       unsigned i;
        int r;
 
-       lpfn = place->lpfn << PAGE_SHIFT;
+       lpfn = place->lpfn;
        if (!lpfn)
-               lpfn = man->size;
-
-       fpfn = place->fpfn << PAGE_SHIFT;
+               lpfn = man->size >> PAGE_SHIFT;
 
        max_bytes = adev->gmc.mc_vram_size;
        if (tbo->type != ttm_bo_type_kernel)
                max_bytes -= AMDGPU_VM_RESERVED_VRAM;
 
+       mem_bytes = tbo->base.size;
        if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-               pages_per_block = ~0ul;
+               pages_per_node = ~0ul;
+               num_nodes = 1;
        } else {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-               pages_per_block = HPAGE_PMD_NR;
+               pages_per_node = HPAGE_PMD_NR;
 #else
                /* default to 2MB */
-               pages_per_block = 2UL << (20UL - PAGE_SHIFT);
+               pages_per_node = 2UL << (20UL - PAGE_SHIFT);
 #endif
-               pages_per_block = max_t(uint32_t, pages_per_block,
-                                       tbo->page_alignment);
+               pages_per_node = max_t(uint32_t, pages_per_node,
+                                      tbo->page_alignment);
+               num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node);
        }
 
-       vres = kzalloc(sizeof(*vres), GFP_KERNEL);
-       if (!vres)
+       node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
+                       GFP_KERNEL | __GFP_ZERO);
+       if (!node)
                return -ENOMEM;
 
-       ttm_resource_init(tbo, place, &vres->base);
+       ttm_resource_init(tbo, place, &node->base);
 
        /* bail out quickly if there's likely not enough VRAM for this BO */
        if (ttm_resource_manager_usage(man) > max_bytes) {
@@ -401,130 +415,66 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
                goto error_fini;
        }
 
-       INIT_LIST_HEAD(&vres->blocks);
-
+       mode = DRM_MM_INSERT_BEST;
        if (place->flags & TTM_PL_FLAG_TOPDOWN)
-               vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
-
-       if (fpfn || lpfn != man->size)
-               /* Allocate blocks in desired range */
-               vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
-
-       remaining_size = vres->base.num_pages << PAGE_SHIFT;
-
-       mutex_lock(&mgr->lock);
-       while (remaining_size) {
-               if (tbo->page_alignment)
-                       min_block_size = tbo->page_alignment << PAGE_SHIFT;
-               else
-                       min_block_size = mgr->default_page_size;
-
-               BUG_ON(min_block_size < mm->chunk_size);
-
-               /* Limit maximum size to 2GiB due to SG table limitations */
-               size = min(remaining_size, 2ULL << 30);
-
-               if (size >= pages_per_block << PAGE_SHIFT)
-                       min_block_size = pages_per_block << PAGE_SHIFT;
-
-               cur_size = size;
-
-               if (fpfn + size != place->lpfn << PAGE_SHIFT) {
-                       /*
-                        * Except for actual range allocation, modify the size and
-                        * min_block_size conforming to continuous flag enablement
-                        */
-                       if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-                               size = roundup_pow_of_two(size);
-                               min_block_size = size;
-                       /*
-                        * Modify the size value if size is not
-                        * aligned with min_block_size
-                        */
-                       } else if (!IS_ALIGNED(size, min_block_size)) {
-                               size = round_up(size, min_block_size);
+               mode = DRM_MM_INSERT_HIGH;
+
+       pages_left = node->base.num_pages;
+
+       /* Limit maximum size to 2GB due to SG table limitations */
+       pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
+
+       i = 0;
+       spin_lock(&mgr->lock);
+       while (pages_left) {
+               uint32_t alignment = tbo->page_alignment;
+
+               if (pages >= pages_per_node)
+                       alignment = pages_per_node;
+
+               r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
+                                               alignment, 0, place->fpfn,
+                                               lpfn, mode);
+               if (unlikely(r)) {
+                       if (pages > pages_per_node) {
+                               if (is_power_of_2(pages))
+                                       pages = pages / 2;
+                               else
+                                       pages = rounddown_pow_of_two(pages);
+                               continue;
                        }
+                       goto error_free;
                }
 
-               r = drm_buddy_alloc_blocks(mm, fpfn,
-                                          lpfn,
-                                          size,
-                                          min_block_size,
-                                          &vres->blocks,
-                                          vres->flags);
-               if (unlikely(r))
-                       goto error_free_blocks;
-
-               if (size > remaining_size)
-                       remaining_size = 0;
-               else
-                       remaining_size -= size;
-       }
-       mutex_unlock(&mgr->lock);
-
-       if (cur_size != size) {
-               struct drm_buddy_block *block;
-               struct list_head *trim_list;
-               u64 original_size;
-               LIST_HEAD(temp);
-
-               trim_list = &vres->blocks;
-               original_size = vres->base.num_pages << PAGE_SHIFT;
-
-               /*
-                * If size value is rounded up to min_block_size, trim the last
-                * block to the required size
-                */
-               if (!list_is_singular(&vres->blocks)) {
-                       block = list_last_entry(&vres->blocks, typeof(*block), link);
-                       list_move_tail(&block->link, &temp);
-                       trim_list = &temp;
-                       /*
-                        * Compute the original_size value by subtracting the
-                        * last block size with (aligned size - original size)
-                        */
-                       original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size);
-               }
+               vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
+               amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
+               pages_left -= pages;
+               ++i;
 
-               mutex_lock(&mgr->lock);
-               drm_buddy_block_trim(mm,
-                                    original_size,
-                                    trim_list);
-               mutex_unlock(&mgr->lock);
-
-               if (!list_empty(&temp))
-                       list_splice_tail(trim_list, &vres->blocks);
-       }
-
-       list_for_each_entry(block, &vres->blocks, link)
-               vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
-
-       block = amdgpu_vram_mgr_first_block(&vres->blocks);
-       if (!block) {
-               r = -EINVAL;
-               goto error_fini;
+               if (pages > pages_left)
+                       pages = pages_left;
        }
+       spin_unlock(&mgr->lock);
 
-       vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
-
-       if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
-               vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
+       if (i == 1)
+               node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
 
        if (adev->gmc.xgmi.connected_to_cpu)
-               vres->base.bus.caching = ttm_cached;
+               node->base.bus.caching = ttm_cached;
        else
-               vres->base.bus.caching = ttm_write_combined;
+               node->base.bus.caching = ttm_write_combined;
 
        atomic64_add(vis_usage, &mgr->vis_usage);
-       *res = &vres->base;
+       *res = &node->base;
        return 0;
 
-error_free_blocks:
-       drm_buddy_free_list(mm, &vres->blocks);
-       mutex_unlock(&mgr->lock);
+error_free:
+       while (i--)
+               drm_mm_remove_node(&node->mm_nodes[i]);
+       spin_unlock(&mgr->lock);
 error_fini:
-       ttm_resource_fini(man, &vres->base);
-       kfree(vres);
+       ttm_resource_fini(man, &node->base);
+       kvfree(node);
 
        return r;
 }
@@ -540,26 +490,27 @@ error_fini:
 static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
                                struct ttm_resource *res)
 {
-       struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
+       struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = to_amdgpu_device(mgr);
-       struct drm_buddy *mm = &mgr->mm;
-       struct drm_buddy_block *block;
        uint64_t vis_usage = 0;
+       unsigned i, pages;
 
-       mutex_lock(&mgr->lock);
-       list_for_each_entry(block, &vres->blocks, link)
-               vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
+       spin_lock(&mgr->lock);
+       for (i = 0, pages = res->num_pages; pages;
+            pages -= node->mm_nodes[i].size, ++i) {
+               struct drm_mm_node *mm = &node->mm_nodes[i];
 
+               drm_mm_remove_node(mm);
+               vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
+       }
        amdgpu_vram_mgr_do_reserve(man);
-
-       drm_buddy_free_list(mm, &vres->blocks);
-       mutex_unlock(&mgr->lock);
+       spin_unlock(&mgr->lock);
 
        atomic64_sub(vis_usage, &mgr->vis_usage);
 
        ttm_resource_fini(man, res);
-       kfree(vres);
+       kvfree(node);
 }
 
 /**
@@ -591,7 +542,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
        if (!*sgt)
                return -ENOMEM;
 
-       /* Determine the number of DRM_BUDDY blocks to export */
+       /* Determine the number of DRM_MM nodes to export */
        amdgpu_res_first(res, offset, length, &cursor);
        while (cursor.remaining) {
                num_entries++;
@@ -607,10 +558,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
                sg->length = 0;
 
        /*
-        * Walk down DRM_BUDDY blocks to populate scatterlist nodes
-        * @note: Use iterator api to get first the DRM_BUDDY block
+        * Walk down DRM_MM nodes to populate scatterlist nodes
+        * @note: Use the iterator API to get the first DRM_MM node
         * and the number of bytes from it. Access the following
-        * DRM_BUDDY block(s) if more buffer needs to exported
+        * DRM_MM node(s) if more of the buffer needs to be exported
         */
        amdgpu_res_first(res, offset, length, &cursor);
        for_each_sgtable_sg((*sgt), sg, i) {
@@ -697,22 +648,13 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
                                  struct drm_printer *printer)
 {
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
-       struct drm_buddy *mm = &mgr->mm;
-       struct drm_buddy_block *block;
 
        drm_printf(printer, "  vis usage:%llu\n",
                   amdgpu_vram_mgr_vis_usage(mgr));
 
-       mutex_lock(&mgr->lock);
-       drm_printf(printer, "default_page_size: %lluKiB\n",
-                  mgr->default_page_size >> 10);
-
-       drm_buddy_print(mm, printer);
-
-       drm_printf(printer, "reserved:\n");
-       list_for_each_entry(block, &mgr->reserved_pages, link)
-               drm_buddy_block_print(mm, block, printer);
-       mutex_unlock(&mgr->lock);
+       spin_lock(&mgr->lock);
+       drm_mm_print(&mgr->mm, printer);
+       spin_unlock(&mgr->lock);
 }
 
 static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
@@ -732,21 +674,16 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
 {
        struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
        struct ttm_resource_manager *man = &mgr->manager;
-       int err;
 
        ttm_resource_manager_init(man, &adev->mman.bdev,
                                  adev->gmc.real_vram_size);
 
        man->func = &amdgpu_vram_mgr_func;
 
-       err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
-       if (err)
-               return err;
-
-       mutex_init(&mgr->lock);
+       drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT);
+       spin_lock_init(&mgr->lock);
        INIT_LIST_HEAD(&mgr->reservations_pending);
        INIT_LIST_HEAD(&mgr->reserved_pages);
-       mgr->default_page_size = PAGE_SIZE;
 
        ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
        ttm_resource_manager_set_used(man, true);
@@ -774,16 +711,16 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
        if (ret)
                return;
 
-       mutex_lock(&mgr->lock);
-       list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
+       spin_lock(&mgr->lock);
+       list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)
                kfree(rsv);
 
-       list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
-               drm_buddy_free_list(&mgr->mm, &rsv->blocks);
+       list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) {
+               drm_mm_remove_node(&rsv->mm_node);
                kfree(rsv);
        }
-       drm_buddy_fini(&mgr->mm);
-       mutex_unlock(&mgr->lock);
+       drm_mm_takedown(&mgr->mm);
+       spin_unlock(&mgr->lock);
 
        ttm_resource_manager_cleanup(man);
        ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
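
One detail worth calling out in the restored allocator: when drm_mm_insert_node_in_range() fails for a chunk larger than pages_per_node, the request is shrunk to a power of two and retried instead of failing outright. A minimal sketch of just that retry policy, under the same assumptions as the allocation loop above:

    /* Hedged sketch of the shrink-and-retry policy in amdgpu_vram_mgr_new();
     * returns the next chunk size to try, or 0 when the caller must give up. */
    static unsigned long shrink_request(unsigned long pages,
                                        unsigned long pages_per_node)
    {
            if (pages <= pages_per_node)
                    return 0;       /* already minimal: propagate the error */

            if (is_power_of_2(pages))
                    return pages / 2;

            return rounddown_pow_of_two(pages);
    }
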
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
deleted file mode 100644 (file)
index 9a2db87..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: MIT
- * Copyright 2021 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __AMDGPU_VRAM_MGR_H__
-#define __AMDGPU_VRAM_MGR_H__
-
-#include <drm/drm_buddy.h>
-
-struct amdgpu_vram_mgr {
-       struct ttm_resource_manager manager;
-       struct drm_buddy mm;
-       /* protects access to buffer objects */
-       struct mutex lock;
-       struct list_head reservations_pending;
-       struct list_head reserved_pages;
-       atomic64_t vis_usage;
-       u64 default_page_size;
-};
-
-struct amdgpu_vram_mgr_resource {
-       struct ttm_resource base;
-       struct list_head blocks;
-       unsigned long flags;
-};
-
-static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
-{
-       return drm_buddy_block_offset(block);
-}
-
-static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
-{
-       return PAGE_SIZE << drm_buddy_block_order(block);
-}
-
-static inline struct drm_buddy_block *
-amdgpu_vram_mgr_first_block(struct list_head *list)
-{
-       return list_first_entry_or_null(list, struct drm_buddy_block, link);
-}
-
-static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
-{
-       struct drm_buddy_block *block;
-       u64 start, size;
-
-       block = amdgpu_vram_mgr_first_block(head);
-       if (!block)
-               return false;
-
-       while (head != block->link.next) {
-               start = amdgpu_vram_mgr_block_start(block);
-               size = amdgpu_vram_mgr_block_size(block);
-
-               block = list_entry(block->link.next, struct drm_buddy_block, link);
-               if (start + size != amdgpu_vram_mgr_block_start(block))
-                       return false;
-       }
-
-       return true;
-}
-
-static inline struct amdgpu_vram_mgr_resource *
-to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
-{
-       return container_of(res, struct amdgpu_vram_mgr_resource, base);
-}
-
-#endif
index 288fce7..9c964cd 100644 (file)
@@ -2796,7 +2796,8 @@ static int dce_v10_0_sw_init(void *handle)
        adev_to_drm(adev)->mode_config.max_height = 16384;
 
        adev_to_drm(adev)->mode_config.preferred_depth = 24;
-       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+       /* disable prefer shadow for now due to hibernation issues */
+       adev_to_drm(adev)->mode_config.prefer_shadow = 0;
 
        adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
 
index cbe5250..e0ad9f2 100644 (file)
@@ -2914,7 +2914,8 @@ static int dce_v11_0_sw_init(void *handle)
        adev_to_drm(adev)->mode_config.max_height = 16384;
 
        adev_to_drm(adev)->mode_config.preferred_depth = 24;
-       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+       /* disable prefer shadow for now due to hibernation issues */
+       adev_to_drm(adev)->mode_config.prefer_shadow = 0;
 
        adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
 
index 982855e..3caf6f3 100644 (file)
@@ -2673,7 +2673,8 @@ static int dce_v6_0_sw_init(void *handle)
        adev_to_drm(adev)->mode_config.max_width = 16384;
        adev_to_drm(adev)->mode_config.max_height = 16384;
        adev_to_drm(adev)->mode_config.preferred_depth = 24;
-       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+       /* disable prefer shadow for now due to hibernation issues */
+       adev_to_drm(adev)->mode_config.prefer_shadow = 0;
        adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
        adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
index 8444074..7c75df5 100644 (file)
@@ -2693,7 +2693,8 @@ static int dce_v8_0_sw_init(void *handle)
        adev_to_drm(adev)->mode_config.max_height = 16384;
 
        adev_to_drm(adev)->mode_config.preferred_depth = 24;
-       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+       /* disable prefer shadow for now due to hibernation issues */
+       adev_to_drm(adev)->mode_config.prefer_shadow = 0;
 
        adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
 
index bf42004..a08769c 100644 (file)
@@ -184,6 +184,8 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
                        /* Navi2x+, Navi1x+ */
                        if (gc_version == IP_VERSION(10, 3, 6))
                                kfd->device_info.no_atomic_fw_version = 14;
+                       else if (gc_version == IP_VERSION(10, 3, 7))
+                               kfd->device_info.no_atomic_fw_version = 3;
                        else if (gc_version >= IP_VERSION(10, 3, 0))
                                kfd->device_info.no_atomic_fw_version = 92;
                        else if (gc_version >= IP_VERSION(10, 1, 1))
index b4029c0..ec6771e 100644 (file)
@@ -6,7 +6,7 @@ config DRM_AMD_DC
        bool "AMD DC - Enable new display engine"
        default y
        select SND_HDA_COMPONENT if SND_HDA_CORE
-       select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+       select DRM_AMD_DC_DCN if (X86 || PPC_LONG_DOUBLE_128) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
        help
          Choose this option if you want to use the new display engine
          support for AMDGPU. This adds required support for Vega and
index 70be67a..3087dd1 100644 (file)
@@ -72,6 +72,7 @@
 #include <linux/pci.h>
 #include <linux/firmware.h>
 #include <linux/component.h>
+#include <linux/dmi.h>
 
 #include <drm/display/drm_dp_mst_helper.h>
 #include <drm/display/drm_hdmi_helper.h>
@@ -462,6 +463,26 @@ static void dm_pflip_high_irq(void *interrupt_params)
                     vrr_active, (int) !e);
 }
 
+static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
+{
+       struct drm_crtc *crtc = &acrtc->base;
+       struct drm_device *dev = crtc->dev;
+       unsigned long flags;
+
+       drm_crtc_handle_vblank(crtc);
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       /* Send completion event for cursor-only commits */
+       if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+               drm_crtc_send_vblank_event(crtc, acrtc->event);
+               drm_crtc_vblank_put(crtc);
+               acrtc->event = NULL;
+       }
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
 static void dm_vupdate_high_irq(void *interrupt_params)
 {
        struct common_irq_params *irq_params = interrupt_params;
@@ -500,7 +521,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
-                       drm_crtc_handle_vblank(&acrtc->base);
+                       dm_crtc_handle_vblank(acrtc);
 
                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
@@ -552,7 +573,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
-               drm_crtc_handle_vblank(&acrtc->base);
+               dm_crtc_handle_vblank(acrtc);
 
        /**
         * Following stuff must happen at start of vblank, for crc
@@ -1382,6 +1403,41 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
        return false;
 }
 
+static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+               },
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+               },
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+               },
+       },
+       {}
+};
+
+static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+       const struct dmi_system_id *dmi_id;
+
+       dm->aux_hpd_discon_quirk = false;
+
+       dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
+       if (dmi_id) {
+               dm->aux_hpd_discon_quirk = true;
+               DRM_INFO("aux_hpd_discon_quirk attached\n");
+       }
+}
+
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
        struct dc_init_data init_data;
@@ -1508,6 +1564,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        }
 
        INIT_LIST_HEAD(&adev->dm.da_list);
+
+       retrieve_dmi_info(&adev->dm);
+
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);
 
@@ -1594,7 +1653,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
 #endif
-       if (dc_enable_dmub_notifications(adev->dm.dc)) {
+       if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
                init_completion(&adev->dm.dmub_aux_transfer_done);
                adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
                if (!adev->dm.dmub_notify) {
@@ -1630,6 +1689,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                goto error;
        }
 
+       /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+        * It is expected that DMUB will resend any pending notifications at this point, for
+        * example HPD from DPIA.
+        */
+       if (dc_is_dmub_outbox_supported(adev->dm.dc))
+               dc_enable_dmub_outbox(adev->dm.dc);
+
        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);
 
@@ -2619,9 +2685,6 @@ static int dm_resume(void *handle)
                 */
                link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
 
-               if (dc_enable_dmub_notifications(adev->dm.dc))
-                       amdgpu_dm_outbox_init(adev);
-
                r = dm_dmub_hw_init(adev);
                if (r)
                        DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -2639,6 +2702,11 @@ static int dm_resume(void *handle)
                        }
                }
 
+               if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+                       amdgpu_dm_outbox_init(adev);
+                       dc_enable_dmub_outbox(adev->dm.dc);
+               }
+
                WARN_ON(!dc_commit_state(dm->dc, dc_state));
 
                dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -2660,13 +2728,15 @@ static int dm_resume(void *handle)
        /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
        dc_resource_state_construct(dm->dc, dm_state->context);
 
-       /* Re-enable outbox interrupts for DPIA. */
-       if (dc_enable_dmub_notifications(adev->dm.dc))
-               amdgpu_dm_outbox_init(adev);
-
        /* Before powering on DC we need to re-initialize DMUB. */
        dm_dmub_hw_resume(adev);
 
+       /* Re-enable outbox interrupts for DPIA. */
+       if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+               amdgpu_dm_outbox_init(adev);
+               dc_enable_dmub_outbox(adev->dm.dc);
+       }
+
        /* power on hardware */
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
 
@@ -2812,7 +2882,7 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
 
 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
 {
-       u32 max_cll, min_cll, max, min, q, r;
+       u32 max_avg, min_cll, max, min, q, r;
        struct amdgpu_dm_backlight_caps *caps;
        struct amdgpu_display_manager *dm;
        struct drm_connector *conn_base;
@@ -2842,7 +2912,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
        caps = &dm->backlight_caps[i];
        caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
        caps->aux_support = false;
-       max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
+       max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
        min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
 
        if (caps->ext_caps->bits.oled == 1 /*||
@@ -2870,8 +2940,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
         * The results of the above expressions can be verified at
         * pre_computed_values.
         */
-       q = max_cll >> 5;
-       r = max_cll % 32;
+       q = max_avg >> 5;
+       r = max_avg % 32;
        max = (1 << q) * pre_computed_values[r];
 
        // min luminance: maxLum * (CV/255)^2 / 100
@@ -3822,7 +3892,8 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
        adev_to_drm(adev)->mode_config.max_height = 16384;
 
        adev_to_drm(adev)->mode_config.preferred_depth = 24;
-       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+       /* disable prefer shadow for now due to hibernation issues */
+       adev_to_drm(adev)->mode_config.prefer_shadow = 0;
        /* indicates support for immediate flip */
        adev_to_drm(adev)->mode_config.async_page_flip = true;
 
@@ -4259,9 +4330,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                }
        }
 
-       /* Disable vblank IRQs aggressively for power-saving. */
-       adev_to_drm(adev)->vblank_disable_immediate = true;
-
        /* loops over all connectors on the board */
        for (i = 0; i < link_cnt; i++) {
                struct dc_link *link = NULL;
@@ -5409,7 +5477,7 @@ fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
                        }
                }
 
-               if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
+               if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
                        *pre_multiplied_alpha = false;
        }
 
@@ -9137,6 +9205,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
        struct amdgpu_bo *abo;
        uint32_t target_vblank, last_flip_vblank;
        bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+       bool cursor_update = false;
        bool pflip_present = false;
        struct {
                struct dc_surface_update surface_updates[MAX_SURFACES];
@@ -9172,8 +9241,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
 
                /* Cursor plane is handled after stream updates */
-               if (plane->type == DRM_PLANE_TYPE_CURSOR)
+               if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+                       if ((fb && crtc == pcrtc) ||
+                           (old_plane_state->fb && old_plane_state->crtc == pcrtc))
+                               cursor_update = true;
+
                        continue;
+               }
 
                if (!fb || !crtc || pcrtc != crtc)
                        continue;
@@ -9336,6 +9410,17 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                bundle->stream_update.vrr_infopacket =
                                        &acrtc_state->stream->vrr_infopacket;
                }
+       } else if (cursor_update && acrtc_state->active_planes > 0 &&
+                  !acrtc_state->force_dpms_off &&
+                  acrtc_attach->base.state->event) {
+               drm_crtc_vblank_get(pcrtc);
+
+               spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+
+               acrtc_attach->event = acrtc_attach->base.state->event;
+               acrtc_attach->base.state->event = NULL;
+
+               spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
        }
 
        /* Update the planes if changed or disable if we don't have any. */
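
The cursor-only commit handling is split across two functions that sit far apart in this file, so the handoff is easier to see side by side: the commit path parks the pending pageflip event on the CRTC, and dm_crtc_handle_vblank() completes it at the next vblank unless a real flip is in flight. Condensed from the hunks above:

    /* Producer, in amdgpu_dm_commit_planes(): park the event. */
    spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
    acrtc_attach->event = acrtc_attach->base.state->event;
    acrtc_attach->base.state->event = NULL;
    spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);

    /* Consumer, in dm_crtc_handle_vblank(): complete it at vblank. */
    spin_lock_irqsave(&dev->event_lock, flags);
    if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
            drm_crtc_send_vblank_event(crtc, acrtc->event);
            drm_crtc_vblank_put(crtc);
            acrtc->event = NULL;
    }
    spin_unlock_irqrestore(&dev->event_lock, flags);
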
index aa34c00..e80ef93 100644 (file)
@@ -540,6 +540,14 @@ struct amdgpu_display_manager {
         * last successfully applied backlight values.
         */
        u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
+
+       /**
+        * @aux_hpd_discon_quirk:
+        *
+        * Quirk for an HPD disconnect that occurs while an AUX transaction
+        * is ongoing; seen on certain Intel platforms.
+        */
+       bool aux_hpd_discon_quirk;
 };
 
 enum dsc_clock_force_state {
index 9221b66..2b9b095 100644 (file)
@@ -56,6 +56,8 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
        ssize_t result = 0;
        struct aux_payload payload;
        enum aux_return_code_type operation_result;
+       struct amdgpu_device *adev;
+       struct ddc_service *ddc;
 
        if (WARN_ON(msg->size > 16))
                return -E2BIG;
@@ -74,6 +76,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
        result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
                                      &operation_result);
 
+       /*
+        * Workaround for certain Intel platforms where HPD unexpectedly pulls low
+        * during the first sideband message transaction and AUX_RET_ERROR_HPD_DISCON
+        * is returned; the AUX transaction actually succeeds, so bypass the error.
+        */
+       ddc = TO_DM_AUX(aux)->ddc_service;
+       adev = ddc->ctx->driver_context;
+       if (adev->dm.aux_hpd_discon_quirk) {
+               if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
+                       operation_result == AUX_RET_ERROR_HPD_DISCON) {
+                       result = 0;
+                       operation_result = AUX_RET_SUCCESS;
+               }
+       }
+
        if (payload.write && result >= 0)
                result = msg->size;
 
index fb4ae80..f438172 100644 (file)
@@ -550,7 +550,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
                if (!bw_params->clk_table.entries[i].dtbclk_mhz)
                        bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
        }
-       ASSERT(bw_params->clk_table.entries[i].dcfclk_mhz);
+       ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
        bw_params->vram_type = bios_info->memory_type;
        bw_params->num_channels = bios_info->ma_channel_number;
        if (!bw_params->num_channels)
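
The ASSERT fix is a classic post-loop off-by-one: after for (i = 0; i < n; i++) terminates, i equals n, so entries[i] reads one past the end of the table while entries[i - 1] is the last element the loop touched. A standalone illustration:

    #include <assert.h>

    int main(void)
    {
            int dcfclk_mhz[4] = { 400, 600, 800, 1000 };
            int i;

            for (i = 0; i < 4; i++)
                    ;       /* populate/patch each entry, as the loop above does */

            /* here i == 4: dcfclk_mhz[i] would be out of bounds;
             * dcfclk_mhz[i - 1] is the last valid entry */
            assert(dcfclk_mhz[i - 1] != 0);
            return 0;
    }
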
index cbc47ae..d8eee89 100644 (file)
@@ -944,7 +944,7 @@ static void override_lane_settings(const struct link_training_settings *lt_setti
 
                return;
 
-       for (lane = 1; lane < LANE_COUNT_DP_MAX; lane++) {
+       for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
                if (lt_settings->voltage_swing)
                        lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing;
                if (lt_settings->pre_emphasis)
index 6774dd8..3fe3fba 100644 (file)
@@ -1117,12 +1117,13 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
         * on certain displays, such as the Sharp 4k. 36bpp is needed
         * to support SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 and
         * SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 with actual > 10 bpc
-        * precision on at least DCN display engines. However, at least
-        * Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth,
-        * so use only 30 bpp on DCE_VERSION_11_0. Testing with DCE 11.2 and 8.3
-        * did not show such problems, so this seems to be the exception.
+        * precision on DCN display engines, but apparently not for DCE, as
+        * far as testing on DCE-11.2 and DCE-8 showed. Various DCE parts have
+        * problems: Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth,
+        * and neither does DCE-8 at 4k resolution nor DCE-11.2 (broken identity pixel
+        * passthrough). Therefore only use 36 bpp on DCN where it is actually needed.
         */
-       if (plane_state->ctx->dce_version > DCE_VERSION_11_0)
+       if (plane_state->ctx->dce_version > DCE_VERSION_MAX)
                pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
        else
                pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
index 7eff781..5f2afa5 100644 (file)
@@ -1766,29 +1766,9 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
                                break;
                        }
                }
-
-               /*
-                * TO-DO: So far the code logic below only addresses single eDP case.
-                * For dual eDP case, there are a few things that need to be
-                * implemented first:
-                *
-                * 1. Change the fastboot logic above, so eDP link[0 or 1]'s
-                * stream[0 or 1] will all be checked.
-                *
-                * 2. Change keep_edp_vdd_on to an array, and maintain keep_edp_vdd_on
-                * for each eDP.
-                *
-                * Once above 2 things are completed, we can then change the logic below
-                * correspondingly, so dual eDP case will be fully covered.
-                */
-
-               // We are trying to enable eDP, don't power down VDD if eDP stream is existing
-               if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) {
+               // We are trying to enable eDP, don't power down VDD
+               if (can_apply_edp_fast_boot)
                        keep_edp_vdd_on = true;
-                       DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n");
-               } else {
-                       DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n");
-               }
        }
 
        // Check seamless boot support
index 970b65e..eaa7032 100644 (file)
@@ -212,6 +212,9 @@ static void dpp2_cnv_setup (
                break;
        }
 
+       /* Set default color space based on format if none is given. */
+       color_space = input_color_space ? input_color_space : color_space;
+
        if (is_2bit == 1 && alpha_2bit_lut != NULL) {
                REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0);
                REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1);
index 8b6505b..f50ab96 100644 (file)
@@ -153,6 +153,9 @@ static void dpp201_cnv_setup(
                break;
        }
 
+       /* Set default color space based on format if none is given. */
+       color_space = input_color_space ? input_color_space : color_space;
+
        if (is_2bit == 1 && alpha_2bit_lut != NULL) {
                REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0);
                REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1);
index ab3918c..0dcc075 100644 (file)
@@ -294,6 +294,9 @@ static void dpp3_cnv_setup (
                break;
        }
 
+       /* Set default color space based on format if none is given. */
+       color_space = input_color_space ? input_color_space : color_space;
+
        if (is_2bit == 1 && alpha_2bit_lut != NULL) {
                REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0);
                REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1);
index 5f8809f..2fbd292 100644 (file)
@@ -1228,6 +1228,8 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
        uint32_t crystal_clock_freq = 2500;
        uint32_t tach_period;
 
+       if (speed == 0)
+               return -EINVAL;
        /*
         * To prevent possible overheating, some ASICs may have a requirement
         * for a minimum fan speed:
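
The added guard matters because the tachometer period is later derived by dividing a crystal-clock-based constant by the requested RPM, so speed == 0 would divide by zero. A standalone illustration with an assumed formula (illustrative only, not the SMU's exact computation):

    #include <stdint.h>
    #include <stdio.h>

    static int set_fan_speed_rpm(uint32_t speed, uint32_t *tach_period)
    {
            const uint32_t crystal_clock_freq = 2500;

            if (speed == 0)
                    return -1;      /* -EINVAL in the kernel */

            /* assumed shape of the computation: constant / RPM */
            *tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
            return 0;
    }

    int main(void)
    {
            uint32_t period;

            if (set_fan_speed_rpm(1500, &period) == 0)
                    printf("tach_period = %u\n", period);
            return 0;
    }
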
index b2675c7..4b503c5 100644 (file)
@@ -74,22 +74,6 @@ static int fsl_ldb_attach(struct drm_bridge *bridge,
                                 bridge, flags);
 }
 
-static int fsl_ldb_atomic_check(struct drm_bridge *bridge,
-                               struct drm_bridge_state *bridge_state,
-                               struct drm_crtc_state *crtc_state,
-                               struct drm_connector_state *conn_state)
-{
-       /* Invert DE signal polarity. */
-       bridge_state->input_bus_cfg.flags &= ~(DRM_BUS_FLAG_DE_LOW |
-                                              DRM_BUS_FLAG_DE_HIGH);
-       if (bridge_state->output_bus_cfg.flags & DRM_BUS_FLAG_DE_LOW)
-               bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH;
-       else if (bridge_state->output_bus_cfg.flags & DRM_BUS_FLAG_DE_HIGH)
-               bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_LOW;
-
-       return 0;
-}
-
 static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
                                  struct drm_bridge_state *old_bridge_state)
 {
@@ -153,7 +137,7 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
        reg = LDB_CTRL_CH0_ENABLE;
 
        if (fsl_ldb->lvds_dual_link)
-               reg |= LDB_CTRL_CH1_ENABLE;
+               reg |= LDB_CTRL_CH1_ENABLE | LDB_CTRL_SPLIT_MODE;
 
        if (lvds_format_24bpp) {
                reg |= LDB_CTRL_CH0_DATA_WIDTH;
@@ -233,7 +217,7 @@ fsl_ldb_mode_valid(struct drm_bridge *bridge,
 {
        struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
 
-       if (mode->clock > (fsl_ldb->lvds_dual_link ? 80000 : 160000))
+       if (mode->clock > (fsl_ldb->lvds_dual_link ? 160000 : 80000))
                return MODE_CLOCK_HIGH;
 
        return MODE_OK;
@@ -241,7 +225,6 @@ fsl_ldb_mode_valid(struct drm_bridge *bridge,
 
 static const struct drm_bridge_funcs funcs = {
        .attach = fsl_ldb_attach,
-       .atomic_check = fsl_ldb_atomic_check,
        .atomic_enable = fsl_ldb_atomic_enable,
        .atomic_disable = fsl_ldb_atomic_disable,
        .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
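
The swapped ternary above is the substance of the mode_valid fix: dual-link
LVDS carries twice the per-link pixel rate, so it is the dual-link case that
permits the higher clock. A minimal restatement of the corrected check:

#include <stdbool.h>

/* Corrected limit: 160 MHz for dual-link LVDS, 80 MHz for single-link. */
static bool fsl_ldb_clock_ok(int clock_khz, bool dual_link)
{
	return clock_khz <= (dual_link ? 160000 : 80000);
}
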
index 74bd4a7..059fd71 100644 (file)
@@ -329,7 +329,20 @@ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
                                                     const struct drm_driver *req_driver)
 {
        resource_size_t base, size;
-       int bar, ret = 0;
+       int bar, ret;
+
+       /*
+        * WARNING: Apparently we must kick fbdev drivers before vgacon,
+        * otherwise the vga fbdev driver falls over.
+        */
+#if IS_REACHABLE(CONFIG_FB)
+       ret = remove_conflicting_pci_framebuffers(pdev, req_driver->name);
+       if (ret)
+               return ret;
+#endif
+       ret = vga_remove_vgacon(pdev);
+       if (ret)
+               return ret;
 
        for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) {
                if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
@@ -339,15 +352,6 @@ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
                drm_aperture_detach_drivers(base, size);
        }
 
-       /*
-        * WARNING: Apparently we must kick fbdev drivers before vgacon,
-        * otherwise the vga fbdev driver falls over.
-        */
-#if IS_REACHABLE(CONFIG_FB)
-       ret = remove_conflicting_pci_framebuffers(pdev, req_driver->name);
-#endif
-       if (ret == 0)
-               ret = vga_remove_vgacon(pdev);
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL(drm_aperture_remove_conflicting_pci_framebuffers);
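
Besides moving the fbdev/vgacon removal ahead of the per-BAR detach loop, the
rewrite makes each step fail fast instead of folding everything into one ret.
A hypothetical distillation of the required ordering (helper names invented
for the sketch):

static int remove_conflicts_sketch(struct pci_dev *pdev, const char *name)
{
	int ret;

	ret = kick_fbdev(pdev, name);	/* fbdev first, or the vga fbdev driver falls over */
	if (ret)
		return ret;
	ret = kick_vgacon(pdev);	/* then vgacon */
	if (ret)
		return ret;
	detach_drm_per_bar(pdev);	/* only then detach DRM drivers */
	return 0;
}
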
index d5962a3..e5fc875 100644 (file)
@@ -64,8 +64,13 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem,
                     struct iosys_map *map)
 {
        struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+       int ret;
+
+       dma_resv_lock(gem->resv, NULL);
+       ret = ttm_bo_vmap(bo, map);
+       dma_resv_unlock(gem->resv);
 
-       return ttm_bo_vmap(bo, map);
+       return ret;
 }
 EXPORT_SYMBOL(drm_gem_ttm_vmap);
 
@@ -82,7 +87,9 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
 {
        struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
 
+       dma_resv_lock(gem->resv, NULL);
        ttm_bo_vunmap(bo, map);
+       dma_resv_unlock(gem->resv);
 }
 EXPORT_SYMBOL(drm_gem_ttm_vunmap);
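
With this change the reservation lock moves inside the helpers, so callers must
not already hold gem->resv across these calls. A hedged usage sketch (not taken
from this patch):

struct iosys_map map;
int err;

err = drm_gem_ttm_vmap(gem, &map);	/* takes and drops gem->resv itself */
if (!err) {
	/* ... CPU access through map ... */
	drm_gem_ttm_vunmap(gem, &map);	/* likewise locks internally */
}
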
 
index 4e853ac..d4e0f2e 100644 (file)
@@ -152,6 +152,12 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
                },
                .driver_data = (void *)&lcd800x1280_rightside_up,
+       }, {    /* AYA NEO NEXT */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+                 DMI_MATCH(DMI_BOARD_NAME, "NEXT"),
+               },
+               .driver_data = (void *)&lcd800x1280_rightside_up,
        }, {    /* Chuwi HiBook (CWI514) */
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
@@ -280,6 +286,21 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
                },
                .driver_data = (void *)&lcd1200x1920_rightside_up,
+       }, {    /* Lenovo Yoga Tablet 2 830F / 830L */
+               .matches = {
+                /*
+                 * Note this also matches the Lenovo Yoga Tablet 2 1050F/L
+                 * since that uses the same mainboard. The resolution match
+                 * will limit this to only matching on the 830F/L. Neither has
+                 * any external video outputs so those are not a concern.
+                 */
+                DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+                DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
+                DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
+                /* Partial match on beginning of BIOS version */
+                DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
+               },
+               .driver_data = (void *)&lcd1200x1920_rightside_up,
        }, {    /* OneGX1 Pro */
                .matches = {
                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
index 424ea23..16c5396 100644 (file)
@@ -177,15 +177,15 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
                DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
                DRM_COMPONENT_DRIVER
        }, {
-               DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
-               DRM_COMPONENT_DRIVER
-       }, {
                DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP),
                DRM_COMPONENT_DRIVER
        }, {
                DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI),
                DRM_COMPONENT_DRIVER
        }, {
+               DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
+               DRM_COMPONENT_DRIVER
+       }, {
                DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI),
                DRM_COMPONENT_DRIVER
        }, {
index 9e06f8e..09ce28e 100644 (file)
@@ -26,6 +26,7 @@
 #include <drm/drm_print.h>
 
 #include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
 
 /* Sysreg registers for MIC */
 #define DSD_CFG_MUX    0x1004
@@ -100,9 +101,7 @@ struct exynos_mic {
 
        bool i80_mode;
        struct videomode vm;
-       struct drm_encoder *encoder;
        struct drm_bridge bridge;
-       struct drm_bridge *next_bridge;
 
        bool enabled;
 };
@@ -229,8 +228,6 @@ static void mic_set_reg_on(struct exynos_mic *mic, bool enable)
        writel(reg, mic->reg + MIC_OP);
 }
 
-static void mic_disable(struct drm_bridge *bridge) { }
-
 static void mic_post_disable(struct drm_bridge *bridge)
 {
        struct exynos_mic *mic = bridge->driver_private;
@@ -297,34 +294,30 @@ unlock:
        mutex_unlock(&mic_mutex);
 }
 
-static void mic_enable(struct drm_bridge *bridge) { }
-
-static int mic_attach(struct drm_bridge *bridge,
-                     enum drm_bridge_attach_flags flags)
-{
-       struct exynos_mic *mic = bridge->driver_private;
-
-       return drm_bridge_attach(bridge->encoder, mic->next_bridge,
-                                &mic->bridge, flags);
-}
-
 static const struct drm_bridge_funcs mic_bridge_funcs = {
-       .disable = mic_disable,
        .post_disable = mic_post_disable,
        .mode_set = mic_mode_set,
        .pre_enable = mic_pre_enable,
-       .enable = mic_enable,
-       .attach = mic_attach,
 };
 
 static int exynos_mic_bind(struct device *dev, struct device *master,
                           void *data)
 {
        struct exynos_mic *mic = dev_get_drvdata(dev);
+       struct drm_device *drm_dev = data;
+       struct exynos_drm_crtc *crtc = exynos_drm_crtc_get_by_type(drm_dev,
+                                                      EXYNOS_DISPLAY_TYPE_LCD);
+       struct drm_encoder *e, *encoder = NULL;
+
+       drm_for_each_encoder(e, drm_dev)
+               if (e->possible_crtcs == drm_crtc_mask(&crtc->base))
+                       encoder = e;
+       if (!encoder)
+               return -ENODEV;
 
        mic->bridge.driver_private = mic;
 
-       return 0;
+       return drm_bridge_attach(encoder, &mic->bridge, NULL, 0);
 }
 
 static void exynos_mic_unbind(struct device *dev, struct device *master,
@@ -388,7 +381,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct exynos_mic *mic;
-       struct device_node *remote;
        struct resource res;
        int ret, i;
 
@@ -432,16 +424,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
                }
        }
 
-       remote = of_graph_get_remote_node(dev->of_node, 1, 0);
-       mic->next_bridge = of_drm_find_bridge(remote);
-       if (IS_ERR(mic->next_bridge)) {
-               DRM_DEV_ERROR(dev, "mic: Failed to find next bridge\n");
-               ret = PTR_ERR(mic->next_bridge);
-               goto err;
-       }
-
-       of_node_put(remote);
-
        platform_set_drvdata(pdev, mic);
 
        mic->bridge.funcs = &mic_bridge_funcs;
index e4a79c1..ff67899 100644 (file)
@@ -388,13 +388,23 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
        return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
 }
 
+static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
+{
+       u32 voltage;
+
+       voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK;
+
+       return voltage == VOLTAGE_INFO_0_85V;
+}
+
 static int icl_max_source_rate(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
 
-       if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
+       if (intel_phy_is_combo(dev_priv, phy) &&
+           (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp)))
                return 540000;
 
        return 810000;
@@ -402,7 +412,23 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
 
 static int ehl_max_source_rate(struct intel_dp *intel_dp)
 {
-       if (intel_dp_is_edp(intel_dp))
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+       enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+
+       if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy))
+               return 540000;
+
+       return 810000;
+}
+
+static int dg1_max_source_rate(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+       if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy))
                return 540000;
 
        return 810000;
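
The three helpers above encode one rule with per-platform variations. A
condensed model of the icl variant (link rates in kHz of link clock):

#include <stdbool.h>

/* Low-voltage (0.85 V) combo-PHY SKUs are capped at HBR2; on icl the cap
 * also applies to non-eDP combo-PHY ports. */
static int icl_like_max_rate(bool combo_phy, bool low_voltage_sku, bool is_edp)
{
	if (combo_phy && (low_voltage_sku || !is_edp))
		return 540000;	/* HBR2 */
	return 810000;		/* HBR3 */
}
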
@@ -445,7 +471,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
                        max_rate = dg2_max_source_rate(intel_dp);
                else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
                         IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
-                       max_rate = 810000;
+                       max_rate = dg1_max_source_rate(intel_dp);
                else if (IS_JSL_EHL(dev_priv))
                        max_rate = ehl_max_source_rate(intel_dp);
                else
index 061b277..14d2a64 100644 (file)
@@ -839,6 +839,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
        ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
                                 DRM_MODE_CONNECTOR_DisplayPort);
        if (ret) {
+               drm_dp_mst_put_port_malloc(port);
                intel_connector_free(intel_connector);
                return NULL;
        }
index 22f5557..88c2f38 100644 (file)
@@ -2396,7 +2396,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
 }
 
 /*
- * Display WA #22010492432: ehl, tgl, adl-p
+ * Display WA #22010492432: ehl, tgl, adl-s, adl-p
  * Program half of the nominal DCO divider fraction value.
  */
 static bool
@@ -2404,7 +2404,7 @@ ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
 {
        return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
                 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
-                IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) &&
+                IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
                 i915->dpll.ref_clks.nssc == 38400;
 }
 
index ab4c5ab..321af10 100644 (file)
@@ -933,8 +933,9 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
        case I915_CONTEXT_PARAM_PERSISTENCE:
                if (args->size)
                        ret = -EINVAL;
-               ret = proto_context_set_persistence(fpriv->dev_priv, pc,
-                                                   args->value);
+               else
+                       ret = proto_context_set_persistence(fpriv->dev_priv, pc,
+                                                           args->value);
                break;
 
        case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
index 3e5d605..1674b0c 100644 (file)
@@ -35,12 +35,12 @@ bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
        if (obj->cache_dirty)
                return false;
 
-       if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
-               return true;
-
        if (IS_DGFX(i915))
                return false;
 
+       if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+               return true;
+
        /* Currently in use by HW (display engine)? Keep flushed. */
        return i915_gem_object_is_framebuffer(obj);
 }
index c326bd2..30fe847 100644 (file)
@@ -999,7 +999,8 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
                        }
                }
 
-               err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+               /* Reserve enough slots to accommodate composite fences */
+               err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches);
                if (err)
                        return err;
 
index f46ee16..a4fb577 100644 (file)
@@ -60,6 +60,8 @@ __i915_gem_object_create_region(struct intel_memory_region *mem,
        if (page_size)
                default_page_size = page_size;
 
+       /* We should be able to fit a page within an sg entry */
+       GEM_BUG_ON(overflows_type(default_page_size, u32));
        GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
        GEM_BUG_ON(default_page_size < PAGE_SIZE);
 
index 4c25d9b..8f1bb6a 100644 (file)
@@ -620,10 +620,15 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
                         struct ttm_resource *res)
 {
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+       u32 page_alignment;
 
        if (!i915_ttm_gtt_binds_lmem(res))
                return i915_ttm_tt_get_st(bo->ttm);
 
+       page_alignment = bo->page_alignment << PAGE_SHIFT;
+       if (!page_alignment)
+               page_alignment = obj->mm.region->min_page_size;
+
        /*
         * If CPU mapping differs, we need to add the ttm_tt pages to
         * the resulting st. Might make sense for GGTT.
@@ -634,7 +639,8 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
                        struct i915_refct_sgt *rsgt;
 
                        rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
-                                                                res);
+                                                                res,
+                                                                page_alignment);
                        if (IS_ERR(rsgt))
                                return rsgt;
 
@@ -643,7 +649,8 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
                return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);
        }
 
-       return intel_region_ttm_resource_to_rsgt(obj->mm.region, res);
+       return intel_region_ttm_resource_to_rsgt(obj->mm.region, res,
+                                                page_alignment);
 }
 
 static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
index 319936f..e6e01c2 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/jiffies.h>
 
 #include "gt/intel_engine.h"
+#include "gt/intel_rps.h"
 
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
@@ -31,6 +32,37 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
                                      timeout);
 }
 
+static void
+i915_gem_object_boost(struct dma_resv *resv, unsigned int flags)
+{
+       struct dma_resv_iter cursor;
+       struct dma_fence *fence;
+
+       /*
+        * Prescan all fences for potential boosting before we begin waiting.
+        *
+        * When we wait, we wait on outstanding fences serially. If the
+        * dma-resv contains a sequence such as 1:1, 1:2 instead of a reduced
+        * form 1:2, then as we look at each wait in turn we see that each
+        * request is currently executing and not worthy of boosting. But if
+        * we only happen to look at the final fence in the sequence (because
+        * of request coalescing or splitting between read/write arrays by
+        * the iterator), then we would boost. As such our decision to boost
+        * or not is delicately balanced on the order we wait on fences.
+        *
+        * So instead of looking for boosts sequentially, look for all boosts
+        * upfront and then wait on the outstanding fences.
+        */
+
+       dma_resv_iter_begin(&cursor, resv,
+                           dma_resv_usage_rw(flags & I915_WAIT_ALL));
+       dma_resv_for_each_fence_unlocked(&cursor, fence)
+               if (dma_fence_is_i915(fence) &&
+                   !i915_request_started(to_request(fence)))
+                       intel_rps_boost(to_request(fence));
+       dma_resv_iter_end(&cursor);
+}
+
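
A worked instance of the ordering hazard described in the comment above, with
invented request numbering:

/*
 * Timeline trace: resv holds fences { 1:1, 1:2 }.
 *   serial wait:  look at 1:1 -> executing     -> no boost
 *                 look at 1:2 -> now executing -> no boost
 *   but if the iterator hands us only { 1:2 } before it has started
 *   (request coalescing / read-write split)    -> boost
 * Prescanning every fence up front removes this order dependence.
 */
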
 static long
 i915_gem_object_wait_reservation(struct dma_resv *resv,
                                 unsigned int flags,
@@ -40,6 +72,8 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
        struct dma_fence *fence;
        long ret = timeout ?: 1;
 
+       i915_gem_object_boost(resv, flags);
+
        dma_resv_iter_begin(&cursor, resv,
                            dma_resv_usage_rw(flags & I915_WAIT_ALL));
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
index 09f8254..44e7339 100644 (file)
@@ -273,10 +273,17 @@ struct intel_context {
                u8 child_index;
                /** @guc: GuC specific members for parallel submission */
                struct {
-                       /** @wqi_head: head pointer in work queue */
+                       /** @wqi_head: cached head pointer in work queue */
                        u16 wqi_head;
-                       /** @wqi_tail: tail pointer in work queue */
+                       /** @wqi_tail: cached tail pointer in work queue */
                        u16 wqi_tail;
+                       /** @wq_head: pointer to the actual head in work queue */
+                       u32 *wq_head;
+                       /** @wq_tail: pointer to the actual tail in work queue */
+                       u32 *wq_tail;
+                       /** @wq_status: pointer to the status in work queue */
+                       u32 *wq_status;
+
                        /**
                         * @parent_page: page in context state (ce->state) used
                         * by parent for work queue, process descriptor
index 1431f1e..04e435b 100644 (file)
@@ -201,6 +201,8 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine);
 int intel_engine_stop_cs(struct intel_engine_cs *engine);
 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
 
+void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine);
+
 void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
index 14c6ddb..5b6ce10 100644 (file)
@@ -1282,10 +1282,10 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
        intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
 
        /*
-        * Wa_22011802037 : gen12, Prior to doing a reset, ensure CS is
+        * Wa_22011802037: gen11, gen12: Prior to doing a reset, ensure CS is
         * stopped, set ring stop bit and prefetch disable bit to halt CS
         */
-       if (GRAPHICS_VER(engine->i915) == 12)
+       if (IS_GRAPHICS_VER(engine->i915, 11, 12))
                intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
                                      _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
 
@@ -1308,6 +1308,18 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
                return -ENODEV;
 
        ENGINE_TRACE(engine, "\n");
+       /*
+        * TODO: Find out why stopping the CS occasionally times out. Seen
+        * especially with gem_eio tests.
+        *
+        * Occasionally, trying to stop the CS times out, but this does not
+        * adversely affect functionality. The timeout is set as a config
+        * parameter that defaults to 100ms. In most cases the follow-up
+        * operation is to wait for pending MI_FORCE_WAKEs, and the
+        * assumption is that this timeout is sufficient for those to
+        * complete. Once the failures are root-caused, the caller must
+        * check and handle the return value from this function.
+        */
        if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
                ENGINE_TRACE(engine,
                             "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
@@ -1334,6 +1346,78 @@ void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
        ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
+static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
+{
+       static const i915_reg_t _reg[I915_NUM_ENGINES] = {
+               [RCS0] = MSG_IDLE_CS,
+               [BCS0] = MSG_IDLE_BCS,
+               [VCS0] = MSG_IDLE_VCS0,
+               [VCS1] = MSG_IDLE_VCS1,
+               [VCS2] = MSG_IDLE_VCS2,
+               [VCS3] = MSG_IDLE_VCS3,
+               [VCS4] = MSG_IDLE_VCS4,
+               [VCS5] = MSG_IDLE_VCS5,
+               [VCS6] = MSG_IDLE_VCS6,
+               [VCS7] = MSG_IDLE_VCS7,
+               [VECS0] = MSG_IDLE_VECS0,
+               [VECS1] = MSG_IDLE_VECS1,
+               [VECS2] = MSG_IDLE_VECS2,
+               [VECS3] = MSG_IDLE_VECS3,
+               [CCS0] = MSG_IDLE_CS,
+               [CCS1] = MSG_IDLE_CS,
+               [CCS2] = MSG_IDLE_CS,
+               [CCS3] = MSG_IDLE_CS,
+       };
+       u32 val;
+
+       if (!_reg[engine->id].reg) {
+               drm_err(&engine->i915->drm,
+                       "MSG IDLE undefined for engine id %u\n", engine->id);
+               return 0;
+       }
+
+       val = intel_uncore_read(engine->uncore, _reg[engine->id]);
+
+       /* bits[29:25] & bits[13:9] >> shift */
+       return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
+}
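
A standalone model of the mask arithmetic in the comment above, with invented
register contents: the per-domain enable bits live at [29:25] and the status
bits at [13:9], so shifting right by 16 aligns the enables over the statuses.

#include <stdint.h>
#include <stdio.h>

#define FW_MASK		(0x1fu << 9)	/* stands in for MSG_IDLE_FW_MASK */
#define FW_SHIFT	9		/* stands in for MSG_IDLE_FW_SHIFT */

int main(void)
{
	/* all five domains enabled, two of them reporting pending */
	uint32_t val = (0x1fu << 25) | (0x05u << 9);
	uint32_t pending = (val & (val >> 16) & FW_MASK) >> FW_SHIFT;

	printf("pending = 0x%02x\n", pending);	/* prints 0x05 */
	return 0;
}
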
+
+static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
+{
+       int ret;
+
+       /* Ensure GPM receives fw up/down after CS is stopped */
+       udelay(1);
+
+       /* Wait for forcewake request to complete in GPM */
+       ret =  __intel_wait_for_register_fw(gt->uncore,
+                                           GEN9_PWRGT_DOMAIN_STATUS,
+                                           fw_mask, fw_mask, 5000, 0, NULL);
+
+       /* Ensure CS receives fw ack from GPM */
+       udelay(1);
+
+       if (ret)
+               GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
+}
+
+/*
+ * Wa_22011802037:gen12: In addition to stopping the CS, we need to wait for any
+ * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
+ * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
+ * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
+ * are concerned only with the gt reset here, we use a logical OR of pending
+ * forcewakeups from all reset domains and then wait for them to complete by
+ * querying PWRGT_DOMAIN_STATUS.
+ */
+void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine)
+{
+       u32 fw_pending = __cs_pending_mi_force_wakes(engine);
+
+       if (fw_pending)
+               __gpm_wait_for_fw_complete(engine->gt, fw_pending);
+}
+
 static u32
 read_subslice_reg(const struct intel_engine_cs *engine,
                  int slice, int subslice, i915_reg_t reg)
index 86f7a9a..0627fa1 100644 (file)
@@ -661,6 +661,16 @@ static inline void execlists_schedule_out(struct i915_request *rq)
        i915_request_put(rq);
 }
 
+static u32 map_i915_prio_to_lrc_desc_prio(int prio)
+{
+       if (prio > I915_PRIORITY_NORMAL)
+               return GEN12_CTX_PRIORITY_HIGH;
+       else if (prio < I915_PRIORITY_NORMAL)
+               return GEN12_CTX_PRIORITY_LOW;
+       else
+               return GEN12_CTX_PRIORITY_NORMAL;
+}
+
 static u64 execlists_update_context(struct i915_request *rq)
 {
        struct intel_context *ce = rq->context;
@@ -669,7 +679,7 @@ static u64 execlists_update_context(struct i915_request *rq)
 
        desc = ce->lrc.desc;
        if (rq->engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
-               desc |= lrc_desc_priority(rq_prio(rq));
+               desc |= map_i915_prio_to_lrc_desc_prio(rq_prio(rq));
 
        /*
         * WaIdleLiteRestore:bdw,skl
@@ -2958,6 +2968,13 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
        ring_set_paused(engine, 1);
        intel_engine_stop_cs(engine);
 
+       /*
+        * Wa_22011802037:gen11/gen12: In addition to stopping the CS, we need
+        * to wait for any pending MI force wakeups.
+        */
+       if (IS_GRAPHICS_VER(engine->i915, 11, 12))
+               intel_engine_wait_for_pending_mi_fw(engine);
+
        engine->execlists.reset_ccid = active_ccid(engine);
 }
 
index 53307ca..531af6a 100644 (file)
@@ -785,6 +785,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
 {
        intel_wakeref_t wakeref;
 
+       intel_gt_sysfs_unregister(gt);
        intel_rps_driver_unregister(&gt->rps);
        intel_gsc_fini(&gt->gsc);
 
@@ -1208,6 +1209,20 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
        mutex_lock(&gt->tlb_invalidate_lock);
        intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
+       spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
+
+       for_each_engine(engine, gt, id) {
+               struct reg_and_bit rb;
+
+               rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
+               if (!i915_mmio_reg_offset(rb.reg))
+                       continue;
+
+               intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+       }
+
+       spin_unlock_irq(&uncore->lock);
+
        for_each_engine(engine, gt, id) {
                /*
                 * HW architecture suggest typical invalidation time at 40us,
@@ -1222,7 +1237,6 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
                if (!i915_mmio_reg_offset(rb.reg))
                        continue;
 
-               intel_uncore_write_fw(uncore, rb.reg, rb.bit);
                if (__intel_wait_for_register_fw(uncore,
                                                 rb.reg, rb.bit, 0,
                                                 timeout_us, timeout_ms,
index 8ec8bc6..9e4ebf5 100644 (file)
@@ -24,7 +24,7 @@ bool is_object_gt(struct kobject *kobj)
 
 static struct intel_gt *kobj_to_gt(struct kobject *kobj)
 {
-       return container_of(kobj, struct kobj_gt, base)->gt;
+       return container_of(kobj, struct intel_gt, sysfs_gt);
 }
 
 struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
@@ -72,9 +72,9 @@ static struct attribute *id_attrs[] = {
 };
 ATTRIBUTE_GROUPS(id);
 
+/* A kobject needs a release() method even if it does nothing */
 static void kobj_gt_release(struct kobject *kobj)
 {
-       kfree(kobj);
 }
 
 static struct kobj_type kobj_gt_type = {
@@ -85,8 +85,6 @@ static struct kobj_type kobj_gt_type = {
 
 void intel_gt_sysfs_register(struct intel_gt *gt)
 {
-       struct kobj_gt *kg;
-
        /*
         * We need to make things right with the
         * ABI compatibility. The files were originally
@@ -98,25 +96,22 @@ void intel_gt_sysfs_register(struct intel_gt *gt)
        if (gt_is_root(gt))
                intel_gt_sysfs_pm_init(gt, gt_get_parent_obj(gt));
 
-       kg = kzalloc(sizeof(*kg), GFP_KERNEL);
-       if (!kg)
+       /* init and xfer ownership to sysfs tree */
+       if (kobject_init_and_add(&gt->sysfs_gt, &kobj_gt_type,
+                                gt->i915->sysfs_gt, "gt%d", gt->info.id))
                goto exit_fail;
 
-       kobject_init(&kg->base, &kobj_gt_type);
-       kg->gt = gt;
-
-       /* xfer ownership to sysfs tree */
-       if (kobject_add(&kg->base, gt->i915->sysfs_gt, "gt%d", gt->info.id))
-               goto exit_kobj_put;
-
-       intel_gt_sysfs_pm_init(gt, &kg->base);
+       intel_gt_sysfs_pm_init(gt, &gt->sysfs_gt);
 
        return;
 
-exit_kobj_put:
-       kobject_put(&kg->base);
-
 exit_fail:
+       kobject_put(&gt->sysfs_gt);
        drm_warn(&gt->i915->drm,
                 "failed to initialize gt%d sysfs root\n", gt->info.id);
 }
+
+void intel_gt_sysfs_unregister(struct intel_gt *gt)
+{
+       kobject_put(&gt->sysfs_gt);
+}
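
A minimal sketch of the embedded-kobject pattern adopted above: the kobject is
now a member of struct intel_gt, so release() has nothing of its own to free
and teardown is a single kobject_put() dropping the sysfs reference.

#include <linux/kobject.h>

struct gt_like {
	struct kobject kobj;	/* embedded; freed with the containing object */
};

static void gt_like_release(struct kobject *kobj)
{
	/* intentionally empty: lifetime is owned by the containing struct */
}

static struct kobj_type gt_like_ktype = {
	.release = gt_like_release,
};

/* register:   kobject_init_and_add(&g->kobj, &gt_like_ktype, parent, "gt%d", id);
 * unregister: kobject_put(&g->kobj); */
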
index 9471b26..a99aa7e 100644 (file)
 
 struct intel_gt;
 
-struct kobj_gt {
-       struct kobject base;
-       struct intel_gt *gt;
-};
-
 bool is_object_gt(struct kobject *kobj);
 
 struct drm_i915_private *kobj_to_i915(struct kobject *kobj);
@@ -28,6 +23,7 @@ intel_gt_create_kobj(struct intel_gt *gt,
                     const char *name);
 
 void intel_gt_sysfs_register(struct intel_gt *gt);
+void intel_gt_sysfs_unregister(struct intel_gt *gt);
 struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
                                            const char *name);
 
index b06611c..edd7a3c 100644 (file)
@@ -224,6 +224,9 @@ struct intel_gt {
        } mocs;
 
        struct intel_pxp pxp;
+
+       /* gt/gtN sysfs */
+       struct kobject sysfs_gt;
 };
 
 enum intel_gt_scratch_field {
index 31be734..a390f08 100644 (file)
@@ -111,16 +111,6 @@ enum {
 #define XEHP_SW_COUNTER_SHIFT                  58
 #define XEHP_SW_COUNTER_WIDTH                  6
 
-static inline u32 lrc_desc_priority(int prio)
-{
-       if (prio > I915_PRIORITY_NORMAL)
-               return GEN12_CTX_PRIORITY_HIGH;
-       else if (prio < I915_PRIORITY_NORMAL)
-               return GEN12_CTX_PRIORITY_LOW;
-       else
-               return GEN12_CTX_PRIORITY_NORMAL;
-}
-
 static inline void lrc_runtime_start(struct intel_context *ce)
 {
        struct intel_context_stats *stats = &ce->stats;
index a5338c3..c68d36f 100644 (file)
@@ -300,9 +300,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
        return err;
 }
 
-static int gen6_reset_engines(struct intel_gt *gt,
-                             intel_engine_mask_t engine_mask,
-                             unsigned int retry)
+static int __gen6_reset_engines(struct intel_gt *gt,
+                               intel_engine_mask_t engine_mask,
+                               unsigned int retry)
 {
        struct intel_engine_cs *engine;
        u32 hw_mask;
@@ -321,6 +321,20 @@ static int gen6_reset_engines(struct intel_gt *gt,
        return gen6_hw_domain_reset(gt, hw_mask);
 }
 
+static int gen6_reset_engines(struct intel_gt *gt,
+                             intel_engine_mask_t engine_mask,
+                             unsigned int retry)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&gt->uncore->lock, flags);
+       ret = __gen6_reset_engines(gt, engine_mask, retry);
+       spin_unlock_irqrestore(&gt->uncore->lock, flags);
+
+       return ret;
+}
+
 static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
 {
        int vecs_id;
@@ -487,9 +501,9 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)
        rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
 }
 
-static int gen11_reset_engines(struct intel_gt *gt,
-                              intel_engine_mask_t engine_mask,
-                              unsigned int retry)
+static int __gen11_reset_engines(struct intel_gt *gt,
+                                intel_engine_mask_t engine_mask,
+                                unsigned int retry)
 {
        struct intel_engine_cs *engine;
        intel_engine_mask_t tmp;
@@ -583,8 +597,11 @@ static int gen8_reset_engines(struct intel_gt *gt,
        struct intel_engine_cs *engine;
        const bool reset_non_ready = retry >= 1;
        intel_engine_mask_t tmp;
+       unsigned long flags;
        int ret;
 
+       spin_lock_irqsave(&gt->uncore->lock, flags);
+
        for_each_engine_masked(engine, gt, engine_mask, tmp) {
                ret = gen8_engine_reset_prepare(engine);
                if (ret && !reset_non_ready)
@@ -612,17 +629,19 @@ static int gen8_reset_engines(struct intel_gt *gt,
         * This is best effort, so ignore any error from the initial reset.
         */
        if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
-               gen11_reset_engines(gt, gt->info.engine_mask, 0);
+               __gen11_reset_engines(gt, gt->info.engine_mask, 0);
 
        if (GRAPHICS_VER(gt->i915) >= 11)
-               ret = gen11_reset_engines(gt, engine_mask, retry);
+               ret = __gen11_reset_engines(gt, engine_mask, retry);
        else
-               ret = gen6_reset_engines(gt, engine_mask, retry);
+               ret = __gen6_reset_engines(gt, engine_mask, retry);
 
 skip_reset:
        for_each_engine_masked(engine, gt, engine_mask, tmp)
                gen8_engine_reset_cancel(engine);
 
+       spin_unlock_irqrestore(&gt->uncore->lock, flags);
+
        return ret;
 }
 
index 8b2c11d..1109088 100644 (file)
@@ -176,8 +176,8 @@ static int live_lrc_layout(void *arg)
                        continue;
 
                hw = shmem_pin_map(engine->default_state);
-               if (IS_ERR(hw)) {
-                       err = PTR_ERR(hw);
+               if (!hw) {
+                       err = -ENOMEM;
                        break;
                }
                hw += LRC_STATE_OFFSET / sizeof(*hw);
@@ -365,8 +365,8 @@ static int live_lrc_fixed(void *arg)
                        continue;
 
                hw = shmem_pin_map(engine->default_state);
-               if (IS_ERR(hw)) {
-                       err = PTR_ERR(hw);
+               if (!hw) {
+                       err = -ENOMEM;
                        break;
                }
                hw += LRC_STATE_OFFSET / sizeof(*hw);
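
Both hunks above rest on the same return convention: shmem_pin_map() indicates
failure with NULL, not an ERR_PTR, so the old IS_ERR()/PTR_ERR() checks could
never fire. Restated as a sketch:

u32 *hw = shmem_pin_map(engine->default_state);
if (!hw)		/* NULL on failure; IS_ERR()/PTR_ERR() do not apply */
	return -ENOMEM;
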
index 4ef9990..29ef8af 100644 (file)
@@ -122,6 +122,9 @@ enum intel_guc_action {
        INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
        INTEL_GUC_ACTION_SCHED_ENGINE_MODE_SET = 0x1003,
        INTEL_GUC_ACTION_SCHED_ENGINE_MODE_DONE = 0x1004,
+       INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY = 0x1005,
+       INTEL_GUC_ACTION_V69_SET_CONTEXT_EXECUTION_QUANTUM = 0x1006,
+       INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007,
        INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008,
        INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009,
        INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES = 0x100B,
index 2c4ad4a..8c6885f 100644 (file)
@@ -310,8 +310,8 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
        if (IS_DG2(gt->i915))
                flags |= GUC_WA_DUAL_QUEUE;
 
-       /* Wa_22011802037: graphics version 12 */
-       if (GRAPHICS_VER(gt->i915) == 12)
+       /* Wa_22011802037: graphics version 11/12 */
+       if (IS_GRAPHICS_VER(gt->i915, 11, 12))
                flags |= GUC_WA_PRE_PARSER;
 
        /* Wa_16011777198:dg2 */
index 966e69a..9feda10 100644 (file)
@@ -170,6 +170,11 @@ struct intel_guc {
        /** @ads_engine_usage_size: size of engine usage in the ADS */
        u32 ads_engine_usage_size;
 
+       /** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
+       struct i915_vma *lrc_desc_pool_v69;
+       /** @lrc_desc_pool_vaddr_v69: contents of the GuC LRC descriptor pool */
+       void *lrc_desc_pool_vaddr_v69;
+
        /**
         * @context_lookup: used to resolve intel_context from guc_id, if a
         * context is present in this structure it is registered with the GuC
index 42cb7a9..89a7e5e 100644 (file)
@@ -203,6 +203,20 @@ struct guc_wq_item {
        u32 fence_id;
 } __packed;
 
+struct guc_process_desc_v69 {
+       u32 stage_id;
+       u64 db_base_addr;
+       u32 head;
+       u32 tail;
+       u32 error_offset;
+       u64 wq_base_addr;
+       u32 wq_size_bytes;
+       u32 wq_status;
+       u32 engine_presence;
+       u32 priority;
+       u32 reserved[36];
+} __packed;
+
 struct guc_sched_wq_desc {
        u32 head;
        u32 tail;
@@ -227,6 +241,37 @@ struct guc_ctxt_registration_info {
 };
 #define CONTEXT_REGISTRATION_FLAG_KMD  BIT(0)
 
+/* Preempt to idle on quantum expiry */
+#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69        BIT(0)
+
+/*
+ * GuC Context registration descriptor.
+ * FIXME: This is only required to exist during context registration.
+ * The current 1:1 between guc_lrc_desc and LRCs for the lifetime of the LRC
+ * The current 1:1 mapping between guc_lrc_desc and LRCs for the lifetime
+ * of the LRC is not required.
+struct guc_lrc_desc_v69 {
+       u32 hw_context_desc;
+       u32 slpm_perf_mode_hint;        /* SPLC v1 only */
+       u32 slpm_freq_hint;
+       u32 engine_submit_mask;         /* In logical space */
+       u8 engine_class;
+       u8 reserved0[3];
+       u32 priority;
+       u32 process_desc;
+       u32 wq_addr;
+       u32 wq_size;
+       u32 context_flags;              /* CONTEXT_REGISTRATION_* */
+       /* Time for one workload to execute (in microseconds). */
+       u32 execution_quantum;
+       /*
+        * Time to wait for a preemption request to complete before issuing
+        * a reset (in microseconds).
+        */
+       u32 preemption_timeout;
+       u32 policy_flags;               /* CONTEXT_POLICY_* */
+       u32 reserved1[19];
+} __packed;
+
 /* 32-bit KLV structure as used by policy updates and others */
 struct guc_klv_generic_dw_t {
        u32 kl;
index 1726f0f..2d9f5f1 100644 (file)
@@ -414,12 +414,15 @@ struct sync_semaphore {
 };
 
 struct parent_scratch {
-       struct guc_sched_wq_desc wq_desc;
+       union guc_descs {
+               struct guc_sched_wq_desc wq_desc;
+               struct guc_process_desc_v69 pdesc;
+       } descs;
 
        struct sync_semaphore go;
        struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
 
-       u8 unused[WQ_OFFSET - sizeof(struct guc_sched_wq_desc) -
+       u8 unused[WQ_OFFSET - sizeof(union guc_descs) -
                sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
 
        u32 wq[WQ_SIZE / sizeof(u32)];
@@ -456,17 +459,23 @@ __get_parent_scratch(struct intel_context *ce)
                   LRC_STATE_OFFSET) / sizeof(u32)));
 }
 
+static struct guc_process_desc_v69 *
+__get_process_desc_v69(struct intel_context *ce)
+{
+       struct parent_scratch *ps = __get_parent_scratch(ce);
+
+       return &ps->descs.pdesc;
+}
+
 static struct guc_sched_wq_desc *
-__get_wq_desc(struct intel_context *ce)
+__get_wq_desc_v70(struct intel_context *ce)
 {
        struct parent_scratch *ps = __get_parent_scratch(ce);
 
-       return &ps->wq_desc;
+       return &ps->descs.wq_desc;
 }
 
-static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
-                          struct intel_context *ce,
-                          u32 wqi_size)
+static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
 {
        /*
         * Check for space in work queue. Caching a value of head pointer in
@@ -476,7 +485,7 @@ static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
 #define AVAILABLE_SPACE        \
        CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
        if (wqi_size > AVAILABLE_SPACE) {
-               ce->parallel.guc.wqi_head = READ_ONCE(wq_desc->head);
+               ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
 
                if (wqi_size > AVAILABLE_SPACE)
                        return NULL;
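
For reference, CIRC_SPACE(head, tail, size) from <linux/circ_buf.h> expands to
((tail) - ((head) + 1)) & ((size) - 1); note this code passes its producer
index wqi_tail as "head" and its consumer index wqi_head as "tail". A
standalone worked example with invented indices:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size = 0x1000;			/* WQ_SIZE: a power of two */
	uint32_t prod = 0x40, cons = 0x30;	/* wqi_tail, wqi_head */
	uint32_t space = (cons - (prod + 1)) & (size - 1);

	printf("space = 0x%x\n", space);	/* prints 0xfef */
	return 0;
}
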
@@ -495,11 +504,55 @@ static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
        return ce;
 }
 
+static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index)
+{
+       struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69;
+
+       if (!base)
+               return NULL;
+
+       GEM_BUG_ON(index >= GUC_MAX_CONTEXT_ID);
+
+       return &base[index];
+}
+
+static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc)
+{
+       u32 size;
+       int ret;
+
+       size = PAGE_ALIGN(sizeof(struct guc_lrc_desc_v69) *
+                         GUC_MAX_CONTEXT_ID);
+       ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69,
+                                            (void **)&guc->lrc_desc_pool_vaddr_v69);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc)
+{
+       if (!guc->lrc_desc_pool_vaddr_v69)
+               return;
+
+       guc->lrc_desc_pool_vaddr_v69 = NULL;
+       i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP);
+}
+
 static inline bool guc_submission_initialized(struct intel_guc *guc)
 {
        return guc->submission_initialized;
 }
 
+static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id)
+{
+       struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id);
+
+       if (desc)
+               memset(desc, 0, sizeof(*desc));
+}
+
 static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
 {
        return __get_context(guc, id);
@@ -526,6 +579,8 @@ static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
        if (unlikely(!guc_submission_initialized(guc)))
                return;
 
+       _reset_lrc_desc_v69(guc, id);
+
        /*
         * xarray API doesn't have xa_erase_irqsave wrapper, so calling
         * the lower level functions directly.
@@ -611,7 +666,7 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
                                              true, timeout);
 }
 
-static int guc_context_policy_init(struct intel_context *ce, bool loop);
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
 static int try_context_registration(struct intel_context *ce, bool loop);
 
 static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
@@ -639,7 +694,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
        GEM_BUG_ON(context_guc_id_invalid(ce));
 
        if (context_policy_required(ce)) {
-               err = guc_context_policy_init(ce, false);
+               err = guc_context_policy_init_v70(ce, false);
                if (err)
                        return err;
        }
@@ -737,9 +792,7 @@ static u32 wq_space_until_wrap(struct intel_context *ce)
        return (WQ_SIZE - ce->parallel.guc.wqi_tail);
 }
 
-static void write_wqi(struct guc_sched_wq_desc *wq_desc,
-                     struct intel_context *ce,
-                     u32 wqi_size)
+static void write_wqi(struct intel_context *ce, u32 wqi_size)
 {
        BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
 
@@ -750,13 +803,12 @@ static void write_wqi(struct guc_sched_wq_desc *wq_desc,
 
        ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
                (WQ_SIZE - 1);
-       WRITE_ONCE(wq_desc->tail, ce->parallel.guc.wqi_tail);
+       WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
 }
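
The tail advance just above relies on WQ_SIZE being a power of two (enforced by
the BUILD_BUG_ON at the top of the function), which makes the AND with
(WQ_SIZE - 1) an exact modulo. Worked numbers:

uint32_t tail = 0xff8, wqi_size = 0x10, wq_size = 0x1000;

tail = (tail + wqi_size) & (wq_size - 1);	/* 0x1008 & 0xfff == 0x008: wrapped */
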
 
 static int guc_wq_noop_append(struct intel_context *ce)
 {
-       struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
-       u32 *wqi = get_wq_pointer(wq_desc, ce, wq_space_until_wrap(ce));
+       u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce));
        u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
 
        if (!wqi)
@@ -775,7 +827,6 @@ static int __guc_wq_item_append(struct i915_request *rq)
 {
        struct intel_context *ce = request_to_scheduling_context(rq);
        struct intel_context *child;
-       struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
        unsigned int wqi_size = (ce->parallel.number_children + 4) *
                sizeof(u32);
        u32 *wqi;
@@ -795,7 +846,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
                        return ret;
        }
 
-       wqi = get_wq_pointer(wq_desc, ce, wqi_size);
+       wqi = get_wq_pointer(ce, wqi_size);
        if (!wqi)
                return -EBUSY;
 
@@ -810,7 +861,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
        for_each_child(ce, child)
                *wqi++ = child->ring->tail / sizeof(u64);
 
-       write_wqi(wq_desc, ce, wqi_size);
+       write_wqi(ce, wqi_size);
 
        return 0;
 }
@@ -1527,87 +1578,18 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
        lrc_update_regs(ce, engine, head);
 }
 
-static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
-{
-       static const i915_reg_t _reg[I915_NUM_ENGINES] = {
-               [RCS0] = MSG_IDLE_CS,
-               [BCS0] = MSG_IDLE_BCS,
-               [VCS0] = MSG_IDLE_VCS0,
-               [VCS1] = MSG_IDLE_VCS1,
-               [VCS2] = MSG_IDLE_VCS2,
-               [VCS3] = MSG_IDLE_VCS3,
-               [VCS4] = MSG_IDLE_VCS4,
-               [VCS5] = MSG_IDLE_VCS5,
-               [VCS6] = MSG_IDLE_VCS6,
-               [VCS7] = MSG_IDLE_VCS7,
-               [VECS0] = MSG_IDLE_VECS0,
-               [VECS1] = MSG_IDLE_VECS1,
-               [VECS2] = MSG_IDLE_VECS2,
-               [VECS3] = MSG_IDLE_VECS3,
-               [CCS0] = MSG_IDLE_CS,
-               [CCS1] = MSG_IDLE_CS,
-               [CCS2] = MSG_IDLE_CS,
-               [CCS3] = MSG_IDLE_CS,
-       };
-       u32 val;
-
-       if (!_reg[engine->id].reg)
-               return 0;
-
-       val = intel_uncore_read(engine->uncore, _reg[engine->id]);
-
-       /* bits[29:25] & bits[13:9] >> shift */
-       return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
-}
-
-static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
-{
-       int ret;
-
-       /* Ensure GPM receives fw up/down after CS is stopped */
-       udelay(1);
-
-       /* Wait for forcewake request to complete in GPM */
-       ret =  __intel_wait_for_register_fw(gt->uncore,
-                                           GEN9_PWRGT_DOMAIN_STATUS,
-                                           fw_mask, fw_mask, 5000, 0, NULL);
-
-       /* Ensure CS receives fw ack from GPM */
-       udelay(1);
-
-       if (ret)
-               GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
-}
-
-/*
- * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
- * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
- * pending status is indicated by bits[13:9] (masked by bits[ 29:25]) in the
- * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
- * are concerned only with the gt reset here, we use a logical OR of pending
- * forcewakeups from all reset domains and then wait for them to complete by
- * querying PWRGT_DOMAIN_STATUS.
- */
 static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
 {
-       u32 fw_pending;
-
-       if (GRAPHICS_VER(engine->i915) != 12)
+       if (!IS_GRAPHICS_VER(engine->i915, 11, 12))
                return;
 
-       /*
-        * Wa_22011802037
-        * TODO: Occasionally trying to stop the cs times out, but does not
-        * adversely affect functionality. The timeout is set as a config
-        * parameter that defaults to 100ms. Assuming that this timeout is
-        * sufficient for any pending MI_FORCEWAKEs to complete, ignore the
-        * timeout returned here until it is root caused.
-        */
        intel_engine_stop_cs(engine);
 
-       fw_pending = __cs_pending_mi_force_wakes(engine);
-       if (fw_pending)
-               __gpm_wait_for_fw_complete(engine->gt, fw_pending);
+       /*
+        * Wa_22011802037:gen11/gen12: In addition to stopping the CS, we need
+        * to wait for any pending MI force wakeups.
+        */
+       intel_engine_wait_for_pending_mi_fw(engine);
 }
 
 static void guc_reset_nop(struct intel_engine_cs *engine)
@@ -1868,20 +1850,34 @@ static void reset_fail_worker_func(struct work_struct *w);
 int intel_guc_submission_init(struct intel_guc *guc)
 {
        struct intel_gt *gt = guc_to_gt(guc);
+       int ret;
 
        if (guc->submission_initialized)
                return 0;
 
+       if (guc->fw.major_ver_found < 70) {
+               ret = guc_lrc_desc_pool_create_v69(guc);
+               if (ret)
+                       return ret;
+       }
+
        guc->submission_state.guc_ids_bitmap =
                bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
-       if (!guc->submission_state.guc_ids_bitmap)
-               return -ENOMEM;
+       if (!guc->submission_state.guc_ids_bitmap) {
+               ret = -ENOMEM;
+               goto destroy_pool;
+       }
 
        guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
        guc->timestamp.shift = gpm_timestamp_shift(gt);
        guc->submission_initialized = true;
 
        return 0;
+
+destroy_pool:
+       guc_lrc_desc_pool_destroy_v69(guc);
+
+       return ret;
 }
 
 void intel_guc_submission_fini(struct intel_guc *guc)
@@ -1890,6 +1886,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
                return;
 
        guc_flush_destroyed_contexts(guc);
+       guc_lrc_desc_pool_destroy_v69(guc);
        i915_sched_engine_put(guc->sched_engine);
        bitmap_free(guc->submission_state.guc_ids_bitmap);
        guc->submission_initialized = false;
@@ -2147,10 +2144,34 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
        spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 }
 
-static int __guc_action_register_multi_lrc(struct intel_guc *guc,
-                                          struct intel_context *ce,
-                                          struct guc_ctxt_registration_info *info,
-                                          bool loop)
+static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc,
+                                              struct intel_context *ce,
+                                              u32 guc_id,
+                                              u32 offset,
+                                              bool loop)
+{
+       struct intel_context *child;
+       u32 action[4 + MAX_ENGINE_INSTANCE];
+       int len = 0;
+
+       GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
+
+       action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
+       action[len++] = guc_id;
+       action[len++] = ce->parallel.number_children + 1;
+       action[len++] = offset;
+       for_each_child(ce, child) {
+               offset += sizeof(struct guc_lrc_desc_v69);
+               action[len++] = offset;
+       }
+
+       return guc_submission_send_busy_loop(guc, action, len, 0, loop);
+}
+
+static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc,
+                                              struct intel_context *ce,
+                                              struct guc_ctxt_registration_info *info,
+                                              bool loop)
 {
        struct intel_context *child;
        u32 action[13 + (MAX_ENGINE_INSTANCE * 2)];
@@ -2190,9 +2211,24 @@ static int __guc_action_register_multi_lrc(struct intel_guc *guc,
        return guc_submission_send_busy_loop(guc, action, len, 0, loop);
 }
 
-static int __guc_action_register_context(struct intel_guc *guc,
-                                        struct guc_ctxt_registration_info *info,
-                                        bool loop)
+static int __guc_action_register_context_v69(struct intel_guc *guc,
+                                            u32 guc_id,
+                                            u32 offset,
+                                            bool loop)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_REGISTER_CONTEXT,
+               guc_id,
+               offset,
+       };
+
+       return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+                                            0, loop);
+}
+
+static int __guc_action_register_context_v70(struct intel_guc *guc,
+                                            struct guc_ctxt_registration_info *info,
+                                            bool loop)
 {
        u32 action[] = {
                INTEL_GUC_ACTION_REGISTER_CONTEXT,
@@ -2213,24 +2249,52 @@ static int __guc_action_register_context(struct intel_guc *guc,
                                             0, loop);
 }
 
-static void prepare_context_registration_info(struct intel_context *ce,
-                                             struct guc_ctxt_registration_info *info);
+static void prepare_context_registration_info_v69(struct intel_context *ce);
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+                                                 struct guc_ctxt_registration_info *info);
 
-static int register_context(struct intel_context *ce, bool loop)
+static int
+register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
+{
+       u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) +
+               ce->guc_id.id * sizeof(struct guc_lrc_desc_v69);
+
+       prepare_context_registration_info_v69(ce);
+
+       if (intel_context_is_parent(ce))
+               return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id,
+                                                          offset, loop);
+       else
+               return __guc_action_register_context_v69(guc, ce->guc_id.id,
+                                                        offset, loop);
+}
+
+static int
+register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
 {
        struct guc_ctxt_registration_info info;
+
+       prepare_context_registration_info_v70(ce, &info);
+
+       if (intel_context_is_parent(ce))
+               return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop);
+       else
+               return __guc_action_register_context_v70(guc, &info, loop);
+}
+
+static int register_context(struct intel_context *ce, bool loop)
+{
        struct intel_guc *guc = ce_to_guc(ce);
        int ret;
 
        GEM_BUG_ON(intel_context_is_child(ce));
        trace_intel_context_register(ce);
 
-       prepare_context_registration_info(ce, &info);
-
-       if (intel_context_is_parent(ce))
-               ret = __guc_action_register_multi_lrc(guc, ce, &info, loop);
+       if (guc->fw.major_ver_found >= 70)
+               ret = register_context_v70(guc, ce, loop);
        else
-               ret = __guc_action_register_context(guc, &info, loop);
+               ret = register_context_v69(guc, ce, loop);
+
        if (likely(!ret)) {
                unsigned long flags;
 
@@ -2238,7 +2302,8 @@ static int register_context(struct intel_context *ce, bool loop)
                set_context_registered(ce);
                spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
-               guc_context_policy_init(ce, loop);
+               if (guc->fw.major_ver_found >= 70)
+                       guc_context_policy_init_v70(ce, loop);
        }
 
        return ret;
@@ -2335,7 +2400,7 @@ static int __guc_context_set_context_policies(struct intel_guc *guc,
                                        0, loop);
 }
 
-static int guc_context_policy_init(struct intel_context *ce, bool loop)
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
 {
        struct intel_engine_cs *engine = ce->engine;
        struct intel_guc *guc = &engine->gt->uc.guc;
@@ -2394,8 +2459,108 @@ static int guc_context_policy_init(struct intel_context *ce, bool loop)
        return ret;
 }
 
-static void prepare_context_registration_info(struct intel_context *ce,
-                                             struct guc_ctxt_registration_info *info)
+static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
+                                       struct guc_lrc_desc_v69 *desc)
+{
+       desc->policy_flags = 0;
+
+       if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
+               desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
+
+       /* NB: For both of these, zero means disabled. */
+       desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
+       desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+}
+
+static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
+{
+       /*
+        * this matches the mapping we do in map_i915_prio_to_guc_prio()
+        * (e.g. prio < I915_PRIORITY_NORMAL maps to GUC_CLIENT_PRIORITY_NORMAL)
+        */
+       switch (prio) {
+       default:
+               MISSING_CASE(prio);
+               fallthrough;
+       case GUC_CLIENT_PRIORITY_KMD_NORMAL:
+               return GEN12_CTX_PRIORITY_NORMAL;
+       case GUC_CLIENT_PRIORITY_NORMAL:
+               return GEN12_CTX_PRIORITY_LOW;
+       case GUC_CLIENT_PRIORITY_HIGH:
+       case GUC_CLIENT_PRIORITY_KMD_HIGH:
+               return GEN12_CTX_PRIORITY_HIGH;
+       }
+}
+
+static void prepare_context_registration_info_v69(struct intel_context *ce)
+{
+       struct intel_engine_cs *engine = ce->engine;
+       struct intel_guc *guc = &engine->gt->uc.guc;
+       u32 ctx_id = ce->guc_id.id;
+       struct guc_lrc_desc_v69 *desc;
+       struct intel_context *child;
+
+       GEM_BUG_ON(!engine->mask);
+
+       /*
+        * Ensure the LRC and CT vmas are in the same region, as the write
+        * barrier is done based on the CT vma region.
+        */
+       GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
+                  i915_gem_object_is_lmem(ce->ring->vma->obj));
+
+       desc = __get_lrc_desc_v69(guc, ctx_id);
+       desc->engine_class = engine_class_to_guc_class(engine->class);
+       desc->engine_submit_mask = engine->logical_mask;
+       desc->hw_context_desc = ce->lrc.lrca;
+       desc->priority = ce->guc_state.prio;
+       desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+       guc_context_policy_init_v69(engine, desc);
+
+       /*
+        * If context is a parent, we need to register a process descriptor
+        * describing a work queue and register all child contexts.
+        */
+       if (intel_context_is_parent(ce)) {
+               struct guc_process_desc_v69 *pdesc;
+
+               ce->parallel.guc.wqi_tail = 0;
+               ce->parallel.guc.wqi_head = 0;
+
+               desc->process_desc = i915_ggtt_offset(ce->state) +
+                       __get_parent_scratch_offset(ce);
+               desc->wq_addr = i915_ggtt_offset(ce->state) +
+                       __get_wq_offset(ce);
+               desc->wq_size = WQ_SIZE;
+
+               pdesc = __get_process_desc_v69(ce);
+               memset(pdesc, 0, sizeof(*pdesc));
+               pdesc->stage_id = ce->guc_id.id;
+               pdesc->wq_base_addr = desc->wq_addr;
+               pdesc->wq_size_bytes = desc->wq_size;
+               pdesc->wq_status = WQ_STATUS_ACTIVE;
+
+               ce->parallel.guc.wq_head = &pdesc->head;
+               ce->parallel.guc.wq_tail = &pdesc->tail;
+               ce->parallel.guc.wq_status = &pdesc->wq_status;
+
+               for_each_child(ce, child) {
+                       desc = __get_lrc_desc_v69(guc, child->guc_id.id);
+
+                       desc->engine_class =
+                               engine_class_to_guc_class(engine->class);
+                       desc->hw_context_desc = child->lrc.lrca;
+                       desc->priority = ce->guc_state.prio;
+                       desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+                       guc_context_policy_init_v69(engine, desc);
+               }
+
+               clear_children_join_go_memory(ce);
+       }
+}
+
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+                                                 struct guc_ctxt_registration_info *info)
 {
        struct intel_engine_cs *engine = ce->engine;
        struct intel_guc *guc = &engine->gt->uc.guc;
@@ -2420,6 +2585,8 @@ static void prepare_context_registration_info(struct intel_context *ce,
         */
        info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
        info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
+       if (engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
+               info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio);
        info->flags = CONTEXT_REGISTRATION_FLAG_KMD;
 
        /*
@@ -2443,10 +2610,14 @@ static void prepare_context_registration_info(struct intel_context *ce,
                info->wq_base_hi = upper_32_bits(wq_base_offset);
                info->wq_size = WQ_SIZE;
 
-               wq_desc = __get_wq_desc(ce);
+               wq_desc = __get_wq_desc_v70(ce);
                memset(wq_desc, 0, sizeof(*wq_desc));
                wq_desc->wq_status = WQ_STATUS_ACTIVE;
 
+               ce->parallel.guc.wq_head = &wq_desc->head;
+               ce->parallel.guc.wq_tail = &wq_desc->tail;
+               ce->parallel.guc.wq_status = &wq_desc->wq_status;
+
                clear_children_join_go_memory(ce);
        }
 }
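
The v70 registration above ORs priority bits into hwlrca_lo, which only works because the LRC address is page-aligned and its low bits are therefore free to carry flags. A standalone sketch of that packing; the field position and width are chosen for illustration, not taken from the hardware layout:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LOW_BITS   0xfffULL           /* 4 KiB alignment frees bits 0..11 */
    #define PRIO_SHIFT 9
    #define PRIO_MASK  (0x3ULL << PRIO_SHIFT)

    static uint64_t pack_lrca(uint64_t lrca, uint64_t prio)
    {
            assert((lrca & LOW_BITS) == 0); /* alignment is what frees the bits */
            return lrca | ((prio << PRIO_SHIFT) & PRIO_MASK);
    }

    int main(void)
    {
            uint64_t desc = pack_lrca(0x1234000, 2); /* "high" priority */

            printf("addr=%#llx prio=%llu\n",
                   (unsigned long long)(desc & ~LOW_BITS),
                   (unsigned long long)((desc & PRIO_MASK) >> PRIO_SHIFT));
            return 0;
    }
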
@@ -2761,11 +2932,21 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
                                                 u16 guc_id,
                                                 u32 preemption_timeout)
 {
-       struct context_policy policy;
+       if (guc->fw.major_ver_found >= 70) {
+               struct context_policy policy;
 
-       __guc_context_policy_start_klv(&policy, guc_id);
-       __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
-       __guc_context_set_context_policies(guc, &policy, true);
+               __guc_context_policy_start_klv(&policy, guc_id);
+               __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+               __guc_context_set_context_policies(guc, &policy, true);
+       } else {
+               u32 action[] = {
+                       INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT,
+                       guc_id,
+                       preemption_timeout
+               };
+
+               intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+       }
 }
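
For firmware older than v70, the fallback above hand-builds the classic H2G message: a flat array of u32 dwords whose first element is the action opcode, followed by its parameters in order. A standalone sketch of that layout; the opcode value is illustrative, not the real INTEL_GUC_ACTION_V69_* encoding:

    #include <stdint.h>
    #include <stdio.h>

    #define ACTION_SET_CTX_PREEMPT_TIMEOUT 0x1005u /* illustrative opcode */

    int main(void)
    {
            uint32_t guc_id = 42, timeout_us = 640000;
            uint32_t action[] = { ACTION_SET_CTX_PREEMPT_TIMEOUT, guc_id, timeout_us };

            /* Each dword would be copied verbatim into the CT send buffer. */
            for (unsigned int i = 0; i < sizeof(action) / sizeof(action[0]); i++)
                    printf("dw%u: %#x\n", i, action[i]);
            return 0;
    }
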
 
 static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
@@ -3013,11 +3194,21 @@ static int guc_context_alloc(struct intel_context *ce)
 static void __guc_context_set_prio(struct intel_guc *guc,
                                   struct intel_context *ce)
 {
-       struct context_policy policy;
+       if (guc->fw.major_ver_found >= 70) {
+               struct context_policy policy;
 
-       __guc_context_policy_start_klv(&policy, ce->guc_id.id);
-       __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
-       __guc_context_set_context_policies(guc, &policy, true);
+               __guc_context_policy_start_klv(&policy, ce->guc_id.id);
+               __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
+               __guc_context_set_context_policies(guc, &policy, true);
+       } else {
+               u32 action[] = {
+                       INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY,
+                       ce->guc_id.id,
+                       ce->guc_state.prio,
+               };
+
+               guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+       }
 }
 
 static void guc_context_set_prio(struct intel_guc *guc,
@@ -4527,17 +4718,19 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
                guc_log_context_priority(p, ce);
 
                if (intel_context_is_parent(ce)) {
-                       struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
                        struct intel_context *child;
 
                        drm_printf(p, "\t\tNumber children: %u\n",
                                   ce->parallel.number_children);
-                       drm_printf(p, "\t\tWQI Head: %u\n",
-                                  READ_ONCE(wq_desc->head));
-                       drm_printf(p, "\t\tWQI Tail: %u\n",
-                                  READ_ONCE(wq_desc->tail));
-                       drm_printf(p, "\t\tWQI Status: %u\n\n",
-                                  READ_ONCE(wq_desc->wq_status));
+
+                       if (ce->parallel.guc.wq_status) {
+                               drm_printf(p, "\t\tWQI Head: %u\n",
+                                          READ_ONCE(*ce->parallel.guc.wq_head));
+                               drm_printf(p, "\t\tWQI Tail: %u\n",
+                                          READ_ONCE(*ce->parallel.guc.wq_tail));
+                               drm_printf(p, "\t\tWQI Status: %u\n\n",
+                                          READ_ONCE(*ce->parallel.guc.wq_status));
+                       }
 
                        if (ce->engine->emit_bb_start ==
                            emit_bb_start_parent_no_preempt_mid_batch) {
index d078f88..703f42b 100644
@@ -70,6 +70,10 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
        fw_def(BROXTON,      0, guc_def(bxt,  70, 1, 1)) \
        fw_def(SKYLAKE,      0, guc_def(skl,  70, 1, 1))
 
+#define INTEL_GUC_FIRMWARE_DEFS_FALLBACK(fw_def, guc_def) \
+       fw_def(ALDERLAKE_P,  0, guc_def(adlp, 69, 0, 3)) \
+       fw_def(ALDERLAKE_S,  0, guc_def(tgl,  69, 0, 3))
+
 #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
        fw_def(ALDERLAKE_P,  0, huc_def(tgl,  7, 9, 3)) \
        fw_def(ALDERLAKE_S,  0, huc_def(tgl,  7, 9, 3)) \
@@ -105,6 +109,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
        MODULE_FIRMWARE(uc_);
 
 INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
+INTEL_GUC_FIRMWARE_DEFS_FALLBACK(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
 INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
 
 /* The below structs and macros are used to iterate across the list of blobs */
@@ -149,6 +154,9 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
        static const struct uc_fw_platform_requirement blobs_guc[] = {
                INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
        };
+       static const struct uc_fw_platform_requirement blobs_guc_fallback[] = {
+               INTEL_GUC_FIRMWARE_DEFS_FALLBACK(MAKE_FW_LIST, GUC_FW_BLOB)
+       };
        static const struct uc_fw_platform_requirement blobs_huc[] = {
                INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
        };
@@ -156,12 +164,21 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
                [INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
                [INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
        };
-       static const struct uc_fw_platform_requirement *fw_blobs;
+       const struct uc_fw_platform_requirement *fw_blobs;
        enum intel_platform p = INTEL_INFO(i915)->platform;
        u32 fw_count;
        u8 rev = INTEL_REVID(i915);
        int i;
 
+       /*
+        * The only difference between the ADL GuC FWs is the HWConfig support.
+        * ADL-N does not support HWConfig, so we should use the same binary as
+        * ADL-S, otherwise the GuC might attempt to fetch a config table that
+        * does not exist.
+        */
+       if (IS_ADLP_N(i915))
+               p = INTEL_ALDERLAKE_S;
+
        GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
        fw_blobs = blobs_all[uc_fw->type].blobs;
        fw_count = blobs_all[uc_fw->type].count;
@@ -170,12 +187,29 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
                if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
                        const struct uc_fw_blob *blob = &fw_blobs[i].blob;
                        uc_fw->path = blob->path;
+                       uc_fw->wanted_path = blob->path;
                        uc_fw->major_ver_wanted = blob->major;
                        uc_fw->minor_ver_wanted = blob->minor;
                        break;
                }
        }
 
+       if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
+               const struct uc_fw_platform_requirement *blobs = blobs_guc_fallback;
+               u32 count = ARRAY_SIZE(blobs_guc_fallback);
+
+               for (i = 0; i < count && p <= blobs[i].p; i++) {
+                       if (p == blobs[i].p && rev >= blobs[i].rev) {
+                               const struct uc_fw_blob *blob = &blobs[i].blob;
+
+                               uc_fw->fallback.path = blob->path;
+                               uc_fw->fallback.major_ver = blob->major;
+                               uc_fw->fallback.minor_ver = blob->minor;
+                               break;
+                       }
+               }
+       }
+
        /* make sure the list is ordered as expected */
        if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
                for (i = 1; i < fw_count; i++) {
@@ -329,7 +363,24 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
        __force_fw_fetch_failures(uc_fw, -EINVAL);
        __force_fw_fetch_failures(uc_fw, -ESTALE);
 
-       err = request_firmware(&fw, uc_fw->path, dev);
+       err = firmware_request_nowarn(&fw, uc_fw->path, dev);
+       if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) {
+               err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev);
+               if (!err) {
+                       drm_notice(&i915->drm,
+                                  "%s firmware %s is recommended, but only %s was found\n",
+                                  intel_uc_fw_type_repr(uc_fw->type),
+                                  uc_fw->wanted_path,
+                                  uc_fw->fallback.path);
+                       drm_info(&i915->drm,
+                                "Consider updating your linux-firmware pkg or downloading from %s\n",
+                                INTEL_UC_FIRMWARE_URL);
+
+                       uc_fw->path = uc_fw->fallback.path;
+                       uc_fw->major_ver_wanted = uc_fw->fallback.major_ver;
+                       uc_fw->minor_ver_wanted = uc_fw->fallback.minor_ver;
+               }
+       }
        if (err)
                goto fail;
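
The fetch above now asks quietly for the preferred blob first and warns only once a usable fallback has actually loaded, so a missing newer blob no longer spams the log on every probe. A userspace analogue of that request-with-fallback pattern; both file names are hypothetical:

    #include <stdio.h>

    static FILE *request_blob(const char *preferred, const char *fallback,
                              const char **chosen)
    {
            FILE *f = fopen(preferred, "rb");

            *chosen = preferred;
            if (!f && fallback) {
                    f = fopen(fallback, "rb");
                    if (f) {
                            /* Warn only when the fallback is actually used. */
                            fprintf(stderr, "%s is recommended, but only %s was found\n",
                                    preferred, fallback);
                            *chosen = fallback;
                    }
            }
            return f;
    }

    int main(void)
    {
            const char *chosen;
            FILE *f = request_blob("i915/adlp_guc_70.bin",
                                   "i915/adlp_guc_69.0.3.bin", &chosen);

            if (f) {
                    printf("loaded %s\n", chosen);
                    fclose(f);
            }
            return 0;
    }
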
 
@@ -428,8 +479,8 @@ fail:
                                  INTEL_UC_FIRMWARE_MISSING :
                                  INTEL_UC_FIRMWARE_ERROR);
 
-       drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
-                  intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+       i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
+                        intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
        drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
                 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
 
@@ -787,7 +838,13 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
 void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
 {
        drm_printf(p, "%s firmware: %s\n",
-                  intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+                  intel_uc_fw_type_repr(uc_fw->type), uc_fw->wanted_path);
+       if (uc_fw->fallback.path) {
+               drm_printf(p, "%s firmware fallback: %s\n",
+                          intel_uc_fw_type_repr(uc_fw->type), uc_fw->fallback.path);
+               drm_printf(p, "fallback selected: %s\n",
+                          str_yes_no(uc_fw->path == uc_fw->fallback.path));
+       }
        drm_printf(p, "\tstatus: %s\n",
                   intel_uc_fw_status_repr(uc_fw->status));
        drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
index 3229018..562acdf 100644
@@ -74,6 +74,7 @@ struct intel_uc_fw {
                const enum intel_uc_fw_status status;
                enum intel_uc_fw_status __status; /* no accidental overwrites */
        };
+       const char *wanted_path;
        const char *path;
        bool user_overridden;
        size_t size;
@@ -98,6 +99,12 @@ struct intel_uc_fw {
        u16 major_ver_found;
        u16 minor_ver_found;
 
+       struct {
+               const char *path;
+               u16 major_ver;
+               u16 minor_ver;
+       } fallback;
+
        u32 rsa_size;
        u32 ucode_size;
 
index b9eb75a..1c35a41 100644
@@ -3117,9 +3117,9 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
                        continue;
 
                vaddr = shmem_pin_map(engine->default_state);
-               if (IS_ERR(vaddr)) {
-                       gvt_err("failed to map %s->default state, err:%zd\n",
-                               engine->name, PTR_ERR(vaddr));
+               if (!vaddr) {
+                       gvt_err("failed to map %s->default state\n",
+                               engine->name);
                        return;
                }
 
index 90b0ce5..1041b53 100644
@@ -530,6 +530,7 @@ mask_err:
 static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 {
        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+       struct pci_dev *root_pdev;
        int ret;
 
        if (i915_inject_probe_failure(dev_priv))
@@ -641,6 +642,15 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 
        intel_bw_init_hw(dev_priv);
 
+       /*
+        * FIXME: Temporary hammer to avoid freezing the machine on our DGFX.
+        * This should be removed entirely once we handle the PCI states
+        * properly on runtime PM and in the s2idle case.
+        */
+       root_pdev = pcie_find_root_port(pdev);
+       if (root_pdev)
+               pci_d3cold_disable(root_pdev);
+
        return 0;
 
 err_msi:
@@ -664,11 +674,16 @@ err_perf:
 static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
 {
        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+       struct pci_dev *root_pdev;
 
        i915_perf_fini(dev_priv);
 
        if (pdev->msi_enabled)
                pci_disable_msi(pdev);
+
+       root_pdev = pcie_find_root_port(pdev);
+       if (root_pdev)
+               pci_d3cold_enable(root_pdev);
 }
 
 /**
@@ -1193,14 +1208,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
                goto out;
        }
 
-       /*
-        * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
-        * This should be totally removed when we handle the pci states properly
-        * on runtime PM and on s2idle cases.
-        */
-       if (suspend_to_idle(dev_priv))
-               pci_d3cold_disable(pdev);
-
        pci_disable_device(pdev);
        /*
         * During hibernation on some platforms the BIOS may try to access
@@ -1365,8 +1372,6 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
        pci_set_master(pdev);
 
-       pci_d3cold_enable(pdev);
-
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 
        ret = vlv_resume_prepare(dev_priv, false);
@@ -1543,7 +1548,6 @@ static int intel_runtime_suspend(struct device *kdev)
 {
        struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
-       struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
        int ret;
 
        if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
@@ -1589,12 +1593,6 @@ static int intel_runtime_suspend(struct device *kdev)
                drm_err(&dev_priv->drm,
                        "Unclaimed access detected prior to suspending\n");
 
-       /*
-        * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
-        * This should be totally removed when we handle the pci states properly
-        * on runtime PM and on s2idle cases.
-        */
-       pci_d3cold_disable(pdev);
        rpm->suspended = true;
 
        /*
@@ -1633,7 +1631,6 @@ static int intel_runtime_resume(struct device *kdev)
 {
        struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
-       struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
        int ret;
 
        if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
@@ -1646,7 +1643,6 @@ static int intel_runtime_resume(struct device *kdev)
 
        intel_opregion_notify_adapter(dev_priv, PCI_D0);
        rpm->suspended = false;
-       pci_d3cold_enable(pdev);
        if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
                drm_dbg(&dev_priv->drm,
                        "Unclaimed access during suspend, bios?\n");
index 18d38cb..b09d1d3 100644
@@ -116,8 +116,9 @@ show_client_class(struct seq_file *m,
                total += busy_add(ctx, class);
        rcu_read_unlock();
 
-       seq_printf(m, "drm-engine-%s:\t%llu ns\n",
-                  uabi_class_names[class], total);
+       if (capacity)
+               seq_printf(m, "drm-engine-%s:\t%llu ns\n",
+                          uabi_class_names[class], total);
 
        if (capacity > 1)
                seq_printf(m, "drm-engine-capacity-%s:\t%u\n",
index 159571b..dcc0818 100644
@@ -68,6 +68,7 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
  * drm_mm_node
  * @node: The drm_mm_node.
  * @region_start: An offset to add to the dma addresses of the sg list.
+ * @page_alignment: Required page alignment for each sg entry. Power of two.
  *
  * Create a struct sg_table, initializing it from a struct drm_mm_node,
  * taking a maximum segment length into account, splitting into segments
@@ -77,22 +78,25 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
  * error code cast to an error pointer on failure.
  */
 struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
-                                             u64 region_start)
+                                             u64 region_start,
+                                             u32 page_alignment)
 {
-       const u64 max_segment = SZ_1G; /* Do we have a limit on this? */
-       u64 segment_pages = max_segment >> PAGE_SHIFT;
+       const u32 max_segment = round_down(UINT_MAX, page_alignment);
+       const u32 segment_pages = max_segment >> PAGE_SHIFT;
        u64 block_size, offset, prev_end;
        struct i915_refct_sgt *rsgt;
        struct sg_table *st;
        struct scatterlist *sg;
 
+       GEM_BUG_ON(!max_segment);
+
        rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
        if (!rsgt)
                return ERR_PTR(-ENOMEM);
 
        i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
        st = &rsgt->table;
-       if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages),
+       if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
                           GFP_KERNEL)) {
                i915_refct_sgt_put(rsgt);
                return ERR_PTR(-ENOMEM);
@@ -112,12 +116,14 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
                                sg = __sg_next(sg);
 
                        sg_dma_address(sg) = region_start + offset;
+                       GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
+                                              page_alignment));
                        sg_dma_len(sg) = 0;
                        sg->length = 0;
                        st->nents++;
                }
 
-               len = min(block_size, max_segment - sg->length);
+               len = min_t(u64, block_size, max_segment - sg->length);
                sg->length += len;
                sg_dma_len(sg) += len;
 
@@ -138,6 +144,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
  * i915_buddy_block list
  * @res: The struct i915_ttm_buddy_resource.
  * @region_start: An offset to add to the dma addresses of the sg list.
+ * @page_alignment: Required page alignment for each sg entry. Power of two.
  *
  * Create a struct sg_table, initializing it from struct i915_buddy_block list,
  * taking a maximum segment length into account, splitting into segments
@@ -147,11 +154,12 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
  * error code cast to an error pointer on failure.
  */
 struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
-                                                    u64 region_start)
+                                                    u64 region_start,
+                                                    u32 page_alignment)
 {
        struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
        const u64 size = res->num_pages << PAGE_SHIFT;
-       const u64 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
+       const u32 max_segment = round_down(UINT_MAX, page_alignment);
        struct drm_buddy *mm = bman_res->mm;
        struct list_head *blocks = &bman_res->blocks;
        struct drm_buddy_block *block;
@@ -161,6 +169,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
        resource_size_t prev_end;
 
        GEM_BUG_ON(list_empty(blocks));
+       GEM_BUG_ON(!max_segment);
 
        rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
        if (!rsgt)
@@ -191,12 +200,14 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
                                        sg = __sg_next(sg);
 
                                sg_dma_address(sg) = region_start + offset;
+                               GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
+                                                      page_alignment));
                                sg_dma_len(sg) = 0;
                                sg->length = 0;
                                st->nents++;
                        }
 
-                       len = min(block_size, max_segment - sg->length);
+                       len = min_t(u64, block_size, max_segment - sg->length);
                        sg->length += len;
                        sg_dma_len(sg) += len;
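
In both sg-table builders above, max_segment = round_down(UINT_MAX, page_alignment) guarantees that every full segment length is a multiple of the region page size, so each successive segment start address stays aligned. A standalone check of that arithmetic, assuming a power-of-two alignment as the kernel macro does:

    #include <stdint.h>
    #include <stdio.h>

    #define round_down(x, y) ((x) & ~((y) - 1)) /* y must be a power of two */

    int main(void)
    {
            const uint32_t ps = 1u << 16;  /* e.g. 64 KiB region pages */
            const uint32_t max_segment = round_down(UINT32_MAX, ps);
            uint64_t offset = 0, size = 1ull << 33; /* an 8 GiB block */

            while (size) {
                    uint64_t len = size < max_segment ? size : max_segment;

                    printf("sg @ %#llx len %#llx aligned=%d\n",
                           (unsigned long long)offset, (unsigned long long)len,
                           offset % ps == 0);
                    offset += len;
                    size -= len;
            }
            return 0;
    }
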
 
index 12c6a16..9ddb3e7 100644
@@ -213,9 +213,11 @@ static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt,
 void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size);
 
 struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
-                                             u64 region_start);
+                                             u64 region_start,
+                                             u32 page_alignment);
 
 struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
-                                                    u64 region_start);
+                                                    u64 region_start,
+                                                    u32 page_alignment);
 
 #endif
index 8521dab..1e27502 100644
@@ -166,7 +166,14 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
        struct device *kdev = kobj_to_dev(kobj);
        struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
        struct i915_gpu_coredump *gpu;
-       ssize_t ret;
+       ssize_t ret = 0;
+
+       /*
+        * FIXME: Concurrent clients triggering resets and reading + clearing
+        * dumps can race: a caller passing a non-zero offset to complete a
+        * prior partial read may find the gpu_coredump already cleared or
+        * replaced, producing an inconsistent sysfs read.
+        */
 
        gpu = i915_first_error_state(i915);
        if (IS_ERR(gpu)) {
@@ -178,8 +185,10 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
                const char *str = "No error state collected\n";
                size_t len = strlen(str);
 
-               ret = min_t(size_t, count, len - off);
-               memcpy(buf, str + off, ret);
+               if (off < len) {
+                       ret = min_t(size_t, count, len - off);
+                       memcpy(buf, str + off, ret);
+               }
        }
 
        return ret;
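
The guard above fixes a classic bin-attribute pitfall: sysfs may call ->read() again with an offset at or beyond the end of the data to finish a partial read, and "len - off" then underflows in size_t. A standalone sketch of the corrected semantics, returning 0 (EOF) once the offset passes the end:

    #include <stdio.h>
    #include <string.h>

    static long bin_read(char *dst, const char *src, size_t count, size_t off)
    {
            size_t len = strlen(src);
            size_t ret = 0;

            if (off < len) { /* without this check, len - off can wrap */
                    ret = count < len - off ? count : len - off;
                    memcpy(dst, src + off, ret);
            }
            return (long)ret;
    }

    int main(void)
    {
            const char *msg = "No error state collected\n";
            char buf[64];

            printf("read@0  -> %ld\n", bin_read(buf, msg, sizeof(buf), 0));
            printf("read@99 -> %ld\n", bin_read(buf, msg, sizeof(buf), 99)); /* EOF */
            return 0;
    }
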
@@ -259,4 +268,6 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
 
        device_remove_bin_file(kdev,  &dpf_attrs_1);
        device_remove_bin_file(kdev,  &dpf_attrs);
+
+       kobject_put(dev_priv->sysfs_gt);
 }
index 4f6db53..04d12f2 100644
@@ -23,6 +23,7 @@
  */
 
 #include <linux/sched/mm.h>
+#include <linux/dma-fence-array.h>
 #include <drm/drm_gem.h>
 
 #include "display/intel_frontbuffer.h"
@@ -1636,10 +1637,10 @@ static void force_unbind(struct i915_vma *vma)
        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 }
 
-static void release_references(struct i915_vma *vma, bool vm_ddestroy)
+static void release_references(struct i915_vma *vma, struct intel_gt *gt,
+                              bool vm_ddestroy)
 {
        struct drm_i915_gem_object *obj = vma->obj;
-       struct intel_gt *gt = vma->vm->gt;
 
        GEM_BUG_ON(i915_vma_is_active(vma));
 
@@ -1694,11 +1695,12 @@ void i915_vma_destroy_locked(struct i915_vma *vma)
 
        force_unbind(vma);
        list_del_init(&vma->vm_link);
-       release_references(vma, false);
+       release_references(vma, vma->vm->gt, false);
 }
 
 void i915_vma_destroy(struct i915_vma *vma)
 {
+       struct intel_gt *gt;
        bool vm_ddestroy;
 
        mutex_lock(&vma->vm->mutex);
@@ -1706,8 +1708,11 @@ void i915_vma_destroy(struct i915_vma *vma)
        list_del_init(&vma->vm_link);
        vm_ddestroy = vma->vm_ddestroy;
        vma->vm_ddestroy = false;
+
+       /* vma->vm may be freed when releasing vma->vm->mutex. */
+       gt = vma->vm->gt;
        mutex_unlock(&vma->vm->mutex);
-       release_references(vma, vm_ddestroy);
+       release_references(vma, gt, vm_ddestroy);
 }
 
 void i915_vma_parked(struct intel_gt *gt)
@@ -1823,6 +1828,21 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
        if (unlikely(err))
                return err;
 
+       /*
+        * Reserve fence slots early so that no allocation is needed after
+        * the workload has been prepared and fences are being associated
+        * with the dma_resv.
+        */
+       if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
+               struct dma_fence *curr;
+               int idx;
+
+               dma_fence_array_for_each(curr, idx, fence)
+                       ;
+               err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
+               if (unlikely(err))
+                       return err;
+       }
+
        if (flags & EXEC_OBJECT_WRITE) {
                struct intel_frontbuffer *front;
 
@@ -1832,31 +1852,23 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
                                i915_active_add_request(&front->write, rq);
                        intel_frontbuffer_put(front);
                }
+       }
 
-               if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
-                       err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
-                       if (unlikely(err))
-                               return err;
-               }
+       if (fence) {
+               struct dma_fence *curr;
+               enum dma_resv_usage usage;
+               int idx;
 
-               if (fence) {
-                       dma_resv_add_fence(vma->obj->base.resv, fence,
-                                          DMA_RESV_USAGE_WRITE);
+               obj->read_domains = 0;
+               if (flags & EXEC_OBJECT_WRITE) {
+                       usage = DMA_RESV_USAGE_WRITE;
                        obj->write_domain = I915_GEM_DOMAIN_RENDER;
-                       obj->read_domains = 0;
-               }
-       } else {
-               if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
-                       err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
-                       if (unlikely(err))
-                               return err;
+               } else {
+                       usage = DMA_RESV_USAGE_READ;
                }
 
-               if (fence) {
-                       dma_resv_add_fence(vma->obj->base.resv, fence,
-                                          DMA_RESV_USAGE_READ);
-                       obj->write_domain = 0;
-               }
+               dma_fence_array_for_each(curr, idx, fence)
+                       dma_resv_add_fence(vma->obj->base.resv, curr, usage);
        }
 
        if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
index 62ff774..575d67b 100644
@@ -152,6 +152,7 @@ int intel_region_ttm_fini(struct intel_memory_region *mem)
  * Convert an opaque TTM resource manager resource to a refcounted sg_table.
  * @mem: The memory region.
  * @res: The resource manager resource obtained from the TTM resource manager.
+ * @page_alignment: Required page alignment for each sg entry. Power of two.
  *
  * The gem backends typically use sg-tables for operations on the underlying
  * io_memory. So provide a way for the backends to translate the
@@ -161,16 +162,19 @@ int intel_region_ttm_fini(struct intel_memory_region *mem)
  */
 struct i915_refct_sgt *
 intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
-                                 struct ttm_resource *res)
+                                 struct ttm_resource *res,
+                                 u32 page_alignment)
 {
        if (mem->is_range_manager) {
                struct ttm_range_mgr_node *range_node =
                        to_ttm_range_mgr_node(res);
 
                return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
-                                             mem->region.start);
+                                             mem->region.start,
+                                             page_alignment);
        } else {
-               return i915_rsgt_from_buddy_resource(res, mem->region.start);
+               return i915_rsgt_from_buddy_resource(res, mem->region.start,
+                                                    page_alignment);
        }
 }
 
index cf9d86d..5bb8d8b 100644
@@ -24,7 +24,8 @@ int intel_region_ttm_fini(struct intel_memory_region *mem);
 
 struct i915_refct_sgt *
 intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
-                                 struct ttm_resource *res);
+                                 struct ttm_resource *res,
+                                 u32 page_alignment);
 
 void intel_region_ttm_resource_free(struct intel_memory_region *mem,
                                    struct ttm_resource *res);
index 8633bec..ab9f17f 100644
@@ -742,7 +742,7 @@ static int pot_hole(struct i915_address_space *vm,
                u64 addr;
 
                for (addr = round_up(hole_start + min_alignment, step) - min_alignment;
-                    addr <= round_down(hole_end - (2 * min_alignment), step) - min_alignment;
+                    hole_end > addr && hole_end - addr >= 2 * min_alignment;
                     addr += step) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
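
The rewritten loop bound above avoids unsigned underflow: with u64 arithmetic, round_down(hole_end - 2 * min_alignment, step) - min_alignment wraps to an enormous value whenever the hole is smaller than the amount subtracted, so the old loop could run far past the hole. Testing the remaining space directly cannot wrap. A standalone demonstration with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint64_t hole_end = 0x1000, align = 0x1000, step = 0x1000;
            const uint64_t addr = 0;

            /* Old-style bound: 0x1000 - 0x2000 wraps and the loop would run. */
            uint64_t bad_bound = (hole_end - 2 * align) / step * step - align;

            printf("wrapped bound: %#llx\n", (unsigned long long)bad_bound);

            /* Fixed condition: explicitly check there is room left. */
            printf("loop body runs: %s\n",
                   hole_end > addr && hole_end - addr >= 2 * align ? "yes" : "no");
            return 0;
    }
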
index 73eb53e..3b18e59 100644
@@ -451,7 +451,6 @@ out_put:
 
 static int igt_mock_max_segment(void *arg)
 {
-       const unsigned int max_segment = rounddown(UINT_MAX, PAGE_SIZE);
        struct intel_memory_region *mem = arg;
        struct drm_i915_private *i915 = mem->i915;
        struct i915_ttm_buddy_resource *res;
@@ -460,7 +459,10 @@ static int igt_mock_max_segment(void *arg)
        struct drm_buddy *mm;
        struct list_head *blocks;
        struct scatterlist *sg;
+       I915_RND_STATE(prng);
        LIST_HEAD(objects);
+       unsigned int max_segment;
+       unsigned int ps;
        u64 size;
        int err = 0;
 
@@ -472,7 +474,13 @@ static int igt_mock_max_segment(void *arg)
         */
 
        size = SZ_8G;
-       mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0, 0);
+       ps = PAGE_SIZE;
+       if (i915_prandom_u64_state(&prng) & 1)
+               ps = SZ_64K; /* For something like DG2 */
+
+       max_segment = round_down(UINT_MAX, ps);
+
+       mem = mock_region_create(i915, 0, size, ps, 0, 0);
        if (IS_ERR(mem))
                return PTR_ERR(mem);
 
@@ -498,12 +506,21 @@ static int igt_mock_max_segment(void *arg)
        }
 
        for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+               dma_addr_t daddr = sg_dma_address(sg);
+
                if (sg->length > max_segment) {
                        pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
                               __func__, sg->length, max_segment);
                        err = -EINVAL;
                        goto out_close;
                }
+
+               if (!IS_ALIGNED(daddr, ps)) {
+                       pr_err("%s: Created an unaligned scatterlist entry, addr=%pa, ps=%u\n",
+                              __func__,  &daddr, ps);
+                       err = -EINVAL;
+                       goto out_close;
+               }
        }
 
 out_close:
index 670557c..bac21fe 100644
@@ -33,7 +33,8 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj)
                return PTR_ERR(obj->mm.res);
 
        obj->mm.rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
-                                                        obj->mm.res);
+                                                        obj->mm.res,
+                                                        obj->mm.region->min_page_size);
        if (IS_ERR(obj->mm.rsgt)) {
                err = PTR_ERR(obj->mm.rsgt);
                goto err_free_resource;
index c849533..3f5750c 100644
@@ -207,6 +207,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
 
        ret = dcss_submodules_init(dcss);
        if (ret) {
+               of_node_put(dcss->of_port);
                dev_err(dev, "submodules initialization failed\n");
                goto clks_err;
        }
@@ -237,6 +238,8 @@ void dcss_dev_destroy(struct dcss_dev *dcss)
                dcss_clocks_disable(dcss);
        }
 
+       of_node_put(dcss->of_port);
+
        pm_runtime_disable(dcss->dev);
 
        dcss_submodules_stop(dcss);
index 4e665c8..efe9840 100644
@@ -498,10 +498,15 @@ int adreno_hw_init(struct msm_gpu *gpu)
 
                ring->cur = ring->start;
                ring->next = ring->start;
-
-               /* reset completed fence seqno: */
-               ring->memptrs->fence = ring->fctx->completed_fence;
                ring->memptrs->rptr = 0;
+
+               /* Detect and clean up an impossible fence, i.e. if the GPU
+                * managed to scribble something invalid, we don't want that
+                * to confuse us into mistakenly believing that submits have
+                * completed.
+                */
+               if (fence_before(ring->fctx->last_fence, ring->memptrs->fence))
+                       ring->memptrs->fence = ring->fctx->last_fence;
        }
 
        return 0;
@@ -1057,7 +1062,8 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
        for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
                release_firmware(adreno_gpu->fw[i]);
 
-       pm_runtime_disable(&priv->gpu_pdev->dev);
+       if (pm_runtime_enabled(&priv->gpu_pdev->dev))
+               pm_runtime_disable(&priv->gpu_pdev->dev);
 
        msm_gpu_cleanup(&adreno_gpu->base);
 }
index 3a462e3..a1b8c45 100644
@@ -1251,12 +1251,13 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
        DPU_ATRACE_BEGIN("encoder_vblank_callback");
        dpu_enc = to_dpu_encoder_virt(drm_enc);
 
+       atomic_inc(&phy_enc->vsync_cnt);
+
        spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
        if (dpu_enc->crtc)
                dpu_crtc_vblank_callback(dpu_enc->crtc);
        spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
 
-       atomic_inc(&phy_enc->vsync_cnt);
        DPU_ATRACE_END("encoder_vblank_callback");
 }
 
index 59da348..0ec809a 100644
@@ -252,11 +252,6 @@ static int dpu_encoder_phys_wb_atomic_check(
        DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n",
                        phys_enc->wb_idx, mode->name, mode->hdisplay, mode->vdisplay);
 
-       if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
-               return 0;
-
-       fb = conn_state->writeback_job->fb;
-
        if (!conn_state || !conn_state->connector) {
                DPU_ERROR("invalid connector state\n");
                return -EINVAL;
@@ -267,6 +262,11 @@ static int dpu_encoder_phys_wb_atomic_check(
                return -EINVAL;
        }
 
+       if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+               return 0;
+
+       fb = conn_state->writeback_job->fb;
+
        DPU_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
                        fb->width, fb->height);
 
index 399115e..2fd7870 100644
@@ -11,7 +11,14 @@ static int dpu_wb_conn_get_modes(struct drm_connector *connector)
        struct msm_drm_private *priv = dev->dev_private;
        struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
 
-       return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_linewidth,
+       /*
+        * Ideally we would limit the modes to the max_linewidth, but on some
+        * chipsets that lets even 4K modes through, and those then fail the
+        * per-SSPP bandwidth checks. So, until dual-SSPP and source-split
+        * support are added (which would make 4K modes workable), limit the
+        * modes based on max_mixer_width.
+        */
+       return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_mixer_width,
                        dev->mode_config.max_height);
 }
 
index fb48c8c..17cb1fc 100644
@@ -216,6 +216,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
                encoder = mdp4_lcdc_encoder_init(dev, panel_node);
                if (IS_ERR(encoder)) {
                        DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
+                       of_node_put(panel_node);
                        return PTR_ERR(encoder);
                }
 
@@ -225,6 +226,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
                connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
                if (IS_ERR(connector)) {
                        DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
+                       of_node_put(panel_node);
                        return PTR_ERR(connector);
                }
 
index b7f5b8d..7032493 100644
@@ -1534,6 +1534,8 @@ end:
        return ret;
 }
 
+static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl);
+
 static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
 {
        int ret = 0;
@@ -1557,7 +1559,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
 
        ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
        if (!ret)
-               ret = dp_ctrl_on_stream(&ctrl->dp_ctrl);
+               ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl);
        else
                DRM_ERROR("failed to enable DP link controller\n");
 
@@ -1813,7 +1815,27 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
        return dp_ctrl_setup_main_link(ctrl, &training_step);
 }
 
-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
+static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl)
+{
+       int ret;
+       struct dp_ctrl_private *ctrl;
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+       ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+
+       ret = dp_ctrl_enable_stream_clocks(ctrl);
+       if (ret) {
+               DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+               return ret;
+       }
+
+       dp_ctrl_send_phy_test_pattern(ctrl);
+
+       return 0;
+}
+
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
 {
        int ret = 0;
        bool mainlink_ready = false;
@@ -1849,12 +1871,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
                goto end;
        }
 
-       if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
-               dp_ctrl_send_phy_test_pattern(ctrl);
-               return 0;
-       }
-
-       if (!dp_ctrl_channel_eq_ok(ctrl))
+       if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl))
                dp_ctrl_link_retrain(ctrl);
 
        /* stop txing train pattern to end link training */
index 0745fde..b563e2e 100644
@@ -21,7 +21,7 @@ struct dp_ctrl {
 };
 
 int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train);
 int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
 int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl);
 int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
index bce7793..239c8e3 100644
@@ -309,12 +309,15 @@ static void dp_display_unbind(struct device *dev, struct device *master,
        struct msm_drm_private *priv = dev_get_drvdata(master);
 
        /* disable all HPD interrupts */
-       dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+       if (dp->core_initialized)
+               dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
 
        kthread_stop(dp->ev_tsk);
 
        dp_power_client_deinit(dp->power);
        dp_aux_unregister(dp->aux);
+       dp->drm_dev = NULL;
+       dp->aux->drm_dev = NULL;
        priv->dp[dp->id] = NULL;
 }
 
@@ -872,7 +875,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
                return 0;
        }
 
-       rc = dp_ctrl_on_stream(dp->ctrl);
+       rc = dp_ctrl_on_stream(dp->ctrl, data);
        if (!rc)
                dp_display->power_on = true;
 
@@ -1659,6 +1662,7 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
        int rc = 0;
        struct dp_display_private *dp_display;
        u32 state;
+       bool force_link_train = false;
 
        dp_display = container_of(dp, struct dp_display_private, dp_display);
        if (!dp_display->dp_mode.drm_mode.clock) {
@@ -1693,10 +1697,12 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
 
        state =  dp_display->hpd_state;
 
-       if (state == ST_DISPLAY_OFF)
+       if (state == ST_DISPLAY_OFF) {
                dp_display_host_phy_init(dp_display);
+               force_link_train = true;
+       }
 
-       dp_display_enable(dp_display, 0);
+       dp_display_enable(dp_display, force_link_train);
 
        rc = dp_display_post_enable(dp);
        if (rc) {
@@ -1705,10 +1711,6 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
                dp_display_unprepare(dp);
        }
 
-       /* manual kick off plug event to train link */
-       if (state == ST_DISPLAY_OFF)
-               dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0);
-
        /* completed connection */
        dp_display->hpd_state = ST_CONNECTED;
 
index 4448536..14ab9a6 100644
@@ -964,7 +964,7 @@ static const struct drm_driver msm_driver = {
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
-       .gem_prime_mmap     = drm_gem_prime_mmap,
+       .gem_prime_mmap     = msm_gem_prime_mmap,
 #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = msm_debugfs_init,
 #endif
index 08388d7..099a67d 100644
@@ -246,6 +246,7 @@ unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_t
 void msm_gem_shrinker_init(struct drm_device *dev);
 void msm_gem_shrinker_cleanup(struct drm_device *dev);
 
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
index 3df2554..38e3323 100644
@@ -46,12 +46,14 @@ bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence)
                (int32_t)(*fctx->fenceptr - fence) >= 0;
 }
 
-/* called from workqueue */
+/* called from irq handler and workqueue (in recover path) */
 void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 {
-       spin_lock(&fctx->spinlock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&fctx->spinlock, flags);
        fctx->completed_fence = max(fence, fctx->completed_fence);
-       spin_unlock(&fctx->spinlock);
+       spin_unlock_irqrestore(&fctx->spinlock, flags);
 }
 
 struct msm_fence {
index 97d5b4d..7f92231 100644
@@ -439,14 +439,12 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
        return ret;
 }
 
-void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+void msm_gem_unpin_locked(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
        GEM_WARN_ON(!msm_gem_is_locked(obj));
 
-       msm_gem_unpin_vma(vma);
-
        msm_obj->pin_count--;
        GEM_WARN_ON(msm_obj->pin_count < 0);
 
@@ -586,7 +584,8 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj,
        msm_gem_lock(obj);
        vma = lookup_vma(obj, aspace);
        if (!GEM_WARN_ON(!vma)) {
-               msm_gem_unpin_vma_locked(obj, vma);
+               msm_gem_unpin_vma(vma);
+               msm_gem_unpin_locked(obj);
        }
        msm_gem_unlock(obj);
 }
index c75d3b8..6b7d5bb 100644
@@ -145,7 +145,7 @@ struct msm_gem_object {
 
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
-void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+void msm_gem_unpin_locked(struct drm_gem_object *obj);
 struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
                                           struct msm_gem_address_space *aspace);
 int msm_gem_get_iova(struct drm_gem_object *obj,
@@ -377,10 +377,11 @@ struct msm_gem_submit {
        } *cmd;  /* array of size nr_cmds */
        struct {
 /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
-#define BO_VALID    0x8000   /* is current addr in cmdstream correct/valid? */
-#define BO_LOCKED   0x4000   /* obj lock is held */
-#define BO_ACTIVE   0x2000   /* active refcnt is held */
-#define BO_PINNED   0x1000   /* obj is pinned and on active list */
+#define BO_VALID       0x8000  /* is current addr in cmdstream correct/valid? */
+#define BO_LOCKED      0x4000  /* obj lock is held */
+#define BO_ACTIVE      0x2000  /* active refcnt is held */
+#define BO_OBJ_PINNED  0x1000  /* obj (pages) is pinned and on active list */
+#define BO_VMA_PINNED  0x0800  /* vma (virtual address) is pinned */
                uint32_t flags;
                union {
                        struct msm_gem_object *obj;
index 94ab705..dcc8a57 100644
 #include "msm_drv.h"
 #include "msm_gem.h"
 
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+       int ret;
+
+       /* Ensure the mmap offset is initialized. It is created lazily, so
+        * unless the object has first been mmap'd directly as a GEM object,
+        * the offset will not exist yet.
+        */
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret)
+               return ret;
+
+       return drm_gem_prime_mmap(obj, vma);
+}
+
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
index 8097522..c9e4aeb 100644
@@ -232,8 +232,11 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
         */
        submit->bos[i].flags &= ~cleanup_flags;
 
-       if (flags & BO_PINNED)
-               msm_gem_unpin_vma_locked(obj, submit->bos[i].vma);
+       if (flags & BO_VMA_PINNED)
+               msm_gem_unpin_vma(submit->bos[i].vma);
+
+       if (flags & BO_OBJ_PINNED)
+               msm_gem_unpin_locked(obj);
 
        if (flags & BO_ACTIVE)
                msm_gem_active_put(obj);
@@ -244,7 +247,9 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
 
 static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
 {
-       submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE | BO_LOCKED);
+       unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED |
+                                BO_ACTIVE | BO_LOCKED;
+       submit_cleanup_bo(submit, i, cleanup_flags);
 
        if (!(submit->bos[i].flags & BO_VALID))
                submit->bos[i].iova = 0;
@@ -375,7 +380,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
                if (ret)
                        break;
 
-               submit->bos[i].flags |= BO_PINNED;
+               submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
                submit->bos[i].vma = vma;
 
                if (vma->iova == submit->bos[i].iova) {
@@ -511,7 +516,7 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool error)
        unsigned i;
 
        if (error)
-               cleanup_flags |= BO_PINNED | BO_ACTIVE;
+               cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE;
 
        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
@@ -529,7 +534,8 @@ void msm_submit_retire(struct msm_gem_submit *submit)
                struct drm_gem_object *obj = &submit->bos[i].obj->base;
 
                msm_gem_lock(obj);
-               submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE);
+               /* Note, VMA already fence-unpinned before submit: */
+               submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE);
                msm_gem_unlock(obj);
                drm_gem_object_put(obj);
        }
@@ -922,7 +928,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                                                    INT_MAX, GFP_KERNEL);
        }
        if (submit->fence_id < 0) {
-               ret = submit->fence_id = 0;
+               ret = submit->fence_id;
                submit->fence_id = 0;
        }
 
index 3c1dc92..c471aeb 100644
@@ -62,8 +62,7 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
        unsigned size = vma->node.size;
 
        /* Print a message if we try to purge a vma in use */
-       if (GEM_WARN_ON(msm_gem_vma_inuse(vma)))
-               return;
+       GEM_WARN_ON(msm_gem_vma_inuse(vma));
 
        /* Don't do anything if the memory isn't mapped */
        if (!vma->mapped)
@@ -128,8 +127,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 void msm_gem_close_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma)
 {
-       if (GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped))
-               return;
+       GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);
 
        spin_lock(&aspace->lock);
        if (vma->iova)
index eb8a666..c8cd9bf 100644
@@ -164,24 +164,6 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
        return ret;
 }
 
-static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
-               uint32_t fence)
-{
-       struct msm_gem_submit *submit;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ring->submit_lock, flags);
-       list_for_each_entry(submit, &ring->submits, node) {
-               if (fence_after(submit->seqno, fence))
-                       break;
-
-               msm_update_fence(submit->ring->fctx,
-                       submit->hw_fence->seqno);
-               dma_fence_signal(submit->hw_fence);
-       }
-       spin_unlock_irqrestore(&ring->submit_lock, flags);
-}
-
 #ifdef CONFIG_DEV_COREDUMP
 static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
                size_t count, void *data, size_t datalen)
@@ -436,9 +418,9 @@ static void recover_worker(struct kthread_work *work)
                 * one more to clear the faulting submit
                 */
                if (ring == cur_ring)
-                       fence++;
+                       ring->memptrs->fence = ++fence;
 
-               update_fences(gpu, ring, fence);
+               msm_update_fence(ring->fctx, fence);
        }
 
        if (msm_gpu_active(gpu)) {
@@ -672,7 +654,6 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
        msm_submit_retire(submit);
 
        pm_runtime_mark_last_busy(&gpu->pdev->dev);
-       pm_runtime_put_autosuspend(&gpu->pdev->dev);
 
        spin_lock_irqsave(&ring->submit_lock, flags);
        list_del(&submit->node);
@@ -686,6 +667,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
                msm_devfreq_idle(gpu);
        mutex_unlock(&gpu->active_lock);
 
+       pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
        msm_gem_submit_put(submit);
 }
 
@@ -735,7 +718,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
        int i;
 
        for (i = 0; i < gpu->nr_rings; i++)
-               update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
+               msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);
 
        kthread_queue_work(gpu->worker, &gpu->retire_work);
        update_sw_cntrs(gpu);
index bcaddbb..a54ed35 100644
@@ -58,7 +58,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
        u64 addr = iova;
        unsigned int i;
 
-       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+       for_each_sgtable_sg(sgt, sg, i) {
                size_t size = sg->length;
                phys_addr_t phys = sg_phys(sg);
 
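
The msm_iommu hunk above swaps for_each_sg() over sgt->nents for
for_each_sgtable_sg(). After DMA mapping, sgt->nents holds the number of
DMA-mapped entries (possibly coalesced), while sgt->orig_nents holds the
CPU-side entry count; a walk that reads sg_phys() must use the latter.
The sgtable helpers bake the right count in; paraphrased from
<linux/scatterlist.h>, to the best of my reading:

/* CPU-side walk: always iterate the original entry count. */
#define for_each_sgtable_sg(sgt, sg, i) \
	for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)

/* The DMA-side walker, by contrast, uses the mapped count. */
#define for_each_sgtable_dma_sg(sgt, sg, i) \
	for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)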
index 4306632..56eecb4 100644
@@ -25,7 +25,7 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
 
                msm_gem_lock(obj);
                msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx);
-               submit->bos[i].flags &= ~BO_PINNED;
+               submit->bos[i].flags &= ~BO_VMA_PINNED;
                msm_gem_unlock(obj);
        }
 
index 7ba66ad..1635661 100644
@@ -680,7 +680,11 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
                goto out_free_dma;
 
        for (i = 0; i < npages; i += max) {
-               args.end = start + (max << PAGE_SHIFT);
+               if (args.start + (max << PAGE_SHIFT) > end)
+                       args.end = end;
+               else
+                       args.end = args.start + (max << PAGE_SHIFT);
+
                ret = migrate_vma_setup(&args);
                if (ret)
                        goto out_free_pfns;
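
The nouveau hunk above clamps the final window of the migration loop so
args.end can no longer run past the end of the range when npages is not
a multiple of max. The same pattern in isolation (userspace, illustrative
values):

/* Walk [start, end) in windows of at most `max` pages, clamping the last. */
#include <stdio.h>

#define PAGE_SHIFT 12UL

int main(void)
{
	unsigned long start = 0x1000, npages = 10, max = 4;
	unsigned long end = start + (npages << PAGE_SHIFT);
	unsigned long i, cur = start, next;

	for (i = 0; i < npages; i += max, cur = next) {
		if (cur + (max << PAGE_SHIFT) > end)
			next = end;                       /* short last window */
		else
			next = cur + (max << PAGE_SHIFT);
		printf("migrate [%#lx, %#lx)\n", cur, next);
	}
	return 0;
}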
index c960144..a189982 100644
@@ -713,7 +713,7 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
        of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms);
        desc->delay.hpd_reliable = reliable_ms;
        of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms);
-       desc->delay.hpd_reliable = absent_ms;
+       desc->delay.hpd_absent = absent_ms;
 
        /* Power the panel on so we can read the EDID */
        ret = pm_runtime_get_sync(dev);
index 087e69b..b1e6d23 100644
@@ -433,8 +433,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 
        if (args->retained) {
                if (args->madv == PANFROST_MADV_DONTNEED)
-                       list_add_tail(&bo->base.madv_list,
-                                     &pfdev->shrinker_list);
+                       list_move_tail(&bo->base.madv_list,
+                                      &pfdev->shrinker_list);
                else if (args->madv == PANFROST_MADV_WILLNEED)
                        list_del_init(&bo->base.madv_list);
        }
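
The panfrost madvise fix above matters because the ioctl can see
PANFROST_MADV_DONTNEED for a BO that is already on the shrinker list;
list_add_tail() on an entry that is still linked corrupts the list,
while list_move_tail() unlinks first. A self-contained model of the
<linux/list.h> semantics being relied on:

/* list_move_tail() is list_del() + list_add_tail(), so it is safe on an
 * entry that is already on some list; bare list_add_tail() is not. */
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

static void list_move_tail(struct list_head *n, struct list_head *h)
{
	list_del(n);           /* unlink from wherever it currently is ... */
	list_add_tail(n, h);   /* ... then append */
}

int main(void)
{
	struct list_head shrinker, node;

	list_init(&shrinker); list_init(&node);
	list_move_tail(&node, &shrinker);
	list_move_tail(&node, &shrinker);  /* repeated DONTNEED: still sane */
	printf("list %s\n", shrinker.next == &node && node.next == &shrinker
	       ? "intact" : "corrupt");
	return 0;
}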
index d3f82b2..b285a80 100644
@@ -518,7 +518,7 @@ err_map:
 err_pages:
        drm_gem_shmem_put_pages(&bo->base);
 err_bo:
-       drm_gem_object_put(&bo->base.base);
+       panfrost_gem_mapping_put(bomapping);
        return ret;
 }
 
index 67d38f5..13ed33e 100644
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
 
+#if defined(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#else
+#define arm_iommu_detach_device(...)   ({ })
+#define arm_iommu_release_mapping(...) ({ })
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_fb.h"
 #include "rockchip_drm_gem.h"
@@ -49,6 +57,15 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
        if (!private->domain)
                return 0;
 
+       if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
+               struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+               if (mapping) {
+                       arm_iommu_detach_device(dev);
+                       arm_iommu_release_mapping(mapping);
+               }
+       }
+
        ret = iommu_attach_device(private->domain, dev);
        if (ret) {
                DRM_DEV_ERROR(dev, "Failed to attach iommu device\n");
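
The rockchip hunk above pairs no-op stub macros with an IS_ENABLED()
check: when CONFIG_ARM_DMA_USE_IOMMU is off, the arm_iommu_* call sites
still compile (as stubs) on every architecture, and the constant-false
branch is discarded by the optimizer while remaining syntax-checked in
every build. The pattern in miniature, with a hypothetical FEATURE_FOO
flag standing in for the Kconfig option:

#include <stdio.h>

#ifdef FEATURE_FOO
#define FEATURE_FOO_ENABLED 1
static void foo_detach(const char *dev) { printf("detach %s\n", dev); }
#else
#define FEATURE_FOO_ENABLED 0
static void foo_detach(const char *dev) { (void)dev; }  /* stub */
#endif

int main(void)
{
	if (FEATURE_FOO_ENABLED)   /* compiled either way, dropped when 0 */
		foo_detach("gpu");
	return 0;
}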
index 191c560..6b25b2f 100644
@@ -190,7 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 }
 EXPORT_SYMBOL(drm_sched_entity_flush);
 
-static void drm_sched_entity_kill_jobs_irq_work(struct irq_work *wrk)
+static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
 {
        struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
 
@@ -207,8 +207,8 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
        struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                                 finish_cb);
 
-       init_irq_work(&job->work, drm_sched_entity_kill_jobs_irq_work);
-       irq_work_queue(&job->work);
+       INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
+       schedule_work(&job->work);
 }
 
 static struct dma_fence *
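
The scheduler change above replaces irq_work with a regular work item:
dma_fence callbacks may fire from interrupt context, where the job
cleanup (which can take sleeping locks) must not run, and irq_work still
executes in a hard-irq-like context. Deferring to the system workqueue
moves the cleanup into process context. The shape of the pattern, as a
kernel-style sketch (not a standalone program; the work body here is
representative, not the exact upstream one):

static void job_cleanup_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	/* process context: sleeping locks and freeing are fine here */
	drm_sched_fence_finished(job->s_fence);
}

static void job_fence_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, typeof(*job), finish_cb);

	INIT_WORK(&job->work, job_cleanup_work);  /* defer out of IRQ context */
	schedule_work(&job->work);
}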
index 0839444..f4886e6 100644
@@ -350,7 +350,7 @@ static int ssd130x_init(struct ssd130x_device *ssd130x)
 
        /* Set precharge period in number of ticks from the internal clock */
        precharge = (SSD130X_SET_PRECHARGE_PERIOD1_SET(ssd130x->prechargep1) |
-                    SSD130X_SET_PRECHARGE_PERIOD1_SET(ssd130x->prechargep2));
+                    SSD130X_SET_PRECHARGE_PERIOD2_SET(ssd130x->prechargep2));
        ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_PRECHARGE_PERIOD, precharge);
        if (ret < 0)
                return ret;
index 275f7e4..6eb1aab 100644
@@ -7,6 +7,7 @@
  */
 
 #include <linux/component.h>
+#include <linux/dma-mapping.h>
 #include <linux/kfifo.h>
 #include <linux/module.h>
 #include <linux/of_graph.h>
@@ -73,7 +74,6 @@ static int sun4i_drv_bind(struct device *dev)
                goto free_drm;
        }
 
-       dev_set_drvdata(dev, drm);
        drm->dev_private = drv;
        INIT_LIST_HEAD(&drv->frontend_list);
        INIT_LIST_HEAD(&drv->engine_list);
@@ -114,6 +114,8 @@ static int sun4i_drv_bind(struct device *dev)
 
        drm_fbdev_generic_setup(drm, 32);
 
+       dev_set_drvdata(dev, drm);
+
        return 0;
 
 finish_poll:
@@ -130,6 +132,7 @@ static void sun4i_drv_unbind(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
 
+       dev_set_drvdata(dev, NULL);
        drm_dev_unregister(drm);
        drm_kms_helper_poll_fini(drm);
        drm_atomic_helper_shutdown(drm);
@@ -367,6 +370,13 @@ static int sun4i_drv_probe(struct platform_device *pdev)
 
        INIT_KFIFO(list.fifo);
 
+       /*
+        * DE2 and DE3 cores actually support 40-bit addresses, but the
+        * driver does not.
+        */
+       dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
        for (i = 0;; i++) {
                struct device_node *pipeline = of_parse_phandle(np,
                                                                "allwinner,pipelines",
index 6d43080..85fb9e8 100644
@@ -117,7 +117,7 @@ static bool sun4i_layer_format_mod_supported(struct drm_plane *plane,
        struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
 
        if (IS_ERR_OR_NULL(layer->backend->frontend))
-               sun4i_backend_format_is_supported(format, modifier);
+               return sun4i_backend_format_is_supported(format, modifier);
 
        return sun4i_backend_format_is_supported(format, modifier) ||
               sun4i_frontend_format_is_supported(format, modifier);
index a8d75fd..477cb69 100644
@@ -93,34 +93,10 @@ crtcs_exit:
        return crtcs;
 }
 
-static int sun8i_dw_hdmi_find_connector_pdev(struct device *dev,
-                                            struct platform_device **pdev_out)
-{
-       struct platform_device *pdev;
-       struct device_node *remote;
-
-       remote = of_graph_get_remote_node(dev->of_node, 1, -1);
-       if (!remote)
-               return -ENODEV;
-
-       if (!of_device_is_compatible(remote, "hdmi-connector")) {
-               of_node_put(remote);
-               return -ENODEV;
-       }
-
-       pdev = of_find_device_by_node(remote);
-       of_node_put(remote);
-       if (!pdev)
-               return -ENODEV;
-
-       *pdev_out = pdev;
-       return 0;
-}
-
 static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
                              void *data)
 {
-       struct platform_device *pdev = to_platform_device(dev), *connector_pdev;
+       struct platform_device *pdev = to_platform_device(dev);
        struct dw_hdmi_plat_data *plat_data;
        struct drm_device *drm = data;
        struct device_node *phy_node;
@@ -167,30 +143,16 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
                return dev_err_probe(dev, PTR_ERR(hdmi->regulator),
                                     "Couldn't get regulator\n");
 
-       ret = sun8i_dw_hdmi_find_connector_pdev(dev, &connector_pdev);
-       if (!ret) {
-               hdmi->ddc_en = gpiod_get_optional(&connector_pdev->dev,
-                                                 "ddc-en", GPIOD_OUT_HIGH);
-               platform_device_put(connector_pdev);
-
-               if (IS_ERR(hdmi->ddc_en)) {
-                       dev_err(dev, "Couldn't get ddc-en gpio\n");
-                       return PTR_ERR(hdmi->ddc_en);
-               }
-       }
-
        ret = regulator_enable(hdmi->regulator);
        if (ret) {
                dev_err(dev, "Failed to enable regulator\n");
-               goto err_unref_ddc_en;
+               return ret;
        }
 
-       gpiod_set_value(hdmi->ddc_en, 1);
-
        ret = reset_control_deassert(hdmi->rst_ctrl);
        if (ret) {
                dev_err(dev, "Could not deassert ctrl reset control\n");
-               goto err_disable_ddc_en;
+               goto err_disable_regulator;
        }
 
        ret = clk_prepare_enable(hdmi->clk_tmds);
@@ -245,12 +207,8 @@ err_disable_clk_tmds:
        clk_disable_unprepare(hdmi->clk_tmds);
 err_assert_ctrl_reset:
        reset_control_assert(hdmi->rst_ctrl);
-err_disable_ddc_en:
-       gpiod_set_value(hdmi->ddc_en, 0);
+err_disable_regulator:
        regulator_disable(hdmi->regulator);
-err_unref_ddc_en:
-       if (hdmi->ddc_en)
-               gpiod_put(hdmi->ddc_en);
 
        return ret;
 }
@@ -264,11 +222,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
        sun8i_hdmi_phy_deinit(hdmi->phy);
        clk_disable_unprepare(hdmi->clk_tmds);
        reset_control_assert(hdmi->rst_ctrl);
-       gpiod_set_value(hdmi->ddc_en, 0);
        regulator_disable(hdmi->regulator);
-
-       if (hdmi->ddc_en)
-               gpiod_put(hdmi->ddc_en);
 }
 
 static const struct component_ops sun8i_dw_hdmi_ops = {
index bffe1b9..9ad0952 100644
@@ -9,7 +9,6 @@
 #include <drm/bridge/dw_hdmi.h>
 #include <drm/drm_encoder.h>
 #include <linux/clk.h>
-#include <linux/gpio/consumer.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
@@ -193,7 +192,6 @@ struct sun8i_dw_hdmi {
        struct regulator                *regulator;
        const struct sun8i_dw_hdmi_quirks *quirks;
        struct reset_control            *rst_ctrl;
-       struct gpio_desc                *ddc_en;
 };
 
 extern struct platform_driver sun8i_hdmi_phy_driver;
index 768242a..5422363 100644
@@ -627,7 +627,7 @@ static const struct drm_connector_funcs simpledrm_connector_funcs = {
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static int
+static enum drm_mode_status
 simpledrm_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
                                    const struct drm_display_mode *mode)
 {
index 75d308e..406e9c3 100644
@@ -109,11 +109,11 @@ void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
                return;
 
        spin_lock(&bo->bdev->lru_lock);
-       if (bo->bulk_move && bo->resource)
-               ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
+       if (bo->resource)
+               ttm_resource_del_bulk_move(bo->resource, bo);
        bo->bulk_move = bulk;
-       if (bo->bulk_move && bo->resource)
-               ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
+       if (bo->resource)
+               ttm_resource_add_bulk_move(bo->resource, bo);
        spin_unlock(&bo->bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_set_bulk_move);
@@ -689,8 +689,11 @@ void ttm_bo_pin(struct ttm_buffer_object *bo)
 {
        dma_resv_assert_held(bo->base.resv);
        WARN_ON_ONCE(!kref_read(&bo->kref));
-       if (!(bo->pin_count++) && bo->bulk_move && bo->resource)
-               ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
+       spin_lock(&bo->bdev->lru_lock);
+       if (bo->resource)
+               ttm_resource_del_bulk_move(bo->resource, bo);
+       ++bo->pin_count;
+       spin_unlock(&bo->bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_pin);
 
@@ -707,8 +710,11 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo)
        if (WARN_ON_ONCE(!bo->pin_count))
                return;
 
-       if (!(--bo->pin_count) && bo->bulk_move && bo->resource)
-               ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
+       spin_lock(&bo->bdev->lru_lock);
+       --bo->pin_count;
+       if (bo->resource)
+               ttm_resource_add_bulk_move(bo->resource, bo);
+       spin_unlock(&bo->bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unpin);
 
index a0562ab..e7147e3 100644
@@ -156,8 +156,12 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 
                ttm_resource_manager_for_each_res(man, &cursor, res) {
                        struct ttm_buffer_object *bo = res->bo;
-                       uint32_t num_pages = PFN_UP(bo->base.size);
+                       uint32_t num_pages;
 
+                       if (!bo)
+                               continue;
+
+                       num_pages = PFN_UP(bo->base.size);
                        ret = ttm_bo_swapout(bo, ctx, gfp_flags);
                        /* ttm_bo_swapout has dropped the lru_lock */
                        if (!ret)
index 65889b3..20f9adc 100644
@@ -91,8 +91,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
 }
 
 /* Add the resource to a bulk_move cursor */
-void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
-                          struct ttm_resource *res)
+static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
+                                 struct ttm_resource *res)
 {
        struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
 
@@ -105,8 +105,8 @@ void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
 }
 
 /* Remove the resource from a bulk_move range */
-void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
-                          struct ttm_resource *res)
+static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
+                                 struct ttm_resource *res)
 {
        struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
 
@@ -122,6 +122,22 @@ void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
        }
 }
 
+/* Add the resource to a bulk move if the BO is configured for it */
+void ttm_resource_add_bulk_move(struct ttm_resource *res,
+                               struct ttm_buffer_object *bo)
+{
+       if (bo->bulk_move && !bo->pin_count)
+               ttm_lru_bulk_move_add(bo->bulk_move, res);
+}
+
+/* Remove the resource from a bulk move if the BO is configured for it */
+void ttm_resource_del_bulk_move(struct ttm_resource *res,
+                               struct ttm_buffer_object *bo)
+{
+       if (bo->bulk_move && !bo->pin_count)
+               ttm_lru_bulk_move_del(bo->bulk_move, res);
+}
+
 /* Move a resource to the LRU or bulk tail */
 void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
 {
@@ -169,15 +185,14 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
        res->bus.is_iomem = false;
        res->bus.caching = ttm_cached;
        res->bo = bo;
-       INIT_LIST_HEAD(&res->lru);
 
        man = ttm_manager_type(bo->bdev, place->mem_type);
        spin_lock(&bo->bdev->lru_lock);
-       man->usage += res->num_pages << PAGE_SHIFT;
-       if (bo->bulk_move)
-               ttm_lru_bulk_move_add(bo->bulk_move, res);
+       if (bo->pin_count)
+               list_add_tail(&res->lru, &bo->bdev->pinned);
        else
-               ttm_resource_move_to_lru_tail(res);
+               list_add_tail(&res->lru, &man->lru[bo->priority]);
+       man->usage += res->num_pages << PAGE_SHIFT;
        spin_unlock(&bo->bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_resource_init);
@@ -210,8 +225,16 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
 {
        struct ttm_resource_manager *man =
                ttm_manager_type(bo->bdev, place->mem_type);
+       int ret;
+
+       ret = man->func->alloc(man, bo, place, res_ptr);
+       if (ret)
+               return ret;
 
-       return man->func->alloc(man, bo, place, res_ptr);
+       spin_lock(&bo->bdev->lru_lock);
+       ttm_resource_add_bulk_move(*res_ptr, bo);
+       spin_unlock(&bo->bdev->lru_lock);
+       return 0;
 }
 
 void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
@@ -221,12 +244,9 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
        if (!*res)
                return;
 
-       if (bo->bulk_move) {
-               spin_lock(&bo->bdev->lru_lock);
-               ttm_lru_bulk_move_del(bo->bulk_move, *res);
-               spin_unlock(&bo->bdev->lru_lock);
-       }
-
+       spin_lock(&bo->bdev->lru_lock);
+       ttm_resource_del_bulk_move(*res, bo);
+       spin_unlock(&bo->bdev->lru_lock);
        man = ttm_manager_type(bo->bdev, (*res)->mem_type);
        man->func->free(man, *res);
        *res = NULL;
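
Taken together, the ttm_bo.c and ttm_resource.c hunks above centralize a
single invariant: a resource sits on its BO's bulk-move list only while
the BO is unpinned, and every add/del transition happens under
bdev->lru_lock. A self-contained model of that invariant (userspace,
invented names):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

struct bo {
	int pin_count;
	bool has_bulk;   /* stands in for bo->bulk_move != NULL */
	bool on_bulk;    /* stands in for actual list membership */
};

/* Mirror ttm_resource_{add,del}_bulk_move(): no-ops for pinned BOs. */
static void bulk_add(struct bo *bo) { if (bo->has_bulk && !bo->pin_count) bo->on_bulk = true; }
static void bulk_del(struct bo *bo) { if (bo->has_bulk && !bo->pin_count) bo->on_bulk = false; }

static void bo_pin(struct bo *bo)
{
	pthread_mutex_lock(&lru_lock);
	bulk_del(bo);       /* leave the bulk list before raising pin_count */
	bo->pin_count++;
	pthread_mutex_unlock(&lru_lock);
}

static void bo_unpin(struct bo *bo)
{
	pthread_mutex_lock(&lru_lock);
	bo->pin_count--;    /* drop pin_count before rejoining the bulk list */
	bulk_add(bo);
	pthread_mutex_unlock(&lru_lock);
}

int main(void)
{
	struct bo bo = { .pin_count = 0, .has_bulk = true, .on_bulk = true };

	bo_pin(&bo);
	printf("pinned:   on_bulk=%d\n", bo.on_bulk);   /* 0 */
	bo_unpin(&bo);
	printf("unpinned: on_bulk=%d\n", bo.on_bulk);   /* 1 */
	return 0;
}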
index 49c0f2a..b8d8563 100644
@@ -248,6 +248,9 @@ void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
 {
        struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        mutex_lock(&vc4->purgeable.lock);
        list_add_tail(&bo->size_head, &vc4->purgeable.list);
        vc4->purgeable.num++;
@@ -259,6 +262,9 @@ static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
 {
        struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        /* list_del_init() is used here because the caller might release
         * the purgeable lock in order to acquire the madv one and update the
         * madv status.
@@ -387,6 +393,9 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_bo *bo;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return ERR_PTR(-ENODEV);
+
        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);
@@ -413,6 +422,9 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
        struct drm_gem_cma_object *cma_obj;
        struct vc4_bo *bo;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return ERR_PTR(-ENODEV);
+
        if (size == 0)
                return ERR_PTR(-EINVAL);
 
@@ -471,19 +483,20 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
        return bo;
 }
 
-int vc4_dumb_create(struct drm_file *file_priv,
-                   struct drm_device *dev,
-                   struct drm_mode_create_dumb *args)
+int vc4_bo_dumb_create(struct drm_file *file_priv,
+                      struct drm_device *dev,
+                      struct drm_mode_create_dumb *args)
 {
-       int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_bo *bo = NULL;
        int ret;
 
-       if (args->pitch < min_pitch)
-               args->pitch = min_pitch;
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
 
-       if (args->size < args->pitch * args->height)
-               args->size = args->pitch * args->height;
+       ret = vc4_dumb_fixup_args(args);
+       if (ret)
+               return ret;
 
        bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
        if (IS_ERR(bo))
@@ -601,8 +614,12 @@ static void vc4_bo_cache_time_work(struct work_struct *work)
 
 int vc4_bo_inc_usecnt(struct vc4_bo *bo)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
        int ret;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        /* Fast path: if the BO is already retained by someone, no need to
         * check the madv status.
         */
@@ -637,6 +654,11 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo)
 
 void vc4_bo_dec_usecnt(struct vc4_bo *bo)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        /* Fast path: if the BO is still retained by someone, no need to test
         * the madv value.
         */
@@ -756,6 +778,9 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
        struct vc4_bo *bo = NULL;
        int ret;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        ret = vc4_grab_bin_bo(vc4, vc4file);
        if (ret)
                return ret;
@@ -779,9 +804,13 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
 int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_vc4_mmap_bo *args = data;
        struct drm_gem_object *gem_obj;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
@@ -805,6 +834,9 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
        struct vc4_bo *bo = NULL;
        int ret;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        if (args->size == 0)
                return -EINVAL;
 
@@ -875,11 +907,15 @@ fail:
 int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_vc4_set_tiling *args = data;
        struct drm_gem_object *gem_obj;
        struct vc4_bo *bo;
        bool t_format;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        if (args->flags != 0)
                return -EINVAL;
 
@@ -918,10 +954,14 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
 int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_vc4_get_tiling *args = data;
        struct drm_gem_object *gem_obj;
        struct vc4_bo *bo;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        if (args->flags != 0 || args->modifier != 0)
                return -EINVAL;
 
@@ -948,6 +988,9 @@ int vc4_bo_cache_init(struct drm_device *dev)
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int i;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        /* Create the initial set of BO labels that the kernel will
         * use.  This lets us avoid a bunch of string reallocation in
         * the kernel's draw and BO allocation paths.
@@ -1007,6 +1050,9 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
        struct drm_gem_object *gem_obj;
        int ret = 0, label;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        if (!args->len)
                return -EINVAL;
 
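
Every legacy-GPU entry point in vc4_bo.c now opens with the same guard:
on BCM2711 ("vc5") the VC4 V3D paths do not exist, so reaching them is a
driver bug worth one log line plus -ENODEV rather than a crash deeper
in. The guard in isolation, with WARN_ON_ONCE() modeled as a GNU C
statement expression (as gcc and clang accept):

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

static bool is_vc5 = true;   /* hypothetical device flag */

#define WARN_ON_ONCE(cond) ({                          \
	static bool __warned;                          \
	bool __c = (cond);                             \
	if (__c && !__warned) {                        \
		__warned = true;                       \
		fprintf(stderr, "WARN: %s\n", #cond);  \
	}                                              \
	__c;                                           \
})

static int vc4_only_ioctl(void)
{
	if (WARN_ON_ONCE(is_vc5))
		return -ENODEV;   /* legacy path absent on vc5 */
	return 0;
}

int main(void)
{
	printf("%d\n", vc4_only_ioctl());  /* warns once, returns -19 */
	printf("%d\n", vc4_only_ioctl());  /* silent, still -19 */
	return 0;
}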
index 59b20c8..9355213 100644
@@ -256,7 +256,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
                 * Removing 1 from the FIFO full level however
                 * seems to completely remove that issue.
                 */
-               if (!vc4->hvs->hvs5)
+               if (!vc4->is_vc5)
                        return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
 
                return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
@@ -389,7 +389,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
        if (is_dsi)
                CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
 
-       if (vc4->hvs->hvs5)
+       if (vc4->is_vc5)
                CRTC_WRITE(PV_MUX_CFG,
                           VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP,
                                         PV_MUX_CFG_RGB_PIXEL_MUX_MODE));
@@ -775,17 +775,18 @@ struct vc4_async_flip_state {
        struct drm_framebuffer *old_fb;
        struct drm_pending_vblank_event *event;
 
-       struct vc4_seqno_cb cb;
+       union {
+               struct dma_fence_cb fence;
+               struct vc4_seqno_cb seqno;
+       } cb;
 };
 
 /* Called when the V3D execution for the BO being flipped to is done, so that
  * we can actually update the plane's address to point to it.
  */
 static void
-vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
+vc4_async_page_flip_complete(struct vc4_async_flip_state *flip_state)
 {
-       struct vc4_async_flip_state *flip_state =
-               container_of(cb, struct vc4_async_flip_state, cb);
        struct drm_crtc *crtc = flip_state->crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_plane *plane = crtc->primary;
@@ -802,59 +803,96 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
        drm_crtc_vblank_put(crtc);
        drm_framebuffer_put(flip_state->fb);
 
-       /* Decrement the BO usecnt in order to keep the inc/dec calls balanced
-        * when the planes are updated through the async update path.
-        * FIXME: we should move to generic async-page-flip when it's
-        * available, so that we can get rid of this hand-made cleanup_fb()
-        * logic.
-        */
-       if (flip_state->old_fb) {
-               struct drm_gem_cma_object *cma_bo;
-               struct vc4_bo *bo;
+       if (flip_state->old_fb)
+               drm_framebuffer_put(flip_state->old_fb);
+
+       kfree(flip_state);
+}
+
+static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
+{
+       struct vc4_async_flip_state *flip_state =
+               container_of(cb, struct vc4_async_flip_state, cb.seqno);
+       struct vc4_bo *bo = NULL;
 
-               cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
+       if (flip_state->old_fb) {
+               struct drm_gem_cma_object *cma_bo =
+                       drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
                bo = to_vc4_bo(&cma_bo->base);
-               vc4_bo_dec_usecnt(bo);
-               drm_framebuffer_put(flip_state->old_fb);
        }
 
-       kfree(flip_state);
+       vc4_async_page_flip_complete(flip_state);
+
+       /*
+        * Decrement the BO usecnt in order to keep the inc/dec
+        * calls balanced when the planes are updated through
+        * the async update path.
+        *
+        * FIXME: we should move to generic async-page-flip when
+        * it's available, so that we can get rid of this
+        * hand-made cleanup_fb() logic.
+        */
+       if (bo)
+               vc4_bo_dec_usecnt(bo);
 }
 
-/* Implements async (non-vblank-synced) page flips.
- *
- * The page flip ioctl needs to return immediately, so we grab the
- * modeset semaphore on the pipe, and queue the address update for
- * when V3D is done with the BO being flipped to.
- */
-static int vc4_async_page_flip(struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb,
-                              struct drm_pending_vblank_event *event,
-                              uint32_t flags)
+static void vc4_async_page_flip_fence_complete(struct dma_fence *fence,
+                                              struct dma_fence_cb *cb)
 {
-       struct drm_device *dev = crtc->dev;
-       struct drm_plane *plane = crtc->primary;
-       int ret = 0;
-       struct vc4_async_flip_state *flip_state;
+       struct vc4_async_flip_state *flip_state =
+               container_of(cb, struct vc4_async_flip_state, cb.fence);
+
+       vc4_async_page_flip_complete(flip_state);
+       dma_fence_put(fence);
+}
+
+static int vc4_async_set_fence_cb(struct drm_device *dev,
+                                 struct vc4_async_flip_state *flip_state)
+{
+       struct drm_framebuffer *fb = flip_state->fb;
        struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
-       struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct dma_fence *fence;
+       int ret;
 
-       /* Increment the BO usecnt here, so that we never end up with an
-        * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
-        * plane is later updated through the non-async path.
-        * FIXME: we should move to generic async-page-flip when it's
-        * available, so that we can get rid of this hand-made prepare_fb()
-        * logic.
-        */
-       ret = vc4_bo_inc_usecnt(bo);
+       if (!vc4->is_vc5) {
+               struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+
+               return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
+                                         vc4_async_page_flip_seqno_complete);
+       }
+
+       ret = dma_resv_get_singleton(cma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
        if (ret)
                return ret;
 
+       /* If there's no fence, complete the page flip immediately */
+       if (!fence) {
+               vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
+               return 0;
+       }
+
+       /* If the fence has already been completed, complete the page flip */
+       if (dma_fence_add_callback(fence, &flip_state->cb.fence,
+                                  vc4_async_page_flip_fence_complete))
+               vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
+
+       return 0;
+}
+
+static int
+vc4_async_page_flip_common(struct drm_crtc *crtc,
+                          struct drm_framebuffer *fb,
+                          struct drm_pending_vblank_event *event,
+                          uint32_t flags)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_plane *plane = crtc->primary;
+       struct vc4_async_flip_state *flip_state;
+
        flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
-       if (!flip_state) {
-               vc4_bo_dec_usecnt(bo);
+       if (!flip_state)
                return -ENOMEM;
-       }
 
        drm_framebuffer_get(fb);
        flip_state->fb = fb;
@@ -881,23 +919,79 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
         */
        drm_atomic_set_fb_for_plane(plane->state, fb);
 
-       vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
-                          vc4_async_page_flip_complete);
+       vc4_async_set_fence_cb(dev, flip_state);
 
        /* Driver takes ownership of state on successful async commit. */
        return 0;
 }
 
+/* Implements async (non-vblank-synced) page flips.
+ *
+ * The page flip ioctl needs to return immediately, so we grab the
+ * modeset semaphore on the pipe, and queue the address update for
+ * when V3D is done with the BO being flipped to.
+ */
+static int vc4_async_page_flip(struct drm_crtc *crtc,
+                              struct drm_framebuffer *fb,
+                              struct drm_pending_vblank_event *event,
+                              uint32_t flags)
+{
+       struct drm_device *dev = crtc->dev;
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
+       struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+       int ret;
+
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
+       /*
+        * Increment the BO usecnt here, so that we never end up with an
+        * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
+        * plane is later updated through the non-async path.
+        *
+        * FIXME: we should move to generic async-page-flip when
+        * it's available, so that we can get rid of this
+        * hand-made prepare_fb() logic.
+        */
+       ret = vc4_bo_inc_usecnt(bo);
+       if (ret)
+               return ret;
+
+       ret = vc4_async_page_flip_common(crtc, fb, event, flags);
+       if (ret) {
+               vc4_bo_dec_usecnt(bo);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int vc5_async_page_flip(struct drm_crtc *crtc,
+                              struct drm_framebuffer *fb,
+                              struct drm_pending_vblank_event *event,
+                              uint32_t flags)
+{
+       return vc4_async_page_flip_common(crtc, fb, event, flags);
+}
+
 int vc4_page_flip(struct drm_crtc *crtc,
                  struct drm_framebuffer *fb,
                  struct drm_pending_vblank_event *event,
                  uint32_t flags,
                  struct drm_modeset_acquire_ctx *ctx)
 {
-       if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
-               return vc4_async_page_flip(crtc, fb, event, flags);
-       else
+       if (flags & DRM_MODE_PAGE_FLIP_ASYNC) {
+               struct drm_device *dev = crtc->dev;
+               struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+               if (vc4->is_vc5)
+                       return vc5_async_page_flip(crtc, fb, event, flags);
+               else
+                       return vc4_async_page_flip(crtc, fb, event, flags);
+       } else {
                return drm_atomic_helper_page_flip(crtc, fb, event, flags, ctx);
+       }
 }
 
 struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
@@ -1149,7 +1243,7 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
                                  crtc_funcs, NULL);
        drm_crtc_helper_add(crtc, crtc_helper_funcs);
 
-       if (!vc4->hvs->hvs5) {
+       if (!vc4->is_vc5) {
                drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
 
                drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
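
A note on the vc4_crtc.c rework above: the two completion flavours never
coexist for a single flip. The legacy vc4 path completes via a V3D seqno
callback, the vc5 path via a generic dma_fence callback, and both funnel
into vc4_async_page_flip_complete(); each handler recovers the flip
state with container_of() on the member it armed. That is why the
callback storage can safely be a union:

/* Skeleton of the shape used above (kernel-style, not standalone). */
struct flip_state {
	/* ... crtc, fb, old_fb, event ... */
	union {                              /* only one flavour is armed */
		struct dma_fence_cb fence;   /* vc5: dma_fence path */
		struct vc4_seqno_cb seqno;   /* vc4: V3D seqno path */
	} cb;
};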
index 162bc18..0f0f026 100644
@@ -63,6 +63,32 @@ void __iomem *vc4_ioremap_regs(struct platform_device *pdev, int index)
        return map;
 }
 
+int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args)
+{
+       int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+       if (args->pitch < min_pitch)
+               args->pitch = min_pitch;
+
+       if (args->size < args->pitch * args->height)
+               args->size = args->pitch * args->height;
+
+       return 0;
+}
+
+static int vc5_dumb_create(struct drm_file *file_priv,
+                          struct drm_device *dev,
+                          struct drm_mode_create_dumb *args)
+{
+       int ret;
+
+       ret = vc4_dumb_fixup_args(args);
+       if (ret)
+               return ret;
+
+       return drm_gem_cma_dumb_create_internal(file_priv, dev, args);
+}
+
 static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv)
 {
@@ -73,6 +99,9 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
        if (args->pad != 0)
                return -EINVAL;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        if (!vc4->v3d)
                return -ENODEV;
 
@@ -116,11 +145,16 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
 
 static int vc4_open(struct drm_device *dev, struct drm_file *file)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_file *vc4file;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL);
        if (!vc4file)
                return -ENOMEM;
+       vc4file->dev = vc4;
 
        vc4_perfmon_open_file(vc4file);
        file->driver_priv = vc4file;
@@ -132,6 +166,9 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_file *vc4file = file->driver_priv;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        if (vc4file->bin_bo_used)
                vc4_v3d_bin_bo_put(vc4);
 
@@ -160,7 +197,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
        DRM_IOCTL_DEF_DRV(VC4_PERFMON_GET_VALUES, vc4_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
 };
 
-static struct drm_driver vc4_drm_driver = {
+static const struct drm_driver vc4_drm_driver = {
        .driver_features = (DRIVER_MODESET |
                            DRIVER_ATOMIC |
                            DRIVER_GEM |
@@ -175,7 +212,7 @@ static struct drm_driver vc4_drm_driver = {
 
        .gem_create_object = vc4_create_object,
 
-       DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_dumb_create),
+       DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),
 
        .ioctls = vc4_drm_ioctls,
        .num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
@@ -189,6 +226,27 @@ static struct drm_driver vc4_drm_driver = {
        .patchlevel = DRIVER_PATCHLEVEL,
 };
 
+static const struct drm_driver vc5_drm_driver = {
+       .driver_features = (DRIVER_MODESET |
+                           DRIVER_ATOMIC |
+                           DRIVER_GEM),
+
+#if defined(CONFIG_DEBUG_FS)
+       .debugfs_init = vc4_debugfs_init,
+#endif
+
+       DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
+
+       .fops = &vc4_drm_fops,
+
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
+};
+
 static void vc4_match_add_drivers(struct device *dev,
                                  struct component_match **match,
                                  struct platform_driver *const *drivers,
@@ -212,42 +270,49 @@ static void vc4_match_add_drivers(struct device *dev,
 static int vc4_drm_bind(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
+       const struct drm_driver *driver;
        struct rpi_firmware *firmware = NULL;
        struct drm_device *drm;
        struct vc4_dev *vc4;
        struct device_node *node;
        struct drm_crtc *crtc;
+       bool is_vc5;
        int ret = 0;
 
        dev->coherent_dma_mask = DMA_BIT_MASK(32);
 
-       /* If VC4 V3D is missing, don't advertise render nodes. */
-       node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL);
-       if (!node || !of_device_is_available(node))
-               vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
-       of_node_put(node);
+       is_vc5 = of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5");
+       if (is_vc5)
+               driver = &vc5_drm_driver;
+       else
+               driver = &vc4_drm_driver;
 
-       vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base);
+       vc4 = devm_drm_dev_alloc(dev, driver, struct vc4_dev, base);
        if (IS_ERR(vc4))
                return PTR_ERR(vc4);
+       vc4->is_vc5 = is_vc5;
 
        drm = &vc4->base;
        platform_set_drvdata(pdev, drm);
        INIT_LIST_HEAD(&vc4->debugfs_list);
 
-       mutex_init(&vc4->bin_bo_lock);
+       if (!is_vc5) {
+               mutex_init(&vc4->bin_bo_lock);
 
-       ret = vc4_bo_cache_init(drm);
-       if (ret)
-               return ret;
+               ret = vc4_bo_cache_init(drm);
+               if (ret)
+                       return ret;
+       }
 
        ret = drmm_mode_config_init(drm);
        if (ret)
                return ret;
 
-       ret = vc4_gem_init(drm);
-       if (ret)
-               return ret;
+       if (!is_vc5) {
+               ret = vc4_gem_init(drm);
+               if (ret)
+                       return ret;
+       }
 
        node = of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware");
        if (node) {
@@ -258,7 +323,7 @@ static int vc4_drm_bind(struct device *dev)
                        return -EPROBE_DEFER;
        }
 
-       ret = drm_aperture_remove_framebuffers(false, &vc4_drm_driver);
+       ret = drm_aperture_remove_framebuffers(false, driver);
        if (ret)
                return ret;
 
index 15e0c2a..93fd55b 100644
@@ -48,6 +48,8 @@ enum vc4_kernel_bo_type {
  * done. This way, only events related to a specific job will be counted.
  */
 struct vc4_perfmon {
+       struct vc4_dev *dev;
+
        /* Tracks the number of users of the perfmon, when this counter reaches
         * zero the perfmon is destroyed.
         */
@@ -74,6 +76,8 @@ struct vc4_perfmon {
 struct vc4_dev {
        struct drm_device base;
 
+       bool is_vc5;
+
        unsigned int irq;
 
        struct vc4_hvs *hvs;
@@ -316,6 +320,7 @@ struct vc4_v3d {
 };
 
 struct vc4_hvs {
+       struct vc4_dev *vc4;
        struct platform_device *pdev;
        void __iomem *regs;
        u32 __iomem *dlist;
@@ -333,9 +338,6 @@ struct vc4_hvs {
        struct drm_mm_node mitchell_netravali_filter;
 
        struct debugfs_regset32 regset;
-
-       /* HVS version 5 flag, therefore requires updated dlist structures */
-       bool hvs5;
 };
 
 struct vc4_plane {
@@ -580,6 +582,8 @@ to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
 #define VC4_REG32(reg) { .name = #reg, .offset = reg }
 
 struct vc4_exec_info {
+       struct vc4_dev *dev;
+
        /* Sequence number for this bin/render job. */
        uint64_t seqno;
 
@@ -701,6 +705,8 @@ struct vc4_exec_info {
  * released when the DRM file is closed should be placed here.
  */
 struct vc4_file {
+       struct vc4_dev *dev;
+
        struct {
                struct idr idr;
                struct mutex lock;
@@ -814,9 +820,9 @@ struct vc4_validated_shader_info {
 struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
 struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
                             bool from_cache, enum vc4_kernel_bo_type type);
-int vc4_dumb_create(struct drm_file *file_priv,
-                   struct drm_device *dev,
-                   struct drm_mode_create_dumb *args);
+int vc4_bo_dumb_create(struct drm_file *file_priv,
+                      struct drm_device *dev,
+                      struct drm_mode_create_dumb *args);
 int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
@@ -885,6 +891,7 @@ static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
 
 /* vc4_drv.c */
 void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
+int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args);
 
 /* vc4_dpi.c */
 extern struct platform_driver vc4_dpi_driver;
index 9eaf304..fe10d9c 100644
@@ -76,6 +76,9 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
        u32 i;
        int ret = 0;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        if (!vc4->v3d) {
                DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
                return -ENODEV;
@@ -386,6 +389,9 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
        unsigned long timeout_expire;
        DEFINE_WAIT(wait);
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        if (vc4->finished_seqno >= seqno)
                return 0;
 
@@ -468,6 +474,9 @@ vc4_submit_next_bin_job(struct drm_device *dev)
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_exec_info *exec;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
 again:
        exec = vc4_first_bin_job(vc4);
        if (!exec)
@@ -513,6 +522,9 @@ vc4_submit_next_render_job(struct drm_device *dev)
        if (!exec)
                return;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        /* A previous RCL may have written to one of our textures, and
         * our full cache flush at bin time may have occurred before
         * that RCL completed.  Flush the texture cache now, but not
@@ -531,6 +543,9 @@ vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        bool was_empty = list_empty(&vc4->render_job_list);
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        list_move_tail(&exec->head, &vc4->render_job_list);
        if (was_empty)
                vc4_submit_next_render_job(dev);
@@ -997,6 +1012,9 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
        unsigned long irqflags;
        struct vc4_seqno_cb *cb, *cb_temp;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        spin_lock_irqsave(&vc4->job_lock, irqflags);
        while (!list_empty(&vc4->job_done_list)) {
                struct vc4_exec_info *exec =
@@ -1033,6 +1051,9 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        unsigned long irqflags;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        cb->func = func;
        INIT_WORK(&cb->work, vc4_seqno_cb_work);
 
@@ -1083,8 +1104,12 @@ int
 vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_vc4_wait_seqno *args = data;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
                                               &args->timeout_ns);
 }
@@ -1093,11 +1118,15 @@ int
 vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret;
        struct drm_vc4_wait_bo *args = data;
        struct drm_gem_object *gem_obj;
        struct vc4_bo *bo;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        if (args->pad != 0)
                return -EINVAL;
 
@@ -1144,6 +1173,9 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
                                  args->shader_rec_size,
                                  args->bo_handle_count);
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        if (!vc4->v3d) {
                DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
                return -ENODEV;
@@ -1167,6 +1199,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
                DRM_ERROR("malloc failure on exec struct\n");
                return -ENOMEM;
        }
+       exec->dev = vc4;
 
        ret = vc4_v3d_pm_get(vc4);
        if (ret) {
@@ -1276,6 +1309,9 @@ int vc4_gem_init(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        vc4->dma_fence_context = dma_fence_context_alloc(1);
 
        INIT_LIST_HEAD(&vc4->bin_job_list);
@@ -1321,11 +1357,15 @@ static void vc4_gem_destroy(struct drm_device *dev, void *unused)
 int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_vc4_gem_madvise *args = data;
        struct drm_gem_object *gem_obj;
        struct vc4_bo *bo;
        int ret;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        switch (args->madv) {
        case VC4_MADV_DONTNEED:
        case VC4_MADV_WILLNEED:
index 823d812..ce9d166 100644
@@ -1481,7 +1481,7 @@ vc4_hdmi_encoder_compute_mode_clock(const struct drm_display_mode *mode,
                                    unsigned int bpc,
                                    enum vc4_hdmi_output_format fmt)
 {
-       unsigned long long clock = mode->clock * 1000;
+       unsigned long long clock = mode->clock * 1000ULL;
 
        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                clock = clock * 2;
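
The 1000ULL suffix above matters because mode->clock is an int holding
kHz: C evaluates int * int in int and only then widens for the
assignment, so any overflow happens before the conversion to unsigned
long long. Forcing one operand to unsigned long long makes the multiply
itself 64-bit. A demonstration (the clock value is illustrative, not
from a real mode; signed overflow is formally undefined and is shown
here only to make the wraparound visible):

#include <stdio.h>

int main(void)
{
	int clock_khz = 3000000;                        /* 3 GHz, illustrative */
	unsigned long long bad  = clock_khz * 1000;     /* 32-bit multiply */
	unsigned long long good = clock_khz * 1000ULL;  /* 64-bit multiply */

	printf("bad=%llu good=%llu\n", bad, good);      /* only good is 3000000000 */
	return 0;
}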
index 2a58fc4..ba2c8e5 100644
@@ -220,10 +220,11 @@ u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
 
 int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
 {
+       struct vc4_dev *vc4 = hvs->vc4;
        u32 reg;
        int ret;
 
-       if (!hvs->hvs5)
+       if (!vc4->is_vc5)
                return output;
 
        switch (output) {
@@ -273,6 +274,7 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
 static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
                                struct drm_display_mode *mode, bool oneshot)
 {
+       struct vc4_dev *vc4 = hvs->vc4;
        struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
        struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
        unsigned int chan = vc4_crtc_state->assigned_channel;
@@ -291,7 +293,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
         */
        dispctrl = SCALER_DISPCTRLX_ENABLE;
 
-       if (!hvs->hvs5)
+       if (!vc4->is_vc5)
                dispctrl |= VC4_SET_FIELD(mode->hdisplay,
                                          SCALER_DISPCTRLX_WIDTH) |
                            VC4_SET_FIELD(mode->vdisplay,
@@ -312,7 +314,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
 
        HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
                  SCALER_DISPBKGND_AUTOHS |
-                 ((!hvs->hvs5) ? SCALER_DISPBKGND_GAMMA : 0) |
+                 ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) |
                  (interlace ? SCALER_DISPBKGND_INTERLACE : 0));
 
        /* Reload the LUT, since the SRAMs would have been disabled if
@@ -617,11 +619,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
        if (!hvs)
                return -ENOMEM;
 
+       hvs->vc4 = vc4;
        hvs->pdev = pdev;
 
-       if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm2711-hvs"))
-               hvs->hvs5 = true;
-
        hvs->regs = vc4_ioremap_regs(pdev, 0);
        if (IS_ERR(hvs->regs))
                return PTR_ERR(hvs->regs);
@@ -630,7 +630,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
        hvs->regset.regs = hvs_regs;
        hvs->regset.nregs = ARRAY_SIZE(hvs_regs);
 
-       if (hvs->hvs5) {
+       if (vc4->is_vc5) {
                hvs->core_clk = devm_clk_get(&pdev->dev, NULL);
                if (IS_ERR(hvs->core_clk)) {
                        dev_err(&pdev->dev, "Couldn't get core clock\n");
@@ -644,7 +644,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
                }
        }
 
-       if (!hvs->hvs5)
+       if (!vc4->is_vc5)
                hvs->dlist = hvs->regs + SCALER_DLIST_START;
        else
                hvs->dlist = hvs->regs + SCALER5_DLIST_START;
@@ -665,7 +665,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
         * between planes when they don't overlap on the screen, but
         * for now we just allocate globally.
         */
-       if (!hvs->hvs5)
+       if (!vc4->is_vc5)
                /* 48k words of 2x12-bit pixels */
                drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
        else
index 4342fb4..2eacfb6 100644
@@ -265,6 +265,9 @@ vc4_irq_enable(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        if (!vc4->v3d)
                return;
 
@@ -279,6 +282,9 @@ vc4_irq_disable(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        if (!vc4->v3d)
                return;
 
@@ -296,8 +302,12 @@ vc4_irq_disable(struct drm_device *dev)
 
 int vc4_irq_install(struct drm_device *dev, int irq)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        if (irq == IRQ_NOTCONNECTED)
                return -ENOTCONN;
 
@@ -316,6 +326,9 @@ void vc4_irq_uninstall(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        vc4_irq_disable(dev);
        free_irq(vc4->irq, dev);
 }
@@ -326,6 +339,9 @@ void vc4_irq_reset(struct drm_device *dev)
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        unsigned long irqflags;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        /* Acknowledge any stale IRQs. */
        V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
 
index c169bd7..893d831 100644
@@ -393,7 +393,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
                old_hvs_state->fifo_state[channel].pending_commit = NULL;
        }
 
-       if (vc4->hvs->hvs5) {
+       if (vc4->is_vc5) {
                unsigned long state_rate = max(old_hvs_state->core_clock_rate,
                                               new_hvs_state->core_clock_rate);
                unsigned long core_rate = max_t(unsigned long,
@@ -412,7 +412,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
 
        vc4_ctm_commit(vc4, state);
 
-       if (vc4->hvs->hvs5)
+       if (vc4->is_vc5)
                vc5_hvs_pv_muxing_commit(vc4, state);
        else
                vc4_hvs_pv_muxing_commit(vc4, state);
@@ -430,7 +430,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
 
        drm_atomic_helper_cleanup_planes(dev, state);
 
-       if (vc4->hvs->hvs5) {
+       if (vc4->is_vc5) {
                drm_dbg(dev, "Running the core clock at %lu Hz\n",
                        new_hvs_state->core_clock_rate);
 
@@ -479,8 +479,12 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
                                             struct drm_file *file_priv,
                                             const struct drm_mode_fb_cmd2 *mode_cmd)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_mode_fb_cmd2 mode_cmd_local;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return ERR_PTR(-ENODEV);
+
        /* If the user didn't specify a modifier, use the
         * vc4_set_tiling_ioctl() state for the BO.
         */
@@ -997,11 +1001,15 @@ static const struct drm_mode_config_funcs vc4_mode_funcs = {
        .fb_create = vc4_fb_create,
 };
 
+static const struct drm_mode_config_funcs vc5_mode_funcs = {
+       .atomic_check = vc4_atomic_check,
+       .atomic_commit = drm_atomic_helper_commit,
+       .fb_create = drm_gem_fb_create,
+};
+
 int vc4_kms_load(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
-       bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
-                                             "brcm,bcm2711-vc5");
        int ret;
 
        /*
@@ -1009,7 +1017,7 @@ int vc4_kms_load(struct drm_device *dev)
         * the BCM2711, but the load tracker computations are used for
         * the core clock rate calculation.
         */
-       if (!is_vc5) {
+       if (!vc4->is_vc5) {
                /* Start with the load tracker enabled. Can be
                 * disabled through the debugfs load_tracker file.
                 */
@@ -1025,7 +1033,7 @@ int vc4_kms_load(struct drm_device *dev)
                return ret;
        }
 
-       if (is_vc5) {
+       if (vc4->is_vc5) {
                dev->mode_config.max_width = 7680;
                dev->mode_config.max_height = 7680;
        } else {
@@ -1033,7 +1041,7 @@ int vc4_kms_load(struct drm_device *dev)
                dev->mode_config.max_height = 2048;
        }
 
-       dev->mode_config.funcs = &vc4_mode_funcs;
+       dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs;
        dev->mode_config.helper_private = &vc4_mode_config_helpers;
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.async_page_flip = true;
index 18abc06..79a7418 100644 (file)
 
 void vc4_perfmon_get(struct vc4_perfmon *perfmon)
 {
-       if (perfmon)
-               refcount_inc(&perfmon->refcnt);
+       struct vc4_dev *vc4;
+
+       if (!perfmon)
+               return;
+
+       vc4 = perfmon->dev;
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
+       refcount_inc(&perfmon->refcnt);
 }
 
 void vc4_perfmon_put(struct vc4_perfmon *perfmon)
 {
-       if (perfmon && refcount_dec_and_test(&perfmon->refcnt))
+       struct vc4_dev *vc4;
+
+       if (!perfmon)
+               return;
+
+       vc4 = perfmon->dev;
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
+       if (refcount_dec_and_test(&perfmon->refcnt))
                kfree(perfmon);
 }
 
@@ -32,6 +49,9 @@ void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon)
        unsigned int i;
        u32 mask;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon))
                return;
 
@@ -49,6 +69,9 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
 {
        unsigned int i;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        if (WARN_ON_ONCE(!vc4->active_perfmon ||
                         perfmon != vc4->active_perfmon))
                return;
@@ -64,8 +87,12 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
 
 struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
 {
+       struct vc4_dev *vc4 = vc4file->dev;
        struct vc4_perfmon *perfmon;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return NULL;
+
        mutex_lock(&vc4file->perfmon.lock);
        perfmon = idr_find(&vc4file->perfmon.idr, id);
        vc4_perfmon_get(perfmon);
@@ -76,8 +103,14 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
 
 void vc4_perfmon_open_file(struct vc4_file *vc4file)
 {
+       struct vc4_dev *vc4 = vc4file->dev;
+
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        mutex_init(&vc4file->perfmon.lock);
        idr_init_base(&vc4file->perfmon.idr, VC4_PERFMONID_MIN);
+       vc4file->dev = vc4;
 }
 
 static int vc4_perfmon_idr_del(int id, void *elem, void *data)
@@ -91,6 +124,11 @@ static int vc4_perfmon_idr_del(int id, void *elem, void *data)
 
 void vc4_perfmon_close_file(struct vc4_file *vc4file)
 {
+       struct vc4_dev *vc4 = vc4file->dev;
+
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        mutex_lock(&vc4file->perfmon.lock);
        idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
        idr_destroy(&vc4file->perfmon.idr);
@@ -107,6 +145,9 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
        unsigned int i;
        int ret;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        if (!vc4->v3d) {
                DRM_DEBUG("Creating perfmon no VC4 V3D probed\n");
                return -ENODEV;
@@ -127,6 +168,7 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
                          GFP_KERNEL);
        if (!perfmon)
                return -ENOMEM;
+       perfmon->dev = vc4;
 
        for (i = 0; i < req->ncounters; i++)
                perfmon->events[i] = req->events[i];
@@ -157,6 +199,9 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
        struct drm_vc4_perfmon_destroy *req = data;
        struct vc4_perfmon *perfmon;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        if (!vc4->v3d) {
                DRM_DEBUG("Destroying perfmon no VC4 V3D probed\n");
                return -ENODEV;
@@ -182,6 +227,9 @@ int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
        struct vc4_perfmon *perfmon;
        int ret;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        if (!vc4->v3d) {
                DRM_DEBUG("Getting perfmon no VC4 V3D probed\n");
                return -ENODEV;
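
Every perfmon entry point above gains the same guard. Sketched standalone (vc4_foo_ioctl is a stand-in name, not a function from the patch): a helper that only makes sense on the older V3D block refuses loudly, but only once, if it is ever reached on VC5 hardware.

        static int vc4_foo_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
        {
                struct vc4_dev *vc4 = to_vc4_dev(dev);

                /* VC5 has no in-driver perfmon/V3D path; bail out early. */
                if (WARN_ON_ONCE(vc4->is_vc5))
                        return -ENODEV;

                /* ... legacy VC4-only work ... */
                return 0;
        }
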
index b3438f4..1e866dc 100644 (file)
@@ -489,10 +489,10 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
        }
 
        /* Align it to 64 or 128 (hvs5) bytes */
-       lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64);
+       lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64);
 
        /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
-       lbm /= vc4->hvs->hvs5 ? 4 : 2;
+       lbm /= vc4->is_vc5 ? 4 : 2;
 
        return lbm;
 }
@@ -608,7 +608,7 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
                ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
                                                 &vc4_state->lbm,
                                                 lbm_size,
-                                                vc4->hvs->hvs5 ? 64 : 32,
+                                                vc4->is_vc5 ? 64 : 32,
                                                 0, 0);
                spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
 
@@ -917,7 +917,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
        mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
                          fb->format->has_alpha;
 
-       if (!vc4->hvs->hvs5) {
+       if (!vc4->is_vc5) {
        /* Control word */
                vc4_dlist_write(vc4_state,
                                SCALER_CTL0_VALID |
@@ -1321,6 +1321,10 @@ static int vc4_plane_atomic_async_check(struct drm_plane *plane,
 
        old_vc4_state = to_vc4_plane_state(plane->state);
        new_vc4_state = to_vc4_plane_state(new_plane_state);
+
+       if (!new_vc4_state->hw_dlist)
+               return -EINVAL;
+
        if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
            old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
            old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
@@ -1385,6 +1389,13 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
        .atomic_async_update = vc4_plane_atomic_async_update,
 };
 
+static const struct drm_plane_helper_funcs vc5_plane_helper_funcs = {
+       .atomic_check = vc4_plane_atomic_check,
+       .atomic_update = vc4_plane_atomic_update,
+       .atomic_async_check = vc4_plane_atomic_async_check,
+       .atomic_async_update = vc4_plane_atomic_async_update,
+};
+
 static bool vc4_format_mod_supported(struct drm_plane *plane,
                                     uint32_t format,
                                     uint64_t modifier)
@@ -1453,14 +1464,13 @@ static const struct drm_plane_funcs vc4_plane_funcs = {
 struct drm_plane *vc4_plane_init(struct drm_device *dev,
                                 enum drm_plane_type type)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_plane *plane = NULL;
        struct vc4_plane *vc4_plane;
        u32 formats[ARRAY_SIZE(hvs_formats)];
        int num_formats = 0;
        int ret = 0;
        unsigned i;
-       bool hvs5 = of_device_is_compatible(dev->dev->of_node,
-                                           "brcm,bcm2711-vc5");
        static const uint64_t modifiers[] = {
                DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
                DRM_FORMAT_MOD_BROADCOM_SAND128,
@@ -1476,7 +1486,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
                return ERR_PTR(-ENOMEM);
 
        for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
-               if (!hvs_formats[i].hvs5_only || hvs5) {
+               if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
                        formats[num_formats] = hvs_formats[i].drm;
                        num_formats++;
                }
@@ -1490,7 +1500,10 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
        if (ret)
                return ERR_PTR(ret);
 
-       drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
+       if (vc4->is_vc5)
+               drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
+       else
+               drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
 
        drm_plane_create_alpha_property(plane);
        drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
index 3c918ee..f6b7dc3 100644 (file)
@@ -593,11 +593,15 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
 
 int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_rcl_setup setup = {0};
        struct drm_vc4_submit_cl *args = exec->args;
        bool has_bin = args->bin_cl_size != 0;
        int ret;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        if (args->min_x_tile > args->max_x_tile ||
            args->min_y_tile > args->max_y_tile) {
                DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n",
index 7bb3067..cc714dc 100644 (file)
@@ -127,6 +127,9 @@ static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
 int
 vc4_v3d_pm_get(struct vc4_dev *vc4)
 {
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        mutex_lock(&vc4->power_lock);
        if (vc4->power_refcount++ == 0) {
                int ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
@@ -145,6 +148,9 @@ vc4_v3d_pm_get(struct vc4_dev *vc4)
 void
 vc4_v3d_pm_put(struct vc4_dev *vc4)
 {
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        mutex_lock(&vc4->power_lock);
        if (--vc4->power_refcount == 0) {
                pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
@@ -172,6 +178,9 @@ int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
        uint64_t seqno = 0;
        struct vc4_exec_info *exec;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
 try_again:
        spin_lock_irqsave(&vc4->job_lock, irqflags);
        slot = ffs(~vc4->bin_alloc_used);
@@ -316,6 +325,9 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used)
 {
        int ret = 0;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        mutex_lock(&vc4->bin_bo_lock);
 
        if (used && *used)
@@ -348,6 +360,9 @@ static void bin_bo_release(struct kref *ref)
 
 void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
 {
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return;
+
        mutex_lock(&vc4->bin_bo_lock);
        kref_put(&vc4->bin_bo_kref, bin_bo_release);
        mutex_unlock(&vc4->bin_bo_lock);
index eec76af..2feba55 100644 (file)
@@ -105,9 +105,13 @@ size_is_lt(uint32_t width, uint32_t height, int cpp)
 struct drm_gem_cma_object *
 vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
 {
+       struct vc4_dev *vc4 = exec->dev;
        struct drm_gem_cma_object *obj;
        struct vc4_bo *bo;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return NULL;
+
        if (hindex >= exec->bo_count) {
                DRM_DEBUG("BO index %d greater than BO count %d\n",
                          hindex, exec->bo_count);
@@ -160,10 +164,14 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
                   uint32_t offset, uint8_t tiling_format,
                   uint32_t width, uint32_t height, uint8_t cpp)
 {
+       struct vc4_dev *vc4 = exec->dev;
        uint32_t aligned_width, aligned_height, stride, size;
        uint32_t utile_w = utile_width(cpp);
        uint32_t utile_h = utile_height(cpp);
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return false;
+
        /* The shaded vertex format stores signed 12.4 fixed point
         * (-2048,2047) offsets from the viewport center, so we should
         * never have a render target larger than 4096.  The texture
@@ -482,10 +490,14 @@ vc4_validate_bin_cl(struct drm_device *dev,
                    void *unvalidated,
                    struct vc4_exec_info *exec)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
        uint32_t len = exec->args->bin_cl_size;
        uint32_t dst_offset = 0;
        uint32_t src_offset = 0;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        while (src_offset < len) {
                void *dst_pkt = validated + dst_offset;
                void *src_pkt = unvalidated + src_offset;
@@ -926,9 +938,13 @@ int
 vc4_validate_shader_recs(struct drm_device *dev,
                         struct vc4_exec_info *exec)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
        uint32_t i;
        int ret = 0;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return -ENODEV;
+
        for (i = 0; i < exec->shader_state_count; i++) {
                ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]);
                if (ret)
index 7cf82b0..e315aeb 100644 (file)
@@ -778,6 +778,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
 struct vc4_validated_shader_info *
 vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
 {
+       struct vc4_dev *vc4 = to_vc4_dev(shader_obj->base.dev);
        bool found_shader_end = false;
        int shader_end_ip = 0;
        uint32_t last_thread_switch_ip = -3;
@@ -785,6 +786,9 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
        struct vc4_validated_shader_info *validated_shader = NULL;
        struct vc4_shader_validation_state validation_state;
 
+       if (WARN_ON_ONCE(vc4->is_vc5))
+               return NULL;
+
        memset(&validation_state, 0, sizeof(validation_state));
        validation_state.shader = shader_obj->vaddr;
        validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);
index 5a5bf4e..e31554d 100644 (file)
@@ -71,7 +71,7 @@ static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
         * the whole buffer.
         */
        vma->vm_flags &= ~VM_PFNMAP;
-       vma->vm_flags |= VM_MIXEDMAP;
+       vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
        vma->vm_pgoff = 0;
 
        /*
index 978ee2a..e0bc731 100644 (file)
@@ -199,7 +199,8 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
        if (!input_device->hid_desc)
                goto cleanup;
 
-       input_device->report_desc_size = desc->desc[0].wDescriptorLength;
+       input_device->report_desc_size = le16_to_cpu(
+                                       desc->desc[0].wDescriptorLength);
        if (input_device->report_desc_size == 0) {
                input_device->dev_info_status = -EINVAL;
                goto cleanup;
@@ -217,7 +218,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
 
        memcpy(input_device->report_desc,
               ((unsigned char *)desc) + desc->bLength,
-              desc->desc[0].wDescriptorLength);
+              le16_to_cpu(desc->desc[0].wDescriptorLength));
 
        /* Send the ack */
        memset(&ack, 0, sizeof(struct mousevsc_prt_msg));
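
Both mousevsc hunks fix the same class of bug: wDescriptorLength is a little-endian wire-format field (__le16), so it must pass through le16_to_cpu() before being used as a host-order size. A sketch, assuming the usual struct hid_descriptor layout (report_desc is a stand-in buffer):

        u16 len;

        /* Wrong on big-endian hosts: raw __le16 treated as a host integer. */
        /* len = desc->desc[0].wDescriptorLength; */

        /* Correct: convert from wire (LE) to host byte order first. */
        len = le16_to_cpu(desc->desc[0].wDescriptorLength);
        memcpy(report_desc, (u8 *)desc + desc->bLength, len);
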
index b60f134..5b12040 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/cpu.h>
 #include <linux/hyperv.h>
 #include <asm/mshyperv.h>
+#include <linux/sched/isolation.h>
 
 #include "hyperv_vmbus.h"
 
@@ -638,6 +639,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
                 */
                if (newchannel->offermsg.offer.sub_channel_index == 0) {
                        mutex_unlock(&vmbus_connection.channel_mutex);
+                       cpus_read_unlock();
                        /*
                         * Don't call free_channel(), because newchannel->kobj
                         * is not initialized yet.
@@ -728,16 +730,20 @@ static void init_vp_index(struct vmbus_channel *channel)
        u32 i, ncpu = num_online_cpus();
        cpumask_var_t available_mask;
        struct cpumask *allocated_mask;
+       const struct cpumask *hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
        u32 target_cpu;
        int numa_node;
 
        if (!perf_chn ||
-           !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
+           !alloc_cpumask_var(&available_mask, GFP_KERNEL) ||
+           cpumask_empty(hk_mask)) {
                /*
                 * If the channel is not a performance critical
                 * channel, bind it to VMBUS_CONNECT_CPU.
                 * In case alloc_cpumask_var() fails, bind it to
                 * VMBUS_CONNECT_CPU.
+                * If all the cpus are isolated, bind it to
+                * VMBUS_CONNECT_CPU.
                 */
                channel->target_cpu = VMBUS_CONNECT_CPU;
                if (perf_chn)
@@ -758,17 +764,19 @@ static void init_vp_index(struct vmbus_channel *channel)
                }
                allocated_mask = &hv_context.hv_numa_map[numa_node];
 
-               if (cpumask_equal(allocated_mask, cpumask_of_node(numa_node))) {
+retry:
+               cpumask_xor(available_mask, allocated_mask, cpumask_of_node(numa_node));
+               cpumask_and(available_mask, available_mask, hk_mask);
+
+               if (cpumask_empty(available_mask)) {
                        /*
                         * We have cycled through all the CPUs in the node;
                         * reset the allocated map.
                         */
                        cpumask_clear(allocated_mask);
+                       goto retry;
                }
 
-               cpumask_xor(available_mask, allocated_mask,
-                           cpumask_of_node(numa_node));
-
                target_cpu = cpumask_first(available_mask);
                cpumask_set_cpu(target_cpu, allocated_mask);
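
The rewritten placement loop is easier to read as its post-patch whole: candidate CPUs are the node's CPUs not yet handed out (xor against allocated_mask), further filtered to housekeeping CPUs; when that intersection is empty, the allocated map is reset and the computation retried, so an isolated CPU can never be chosen.

        retry:
                /* CPUs in this NUMA node that have not been handed out yet ... */
                cpumask_xor(available_mask, allocated_mask, cpumask_of_node(numa_node));
                /* ... and that are not isolated from managed IRQs. */
                cpumask_and(available_mask, available_mask, hk_mask);

                if (cpumask_empty(available_mask)) {
                        /* Every housekeeping CPU in the node is taken: start over. */
                        cpumask_clear(allocated_mask);
                        goto retry;
                }

                target_cpu = cpumask_first(available_mask);
                cpumask_set_cpu(target_cpu, allocated_mask);
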
 
index c698592..d35b60c 100644 (file)
@@ -394,7 +394,7 @@ kvp_send_key(struct work_struct *dummy)
        in_msg = kvp_transaction.kvp_msg;
 
        /*
-        * The key/value strings sent from the host are encoded in
+        * The key/value strings sent from the host are encoded
         * in utf16; convert it to utf8 strings.
         * The host assures us that the utf16 strings will not exceed
         * the max lengths specified. We will however, reserve room
index 714d549..547ae33 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/clockchips.h>
 #include <linux/cpu.h>
+#include <linux/sched/isolation.h>
 #include <linux/sched/task_stack.h>
 
 #include <linux/delay.h>
@@ -1770,6 +1771,9 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
        if (target_cpu >= nr_cpumask_bits)
                return -EINVAL;
 
+       if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
+               return -EINVAL;
+
        /* No CPUs should come up or down during this. */
        cpus_read_lock();
 
index 57e11b2..3633ab6 100644 (file)
@@ -259,7 +259,7 @@ static const struct ec_board_info board_info[] = {
        },
        {
                .board_names = {
-                       "ROG CROSSHAIR VIII FORMULA"
+                       "ROG CROSSHAIR VIII FORMULA",
                        "ROG CROSSHAIR VIII HERO",
                        "ROG CROSSHAIR VIII HERO (WI-FI)",
                },
index 5c4cf74..157e232 100644 (file)
@@ -550,7 +550,7 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle)
 
        res = platform_device_add(data->pdev);
        if (res)
-               goto ipmi_err;
+               goto dev_add_err;
 
        platform_set_drvdata(data->pdev, data);
 
@@ -598,7 +598,9 @@ hwmon_reg_err:
        ipmi_destroy_user(data->ipmi.user);
 ipmi_err:
        platform_set_drvdata(data->pdev, NULL);
-       platform_device_unregister(data->pdev);
+       platform_device_del(data->pdev);
+dev_add_err:
+       platform_device_put(data->pdev);
 dev_err:
        ida_free(&aem_ida, data->id);
 id_err:
@@ -690,7 +692,7 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe,
 
        res = platform_device_add(data->pdev);
        if (res)
-               goto ipmi_err;
+               goto dev_add_err;
 
        platform_set_drvdata(data->pdev, data);
 
@@ -738,7 +740,9 @@ hwmon_reg_err:
        ipmi_destroy_user(data->ipmi.user);
 ipmi_err:
        platform_set_drvdata(data->pdev, NULL);
-       platform_device_unregister(data->pdev);
+       platform_device_del(data->pdev);
+dev_add_err:
+       platform_device_put(data->pdev);
 dev_err:
        ida_free(&aem_ida, data->id);
 id_err:
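
Both aem instances fix the same error-path bug: once platform_device_add() has failed, the device was never added, so platform_device_unregister() (which is del + put) is the wrong unwind; only the reference must be dropped. The balanced pattern, sketched (do_more_setup is hypothetical):

        pdev = platform_device_alloc("name", id);       /* holds the only ref */
        if (!pdev)
                return -ENOMEM;

        ret = platform_device_add(pdev);
        if (ret)
                goto err_put;           /* not added: just drop the ref */

        ret = do_more_setup(pdev);
        if (ret)
                goto err_del;           /* added: del first, then put */

        return 0;

        err_del:
                platform_device_del(pdev);
        err_put:
                platform_device_put(pdev);
                return ret;
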
index 4e239bd..5a9d47a 100644 (file)
@@ -428,6 +428,10 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        data->ccd_offset = 0x154;
                        k10temp_get_ccd_support(pdev, data, 8);
                        break;
+               case 0xa0 ... 0xaf:
+                       data->ccd_offset = 0x300;
+                       k10temp_get_ccd_support(pdev, data, 8);
+                       break;
                }
        } else if (boot_cpu_data.x86 == 0x19) {
                data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
@@ -445,6 +449,11 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        data->ccd_offset = 0x300;
                        k10temp_get_ccd_support(pdev, data, 8);
                        break;
+               case 0x60 ... 0x6f:
+               case 0x70 ... 0x7f:
+                       data->ccd_offset = 0x308;
+                       k10temp_get_ccd_support(pdev, data, 8);
+                       break;
                case 0x10 ... 0x1f:
                case 0xa0 ... 0xaf:
                        data->ccd_offset = 0x300;
@@ -489,10 +498,13 @@ static const struct pci_device_id k10temp_id_table[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
        { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        {}
 };
index d78f4be..157b73a 100644 (file)
@@ -145,7 +145,7 @@ static int occ_poll(struct occ *occ)
        cmd[6] = 0;                     /* checksum lsb */
 
        /* mutex should already be locked if necessary */
-       rc = occ->send_cmd(occ, cmd, sizeof(cmd));
+       rc = occ->send_cmd(occ, cmd, sizeof(cmd), &occ->resp, sizeof(occ->resp));
        if (rc) {
                occ->last_error = rc;
                if (occ->error_count++ > OCC_ERROR_COUNT_THRESHOLD)
@@ -182,6 +182,7 @@ static int occ_set_user_power_cap(struct occ *occ, u16 user_power_cap)
 {
        int rc;
        u8 cmd[8];
+       u8 resp[8];
        __be16 user_power_cap_be = cpu_to_be16(user_power_cap);
 
        cmd[0] = 0;     /* sequence number */
@@ -198,7 +199,7 @@ static int occ_set_user_power_cap(struct occ *occ, u16 user_power_cap)
        if (rc)
                return rc;
 
-       rc = occ->send_cmd(occ, cmd, sizeof(cmd));
+       rc = occ->send_cmd(occ, cmd, sizeof(cmd), resp, sizeof(resp));
 
        mutex_unlock(&occ->lock);
 
@@ -1228,10 +1229,15 @@ EXPORT_SYMBOL_GPL(occ_setup);
 
 void occ_shutdown(struct occ *occ)
 {
+       mutex_lock(&occ->lock);
+
        occ_shutdown_sysfs(occ);
 
        if (occ->hwmon)
                hwmon_device_unregister(occ->hwmon);
+       occ->hwmon = NULL;
+
+       mutex_unlock(&occ->lock);
 }
 EXPORT_SYMBOL_GPL(occ_shutdown);
 
index 64d5ec7..7ac4b2f 100644 (file)
@@ -96,7 +96,8 @@ struct occ {
 
        int powr_sample_time_us;        /* average power sample time */
        u8 poll_cmd_data;               /* to perform OCC poll command */
-       int (*send_cmd)(struct occ *occ, u8 *cmd, size_t len);
+       int (*send_cmd)(struct occ *occ, u8 *cmd, size_t len, void *resp,
+                       size_t resp_len);
 
        unsigned long next_update;
        struct mutex lock;              /* lock OCC access */
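
The occ->send_cmd signature change threads an explicit response buffer and length through the op instead of having every backend write into occ->resp. Sketched generically from the hunks above:

        /* Callers now state where the response goes and how large it may be. */
        int (*send_cmd)(struct occ *occ, u8 *cmd, size_t len,
                        void *resp, size_t resp_len);

        /* Polling keeps using the shared buffer ... */
        rc = occ->send_cmd(occ, cmd, sizeof(cmd), &occ->resp, sizeof(occ->resp));

        /* ... while one-off commands can use a small stack buffer. */
        u8 resp[8];
        rc = occ->send_cmd(occ, cmd, sizeof(cmd), resp, sizeof(resp));
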
index da39ea2..b221be1 100644 (file)
@@ -111,7 +111,8 @@ static int p8_i2c_occ_putscom_be(struct i2c_client *client, u32 address,
                                      be32_to_cpu(data1));
 }
 
-static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
+static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len,
+                              void *resp, size_t resp_len)
 {
        int i, rc;
        unsigned long start;
@@ -120,7 +121,7 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
        const long wait_time = msecs_to_jiffies(OCC_CMD_IN_PRG_WAIT_MS);
        struct p8_i2c_occ *ctx = to_p8_i2c_occ(occ);
        struct i2c_client *client = ctx->client;
-       struct occ_response *resp = &occ->resp;
+       struct occ_response *or = (struct occ_response *)resp;
 
        start = jiffies;
 
@@ -151,7 +152,7 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
                        return rc;
 
                /* wait for OCC */
-               if (resp->return_status == OCC_RESP_CMD_IN_PRG) {
+               if (or->return_status == OCC_RESP_CMD_IN_PRG) {
                        rc = -EALREADY;
 
                        if (time_after(jiffies, start + timeout))
@@ -163,7 +164,7 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
        } while (rc);
 
        /* check the OCC response */
-       switch (resp->return_status) {
+       switch (or->return_status) {
        case OCC_RESP_CMD_IN_PRG:
                rc = -ETIMEDOUT;
                break;
@@ -192,8 +193,8 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
        if (rc < 0)
                return rc;
 
-       data_length = get_unaligned_be16(&resp->data_length);
-       if (data_length > OCC_RESP_DATA_BYTES)
+       data_length = get_unaligned_be16(&or->data_length);
+       if ((data_length + 7) > resp_len)
                return -EMSGSIZE;
 
        /* fetch the rest of the response data */
index 42fc7b9..a91937e 100644 (file)
@@ -78,11 +78,10 @@ done:
        return notify;
 }
 
-static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
+static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len,
+                              void *resp, size_t resp_len)
 {
-       struct occ_response *resp = &occ->resp;
        struct p9_sbe_occ *ctx = to_p9_sbe_occ(occ);
-       size_t resp_len = sizeof(*resp);
        int rc;
 
        rc = fsi_occ_submit(ctx->sbe, cmd, len, resp, &resp_len);
@@ -96,7 +95,7 @@ static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
                return rc;
        }
 
-       switch (resp->return_status) {
+       switch (((struct occ_response *)resp)->return_status) {
        case OCC_RESP_CMD_IN_PRG:
                rc = -ETIMEDOUT;
                break;
index 6bc3273..3ad375a 100644 (file)
@@ -148,7 +148,7 @@ static int ucd9200_probe(struct i2c_client *client)
         * This only affects the READ_IOUT and READ_TEMPERATURE2 registers.
         * READ_IOUT will return the sum of currents of all phases of a rail,
         * and READ_TEMPERATURE2 will return the maximum temperature detected
-        * for the the phases of the rail.
+        * for the phases of the rail.
         */
        for (i = 0; i < info->pages; i++) {
                /*
index b4c1ad1..630cfa4 100644 (file)
@@ -388,9 +388,9 @@ static irqreturn_t cdns_i2c_slave_isr(void *ptr)
  */
 static irqreturn_t cdns_i2c_master_isr(void *ptr)
 {
-       unsigned int isr_status, avail_bytes, updatetx;
+       unsigned int isr_status, avail_bytes;
        unsigned int bytes_to_send;
-       bool hold_quirk;
+       bool updatetx;
        struct cdns_i2c *id = ptr;
        /* Signal completion only after everything is updated */
        int done_flag = 0;
@@ -410,11 +410,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
         * Check if transfer size register needs to be updated again for a
         * large data receive operation.
         */
-       updatetx = 0;
-       if (id->recv_count > id->curr_recv_count)
-               updatetx = 1;
-
-       hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx;
+       updatetx = id->recv_count > id->curr_recv_count;
 
        /* When receiving, handle data interrupt and completion interrupt */
        if (id->p_recv_buf &&
@@ -445,7 +441,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
                                break;
                        }
 
-                       if (cdns_is_holdquirk(id, hold_quirk))
+                       if (cdns_is_holdquirk(id, updatetx))
                                break;
                }
 
@@ -456,7 +452,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
                 * maintain transfer size non-zero while performing a large
                 * receive operation.
                 */
-               if (cdns_is_holdquirk(id, hold_quirk)) {
+               if (cdns_is_holdquirk(id, updatetx)) {
                        /* wait while fifo is full */
                        while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
                               (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
@@ -478,22 +474,6 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
                                                  CDNS_I2C_XFER_SIZE_OFFSET);
                                id->curr_recv_count = id->recv_count;
                        }
-               } else if (id->recv_count && !hold_quirk &&
-                                               !id->curr_recv_count) {
-
-                       /* Set the slave address in address register*/
-                       cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
-                                               CDNS_I2C_ADDR_OFFSET);
-
-                       if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
-                               cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
-                                               CDNS_I2C_XFER_SIZE_OFFSET);
-                               id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
-                       } else {
-                               cdns_i2c_writereg(id->recv_count,
-                                               CDNS_I2C_XFER_SIZE_OFFSET);
-                               id->curr_recv_count = id->recv_count;
-                       }
                }
 
                /* Clear hold (if not repeated start) and signal completion */
@@ -1338,6 +1318,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
        return 0;
 
 err_clk_dis:
+       clk_notifier_unregister(id->clk, &id->clk_rate_change_nb);
        clk_disable_unprepare(id->clk);
        pm_runtime_disable(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
index e7d316b..c023b69 100644 (file)
@@ -477,9 +477,6 @@ int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare)
 {
        int ret;
 
-       if (IS_ERR(dev->clk))
-               return PTR_ERR(dev->clk);
-
        if (prepare) {
                /* Optional interface clock */
                ret = clk_prepare_enable(dev->pclk);
index 70ade53..ba043b5 100644 (file)
@@ -320,8 +320,17 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
                goto exit_reset;
        }
 
-       dev->clk = devm_clk_get(&pdev->dev, NULL);
-       if (!i2c_dw_prepare_clk(dev, true)) {
+       dev->clk = devm_clk_get_optional(&pdev->dev, NULL);
+       if (IS_ERR(dev->clk)) {
+               ret = PTR_ERR(dev->clk);
+               goto exit_reset;
+       }
+
+       ret = i2c_dw_prepare_clk(dev, true);
+       if (ret)
+               goto exit_reset;
+
+       if (dev->clk) {
                u64 clk_khz;
 
                dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
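
The designware probe change leans on the clk API's NULL-clock convention: devm_clk_get_optional() returns NULL rather than an error when the clock is simply absent, and the clk_*() calls accept NULL as a no-op, so only real errors (such as -EPROBE_DEFER) abort the probe. Sketch:

        struct clk *clk;
        int ret;

        clk = devm_clk_get_optional(dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);            /* a real error */

        ret = clk_prepare_enable(clk);          /* NULL clk: harmless no-op */
        if (ret)
                return ret;

        if (clk)                                /* only with an actual clock */
                rate_khz = clk_get_rate(clk) / 1000;
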
index e9e2db6..78fb1a4 100644 (file)
@@ -66,7 +66,7 @@
 
 /* IMX I2C registers:
  * the I2C register offset is different between SoCs,
- * to provid support for all these chips, split the
+ * to provide support for all these chips, split the
  * register offset into a fixed base address and a
  * variable shift value, then the full register offset
  * will be calculated by
index 56aa424..815cc56 100644 (file)
@@ -49,7 +49,7 @@
 #define MLXCPLD_LPCI2C_NACK_IND                2
 
 #define MLXCPLD_I2C_FREQ_1000KHZ_SET   0x04
-#define MLXCPLD_I2C_FREQ_400KHZ_SET    0x0c
+#define MLXCPLD_I2C_FREQ_400KHZ_SET    0x0e
 #define MLXCPLD_I2C_FREQ_100KHZ_SET    0x42
 
 enum mlxcpld_i2c_frequency {
index bdecb78..8e69853 100644 (file)
@@ -1420,17 +1420,22 @@ static int mtk_i2c_probe(struct platform_device *pdev)
        if (ret < 0) {
                dev_err(&pdev->dev,
                        "Request I2C IRQ %d fail\n", irq);
-               return ret;
+               goto err_bulk_unprepare;
        }
 
        i2c_set_adapdata(&i2c->adap, i2c);
        ret = i2c_add_adapter(&i2c->adap);
        if (ret)
-               return ret;
+               goto err_bulk_unprepare;
 
        platform_set_drvdata(pdev, i2c);
 
        return 0;
+
+err_bulk_unprepare:
+       clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
+
+       return ret;
 }
 
 static int mtk_i2c_remove(struct platform_device *pdev)
index 5960ccd..aede9d5 100644 (file)
@@ -2372,8 +2372,7 @@ static struct platform_driver npcm_i2c_bus_driver = {
 static int __init npcm_i2c_init(void)
 {
        npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL);
-       platform_driver_register(&npcm_i2c_bus_driver);
-       return 0;
+       return platform_driver_register(&npcm_i2c_bus_driver);
 }
 module_init(npcm_i2c_init);
 
index ac8e7d6..39cb1b7 100644 (file)
@@ -161,7 +161,6 @@ static const char *piix4_aux_port_name_sb800 = " port 1";
 
 struct sb800_mmio_cfg {
        void __iomem *addr;
-       struct resource *res;
        bool use_mmio;
 };
 
@@ -179,13 +178,11 @@ static int piix4_sb800_region_request(struct device *dev,
                                      struct sb800_mmio_cfg *mmio_cfg)
 {
        if (mmio_cfg->use_mmio) {
-               struct resource *res;
                void __iomem *addr;
 
-               res = request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR,
-                                              SB800_PIIX4_FCH_PM_SIZE,
-                                              "sb800_piix4_smb");
-               if (!res) {
+               if (!request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR,
+                                             SB800_PIIX4_FCH_PM_SIZE,
+                                             "sb800_piix4_smb")) {
                        dev_err(dev,
                                "SMBus base address memory region 0x%x already in use.\n",
                                SB800_PIIX4_FCH_PM_ADDR);
@@ -195,12 +192,12 @@ static int piix4_sb800_region_request(struct device *dev,
                addr = ioremap(SB800_PIIX4_FCH_PM_ADDR,
                               SB800_PIIX4_FCH_PM_SIZE);
                if (!addr) {
-                       release_resource(res);
+                       release_mem_region(SB800_PIIX4_FCH_PM_ADDR,
+                                          SB800_PIIX4_FCH_PM_SIZE);
                        dev_err(dev, "SMBus base address mapping failed.\n");
                        return -ENOMEM;
                }
 
-               mmio_cfg->res = res;
                mmio_cfg->addr = addr;
 
                return 0;
@@ -222,7 +219,8 @@ static void piix4_sb800_region_release(struct device *dev,
 {
        if (mmio_cfg->use_mmio) {
                iounmap(mmio_cfg->addr);
-               release_resource(mmio_cfg->res);
+               release_mem_region(SB800_PIIX4_FCH_PM_ADDR,
+                                  SB800_PIIX4_FCH_PM_SIZE);
                return;
        }
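
The piix4 simplification works because request_mem_region_muxed() and release_mem_region() are symmetric on the same (start, size) pair, so there is no need to keep the returned struct resource around just to release the region later. The balanced pair, sketched with BASE/SIZE as stand-ins:

        if (!request_mem_region_muxed(BASE, SIZE, "sb800_piix4_smb"))
                return -EBUSY;

        addr = ioremap(BASE, SIZE);
        if (!addr) {
                release_mem_region(BASE, SIZE); /* same (start, size) pair */
                return -ENOMEM;
        }

        /* ... use addr ... */

        iounmap(addr);
        release_mem_region(BASE, SIZE);
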
 
index 424ef47..445b19d 100644 (file)
 #include <linux/tick.h>
 #include <trace/events/power.h>
 #include <linux/sched.h>
+#include <linux/sched/smt.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/moduleparam.h>
 #include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>
+#include <asm/nospec-branch.h>
 #include <asm/mwait.h>
 #include <asm/msr.h>
+#include <asm/fpu/api.h>
 
 #define INTEL_IDLE_VERSION "0.5.1"
 
@@ -106,6 +109,17 @@ static unsigned int mwait_substates __initdata;
 #define CPUIDLE_FLAG_ALWAYS_ENABLE     BIT(15)
 
 /*
+ * Disable IBRS across idle (when KERNEL_IBRS); this is mutually exclusive
+ * with CPUIDLE_FLAG_IRQ_ENABLE above.
+ */
+#define CPUIDLE_FLAG_IBRS              BIT(16)
+
+/*
+ * Initialize the large xstate on C6-state entry.
+ */
+#define CPUIDLE_FLAG_INIT_XSTATE       BIT(17)
+
+/*
  * MWAIT takes an 8-bit "hint" in EAX "suggesting"
  * the C-state (top nibble) and sub-state (bottom nibble)
  * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
@@ -154,11 +168,42 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
 
        raw_local_irq_enable();
        ret = __intel_idle(dev, drv, index);
-       raw_local_irq_disable();
+
+       /*
+        * The lockdep hardirqs state may be changed to 'on' by a timer
+        * tick interrupt followed by __do_softirq(). Use local_irq_disable()
+        * to keep the hardirqs state correct.
+        */
+       local_irq_disable();
+
+       return ret;
+}
+
+static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
+                                    struct cpuidle_driver *drv, int index)
+{
+       bool smt_active = sched_smt_active();
+       u64 spec_ctrl = spec_ctrl_current();
+       int ret;
+
+       if (smt_active)
+               wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
+       ret = __intel_idle(dev, drv, index);
+
+       if (smt_active)
+               wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
 
        return ret;
 }
 
+static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
+                                      struct cpuidle_driver *drv, int index)
+{
+       fpu_idle_fpregs();
+       return __intel_idle(dev, drv, index);
+}
+
 /**
  * intel_idle_s2idle - Ask the processor to enter the given idle state.
  * @dev: cpuidle device of the target CPU.
@@ -174,8 +219,12 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
 static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
                                       struct cpuidle_driver *drv, int index)
 {
-       unsigned long eax = flg2MWAIT(drv->states[index].flags);
        unsigned long ecx = 1; /* break on interrupt flag */
+       struct cpuidle_state *state = &drv->states[index];
+       unsigned long eax = flg2MWAIT(state->flags);
+
+       if (state->flags & CPUIDLE_FLAG_INIT_XSTATE)
+               fpu_idle_fpregs();
 
        mwait_idle_with_hints(eax, ecx);
 
@@ -680,7 +729,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
        {
                .name = "C6",
                .desc = "MWAIT 0x20",
-               .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
                .exit_latency = 85,
                .target_residency = 200,
                .enter = &intel_idle,
@@ -688,7 +737,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
        {
                .name = "C7s",
                .desc = "MWAIT 0x33",
-               .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
                .exit_latency = 124,
                .target_residency = 800,
                .enter = &intel_idle,
@@ -696,7 +745,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
        {
                .name = "C8",
                .desc = "MWAIT 0x40",
-               .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
                .exit_latency = 200,
                .target_residency = 800,
                .enter = &intel_idle,
@@ -704,7 +753,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
        {
                .name = "C9",
                .desc = "MWAIT 0x50",
-               .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
                .exit_latency = 480,
                .target_residency = 5000,
                .enter = &intel_idle,
@@ -712,7 +761,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
        {
                .name = "C10",
                .desc = "MWAIT 0x60",
-               .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
                .exit_latency = 890,
                .target_residency = 5000,
                .enter = &intel_idle,
@@ -741,7 +790,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
        {
                .name = "C6",
                .desc = "MWAIT 0x20",
-               .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
                .exit_latency = 133,
                .target_residency = 600,
                .enter = &intel_idle,
@@ -910,7 +959,8 @@ static struct cpuidle_state spr_cstates[] __initdata = {
        {
                .name = "C6",
                .desc = "MWAIT 0x20",
-               .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
+                                          CPUIDLE_FLAG_INIT_XSTATE,
                .exit_latency = 290,
                .target_residency = 800,
                .enter = &intel_idle,
@@ -1819,6 +1869,15 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
                if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE)
                        drv->states[drv->state_count].enter = intel_idle_irq;
 
+               if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
+                   cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) {
+                       WARN_ON_ONCE(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE);
+                       drv->states[drv->state_count].enter = intel_idle_ibrs;
+               }
+
+               if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_INIT_XSTATE)
+                       drv->states[drv->state_count].enter = intel_idle_xstate;
+
                if ((disabled_states_mask & BIT(drv->state_count)) ||
                    ((icpu->use_acpi || force_use_acpi) &&
                     intel_idle_off_by_default(mwait_hint) &&
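
After this change, the per-state enter callback is picked from the state flags at driver init, with the IBRS and IRQ_ENABLE variants asserted to be mutually exclusive (the IBRS path must not run with interrupts enabled). The dispatch, restated without the diff interleaving (state stands for drv->states[drv->state_count]):

        state->enter = intel_idle;                      /* default */

        if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE)
                state->enter = intel_idle_irq;

        if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
            (state->flags & CPUIDLE_FLAG_IBRS)) {
                WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE);
                state->enter = intel_idle_ibrs;
        }

        if (state->flags & CPUIDLE_FLAG_INIT_XSTATE)
                state->enter = intel_idle_xstate;
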
index 4f73bc8..9c9e985 100644 (file)
@@ -1006,11 +1006,12 @@ static int bma180_probe(struct i2c_client *client,
 
                data->trig->ops = &bma180_trigger_ops;
                iio_trigger_set_drvdata(data->trig, indio_dev);
-               indio_dev->trig = iio_trigger_get(data->trig);
 
                ret = iio_trigger_register(data->trig);
                if (ret)
                        goto err_trigger_free;
+
+               indio_dev->trig = iio_trigger_get(data->trig);
        }
 
        ret = iio_triggered_buffer_setup(indio_dev, NULL,
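
This hunk and the matching ones below (kxcjk1013, mxc4005, ccs811, hts221) reorder the same two steps: publish the trigger in indio_dev->trig, with its counted reference, only after iio_trigger_register() has succeeded, so an unregistered trigger is never visible and no reference leaks on failure. The safe ordering (my_trigger_ops is a stand-in):

        trig->ops = &my_trigger_ops;
        iio_trigger_set_drvdata(trig, indio_dev);

        ret = iio_trigger_register(trig);
        if (ret)
                return ret;             /* nothing published, nothing leaked */

        /* Only now hand out a counted reference. */
        indio_dev->trig = iio_trigger_get(trig);
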
index ac74cdc..748b35c 100644 (file)
@@ -1554,12 +1554,12 @@ static int kxcjk1013_probe(struct i2c_client *client,
 
                data->dready_trig->ops = &kxcjk1013_trigger_ops;
                iio_trigger_set_drvdata(data->dready_trig, indio_dev);
-               indio_dev->trig = data->dready_trig;
-               iio_trigger_get(indio_dev->trig);
                ret = iio_trigger_register(data->dready_trig);
                if (ret)
                        goto err_poweroff;
 
+               indio_dev->trig = iio_trigger_get(data->dready_trig);
+
                data->motion_trig->ops = &kxcjk1013_trigger_ops;
                iio_trigger_set_drvdata(data->motion_trig, indio_dev);
                ret = iio_trigger_register(data->motion_trig);
index 912a447..c7d9ca9 100644 (file)
@@ -1511,10 +1511,14 @@ static int mma8452_reset(struct i2c_client *client)
        int i;
        int ret;
 
-       ret = i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2,
+       /*
+        * On fxls8471 the chip resets as soon as the reset bit is written
+        * and does not ACK the transfer, so the return value is not checked
+        * here. The loop below reads the reset register to verify that the
+        * reset actually took effect.
+        */
+       i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2,
                                        MMA8452_CTRL_REG2_RST);
-       if (ret < 0)
-               return ret;
 
        for (i = 0; i < 10; i++) {
                usleep_range(100, 200);
@@ -1557,11 +1561,13 @@ static int mma8452_probe(struct i2c_client *client,
        mutex_init(&data->lock);
 
        data->chip_info = device_get_match_data(&client->dev);
-       if (!data->chip_info && id) {
-               data->chip_info = &mma_chip_info_table[id->driver_data];
-       } else {
-               dev_err(&client->dev, "unknown device model\n");
-               return -ENODEV;
+       if (!data->chip_info) {
+               if (id) {
+                       data->chip_info = &mma_chip_info_table[id->driver_data];
+               } else {
+                       dev_err(&client->dev, "unknown device model\n");
+                       return -ENODEV;
+               }
        }
 
        ret = iio_read_mount_matrix(&client->dev, &data->orientation);
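
The restructured lookup fixes a precedence bug: in the old form a successful device_get_match_data() sent control to the else branch, so probe failed with -ENODEV precisely when OF match data was present. The intended fallback order, sketched (chip_info_table is a stand-in name):

        info = device_get_match_data(dev);      /* OF/ACPI match data first */
        if (!info) {
                if (!id)                        /* no I2C id table entry either */
                        return -ENODEV;
                info = &chip_info_table[id->driver_data];
        }
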
index b3afbf0..df600d2 100644 (file)
@@ -456,8 +456,6 @@ static int mxc4005_probe(struct i2c_client *client,
 
                data->dready_trig->ops = &mxc4005_trigger_ops;
                iio_trigger_set_drvdata(data->dready_trig, indio_dev);
-               indio_dev->trig = data->dready_trig;
-               iio_trigger_get(indio_dev->trig);
                ret = devm_iio_trigger_register(&client->dev,
                                                data->dready_trig);
                if (ret) {
@@ -465,6 +463,8 @@ static int mxc4005_probe(struct i2c_client *client,
                                "failed to register trigger\n");
                        return ret;
                }
+
+               indio_dev->trig = iio_trigger_get(data->dready_trig);
        }
 
        return devm_iio_device_register(&client->dev, indio_dev);
index a73e3c2..a9e655e 100644 (file)
@@ -322,16 +322,19 @@ static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev)
 
                if (!try_module_get(cl->dev->driver->owner)) {
                        mutex_unlock(&registered_clients_lock);
+                       of_node_put(cln);
                        return ERR_PTR(-ENODEV);
                }
 
                get_device(cl->dev);
                cl->info = info;
                mutex_unlock(&registered_clients_lock);
+               of_node_put(cln);
                return cl;
        }
 
        mutex_unlock(&registered_clients_lock);
+       of_node_put(cln);
 
        return ERR_PTR(-EPROBE_DEFER);
 }
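
Each return path after the phandle lookup now drops the node reference. The rule of thumb, sketched (of_parse_phandle() assumed as the source of the reference, use_node() hypothetical):

        struct device_node *np;
        int ret;

        np = of_parse_phandle(dev->of_node, "some-phandle", 0);  /* +1 ref */
        if (!np)
                return -ENODEV;

        ret = use_node(np);
        of_node_put(np);        /* drop the ref on *every* exit path */
        if (ret)
                return ret;
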
index 0793d24..9341e0e 100644 (file)
@@ -186,6 +186,7 @@ static int aspeed_adc_set_trim_data(struct iio_dev *indio_dev)
                return -EOPNOTSUPP;
        }
        scu = syscon_node_to_regmap(syscon);
+       of_node_put(syscon);
        if (IS_ERR(scu)) {
                dev_warn(data->dev, "Failed to get syscon regmap\n");
                return -EOPNOTSUPP;
index a4b8be5..580361b 100644 (file)
@@ -196,6 +196,14 @@ static const struct dmi_system_id axp288_adc_ts_bias_override[] = {
                },
                .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
        },
+       {
+               /* Nuvision Solo 10 Draw */
+               .matches = {
+                 DMI_MATCH(DMI_SYS_VENDOR, "TMAX"),
+                 DMI_MATCH(DMI_PRODUCT_NAME, "TM101W610L"),
+               },
+               .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
+       },
        {}
 };
 
index 7585144..5b09a93 100644 (file)
@@ -334,11 +334,15 @@ static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l
        i = 0;
        device_for_each_child_node(&pdev->dev, fwnode) {
                ret = fwnode_property_read_u32(fwnode, "reg", &channel);
-               if (ret)
+               if (ret) {
+                       fwnode_handle_put(fwnode);
                        return ret;
+               }
 
-               if (channel >= RZG2L_ADC_MAX_CHANNELS)
+               if (channel >= RZG2L_ADC_MAX_CHANNELS) {
+                       fwnode_handle_put(fwnode);
                        return -EINVAL;
+               }
 
                chan_array[i].type = IIO_VOLTAGE;
                chan_array[i].indexed = 1;
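
device_for_each_child_node() takes a reference on the child for each iteration and drops it when advancing, so breaking out early (including by returning) leaves one reference held; the early exits must put it themselves. The pattern, as applied here and via the goto label in the ads131e08 hunk below:

        struct fwnode_handle *child;
        u32 val;
        int ret;

        device_for_each_child_node(dev, child) {
                ret = fwnode_property_read_u32(child, "reg", &val);
                if (ret) {
                        fwnode_handle_put(child);  /* balance the loop's ref */
                        return ret;
                }
                /* ... per-child setup ... */
        }
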
index 1426562..3efb8c4 100644 (file)
@@ -64,6 +64,7 @@ struct stm32_adc_priv;
  * @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
  * @has_syscfg: SYSCFG capability flags
  * @num_irqs:  number of interrupt lines
+ * @num_adcs:   maximum number of ADC instances in the common registers
  */
 struct stm32_adc_priv_cfg {
        const struct stm32_adc_common_regs *regs;
@@ -71,6 +72,7 @@ struct stm32_adc_priv_cfg {
        u32 max_clk_rate_hz;
        unsigned int has_syscfg;
        unsigned int num_irqs;
+       unsigned int num_adcs;
 };
 
 /**
@@ -352,7 +354,7 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
         * before invoking the interrupt handler (e.g. call ISR only for
         * IRQ-enabled ADCs).
         */
-       for (i = 0; i < priv->cfg->num_irqs; i++) {
+       for (i = 0; i < priv->cfg->num_adcs; i++) {
                if ((status & priv->cfg->regs->eoc_msk[i] &&
                     stm32_adc_eoc_enabled(priv, i)) ||
                     (status & priv->cfg->regs->ovr_msk[i]))
@@ -792,6 +794,7 @@ static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
        .clk_sel = stm32f4_adc_clk_sel,
        .max_clk_rate_hz = 36000000,
        .num_irqs = 1,
+       .num_adcs = 3,
 };
 
 static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
@@ -800,14 +803,16 @@ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
        .max_clk_rate_hz = 36000000,
        .has_syscfg = HAS_VBOOSTER,
        .num_irqs = 1,
+       .num_adcs = 2,
 };
 
 static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
        .regs = &stm32h7_adc_common_regs,
        .clk_sel = stm32h7_adc_clk_sel,
-       .max_clk_rate_hz = 40000000,
+       .max_clk_rate_hz = 36000000,
        .has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD,
        .num_irqs = 2,
+       .num_adcs = 2,
 };
 
 static const struct of_device_id stm32_adc_of_match[] = {
index a68ecbd..11ef873 100644 (file)
@@ -1365,7 +1365,7 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
                else
                        ret = -EINVAL;
 
-               if (mask == IIO_CHAN_INFO_PROCESSED && adc->vrefint.vrefint_cal)
+               if (mask == IIO_CHAN_INFO_PROCESSED)
                        *val = STM32_ADC_VREFINT_VOLTAGE * adc->vrefint.vrefint_cal / *val;
 
                iio_device_release_direct_mode(indio_dev);
@@ -1407,7 +1407,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data)
        struct stm32_adc *adc = iio_priv(indio_dev);
        const struct stm32_adc_regspec *regs = adc->cfg->regs;
        u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
-       u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
 
        /* Check ovr status right now, as ovr mask should be already disabled */
        if (status & regs->isr_ovr.mask) {
@@ -1422,11 +1421,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data)
                return IRQ_HANDLED;
        }
 
-       if (!(status & mask))
-               dev_err_ratelimited(&indio_dev->dev,
-                                   "Unexpected IRQ: IER=0x%08x, ISR=0x%08x\n",
-                                   mask, status);
-
        return IRQ_NONE;
 }
 
@@ -1436,10 +1430,6 @@ static irqreturn_t stm32_adc_isr(int irq, void *data)
        struct stm32_adc *adc = iio_priv(indio_dev);
        const struct stm32_adc_regspec *regs = adc->cfg->regs;
        u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
-       u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
-
-       if (!(status & mask))
-               return IRQ_WAKE_THREAD;
 
        if (status & regs->isr_ovr.mask) {
                /*
@@ -1979,10 +1969,10 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n
 
        for (i = 0; i < STM32_ADC_INT_CH_NB; i++) {
                if (!strncmp(stm32_adc_ic[i].name, ch_name, STM32_ADC_CH_SZ)) {
-                       adc->int_ch[i] = chan;
-
-                       if (stm32_adc_ic[i].idx != STM32_ADC_INT_CH_VREFINT)
-                               continue;
+                       if (stm32_adc_ic[i].idx != STM32_ADC_INT_CH_VREFINT) {
+                               adc->int_ch[i] = chan;
+                               break;
+                       }
 
                        /* Get calibration data for vrefint channel */
                        ret = nvmem_cell_read_u16(&indio_dev->dev, "vrefint", &vrefint);
@@ -1990,10 +1980,15 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n
                                return dev_err_probe(indio_dev->dev.parent, ret,
                                                     "nvmem access error\n");
                        }
-                       if (ret == -ENOENT)
-                               dev_dbg(&indio_dev->dev, "vrefint calibration not found\n");
-                       else
-                               adc->vrefint.vrefint_cal = vrefint;
+                       if (ret == -ENOENT) {
+                               dev_dbg(&indio_dev->dev, "vrefint calibration not found. Skip vrefint channel\n");
+                               return ret;
+                       } else if (!vrefint) {
+                               dev_dbg(&indio_dev->dev, "Null vrefint calibration value. Skip vrefint channel\n");
+                               return -ENOENT;
+                       }
+                       adc->int_ch[i] = chan;
+                       adc->vrefint.vrefint_cal = vrefint;
                }
        }
 
@@ -2030,7 +2025,9 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
                        }
                        strncpy(adc->chan_name[val], name, STM32_ADC_CH_SZ);
                        ret = stm32_adc_populate_int_ch(indio_dev, name, val);
-                       if (ret)
+                       if (ret == -ENOENT)
+                               continue;
+                       else if (ret)
                                goto err;
                } else if (ret != -EINVAL) {
                        dev_err(&indio_dev->dev, "Invalid label %d\n", ret);
index 0c2025a..80a0981 100644 (file)
@@ -739,7 +739,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev)
        device_for_each_child_node(dev, node) {
                ret = fwnode_property_read_u32(node, "reg", &channel);
                if (ret)
-                       return ret;
+                       goto err_child_out;
 
                ret = fwnode_property_read_u32(node, "ti,gain", &tmp);
                if (ret) {
@@ -747,7 +747,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev)
                } else {
                        ret = ads131e08_pga_gain_to_field_value(st, tmp);
                        if (ret < 0)
-                               return ret;
+                               goto err_child_out;
 
                        channel_config[i].pga_gain = tmp;
                }
@@ -758,7 +758,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev)
                } else {
                        ret = ads131e08_validate_channel_mux(st, tmp);
                        if (ret)
-                               return ret;
+                               goto err_child_out;
 
                        channel_config[i].mux = tmp;
                }
@@ -784,6 +784,10 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev)
        st->channel_config = channel_config;
 
        return 0;
+
+err_child_out:
+       fwnode_handle_put(node);
+       return ret;
 }
 
 static void ads131e08_regulator_disable(void *data)
index a55396c..a768770 100644 (file)
@@ -1409,7 +1409,7 @@ static int ams_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
-               return ret;
+               return irq;
 
        ret = devm_request_irq(&pdev->dev, irq, &ams_irq, 0, "ams-irq",
                               indio_dev);
index c6cf709..6949d21 100644 (file)
@@ -277,7 +277,7 @@ static int rescale_configure_channel(struct device *dev,
        chan->ext_info = rescale->ext_info;
        chan->type = rescale->cfg->type;
 
-       if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) ||
+       if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) &&
            iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) {
                dev_info(dev, "using raw+scale source channel\n");
        } else if (iio_channel_has_info(schan, IIO_CHAN_INFO_PROCESSED)) {
index 847194f..80ef1aa 100644
@@ -499,11 +499,11 @@ static int ccs811_probe(struct i2c_client *client,
 
                data->drdy_trig->ops = &ccs811_trigger_ops;
                iio_trigger_set_drvdata(data->drdy_trig, indio_dev);
-               indio_dev->trig = data->drdy_trig;
-               iio_trigger_get(indio_dev->trig);
                ret = iio_trigger_register(data->drdy_trig);
                if (ret)
                        goto err_poweroff;
+
+               indio_dev->trig = iio_trigger_get(data->drdy_trig);
        }
 
        ret = iio_triggered_buffer_setup(indio_dev, NULL,
index a7994f8..1aac566 100644
@@ -700,8 +700,10 @@ static int admv1014_init(struct admv1014_state *st)
                         ADMV1014_DET_EN_MSK;
 
        enable_reg = FIELD_PREP(ADMV1014_P1DB_COMPENSATION_MSK, st->p1db_comp ? 3 : 0) |
-                    FIELD_PREP(ADMV1014_IF_AMP_PD_MSK, !(st->input_mode)) |
-                    FIELD_PREP(ADMV1014_BB_AMP_PD_MSK, st->input_mode) |
+                    FIELD_PREP(ADMV1014_IF_AMP_PD_MSK,
+                               (st->input_mode == ADMV1014_IF_MODE) ? 0 : 1) |
+                    FIELD_PREP(ADMV1014_BB_AMP_PD_MSK,
+                               (st->input_mode == ADMV1014_IF_MODE) ? 1 : 0) |
                     FIELD_PREP(ADMV1014_DET_EN_MSK, st->det_en);
 
        return __admv1014_spi_update_bits(st, ADMV1014_REG_ENABLE, enable_reg_msk, enable_reg);
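
The change above replaces implicit bool conversions with explicit checks against the IF-mode enum value, so the two power-down fields stay complementary even if more input modes are added later. For reference, FIELD_PREP() from <linux/bitfield.h> shifts a value into the bit positions named by a mask; a rough sketch with invented masks:

	#define IF_AMP_PD_MSK	BIT(5)	/* illustrative masks, not the  */
	#define BB_AMP_PD_MSK	BIT(4)	/* driver's real definitions    */

	/* power down exactly one of the two amplifier paths */
	reg = FIELD_PREP(IF_AMP_PD_MSK, if_mode ? 0 : 1) |
	      FIELD_PREP(BB_AMP_PD_MSK, if_mode ? 1 : 0);
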
index 4f19dc7..5908a96 100644
@@ -875,6 +875,7 @@ static int mpu3050_power_up(struct mpu3050 *mpu3050)
        ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
                                 MPU3050_PWR_MGM_SLEEP, 0);
        if (ret) {
+               regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
                dev_err(mpu3050->dev, "error setting power mode\n");
                return ret;
        }
index f29692b..66b3241 100644
@@ -135,9 +135,12 @@ int hts221_allocate_trigger(struct iio_dev *iio_dev)
 
        iio_trigger_set_drvdata(hw->trig, iio_dev);
        hw->trig->ops = &hts221_trigger_ops;
+
+       err = devm_iio_trigger_register(hw->dev, hw->trig);
+
        iio_dev->trig = iio_trigger_get(hw->trig);
 
-       return devm_iio_trigger_register(hw->dev, hw->trig);
+       return err;
 }
 
 static int hts221_buffer_preenable(struct iio_dev *iio_dev)
index c0f5059..995a9dc 100644
@@ -17,6 +17,7 @@
 #include "inv_icm42600_buffer.h"
 
 enum inv_icm42600_chip {
+       INV_CHIP_INVALID,
        INV_CHIP_ICM42600,
        INV_CHIP_ICM42602,
        INV_CHIP_ICM42605,
index 86858da..ca85fcc 100644
@@ -565,7 +565,7 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
        bool open_drain;
        int ret;
 
-       if (chip < 0 || chip >= INV_CHIP_NB) {
+       if (chip <= INV_CHIP_INVALID || chip >= INV_CHIP_NB) {
                dev_err(dev, "invalid chip = %d\n", chip);
                return -ENODEV;
        }
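
Adding a zero-valued INV_CHIP_INVALID entry means a match-table entry whose driver data was never filled in (and therefore reads back as 0) is now rejected by the range check in inv_icm42600_core_probe(). The general shape of the idiom, with hypothetical names:

	enum chip_id {
		CHIP_INVALID,	/* 0: catches missing/zeroed match data */
		CHIP_A,
		CHIP_B,
		CHIP_NB,	/* one past the last valid chip */
	};

	if (chip <= CHIP_INVALID || chip >= CHIP_NB)
		return -ENODEV;
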
index 9ff7b0e..b2bc637 100644
@@ -639,7 +639,7 @@ static int yas532_get_calibration_data(struct yas5xx *yas5xx)
        dev_dbg(yas5xx->dev, "calibration data: %*ph\n", 14, data);
 
        /* Sanity check, is this all zeroes? */
-       if (memchr_inv(data, 0x00, 13)) {
+       if (!memchr_inv(data, 0x00, 13)) {
                if (!(data[13] & BIT(7)))
                        dev_warn(yas5xx->dev, "calibration is blank!\n");
        }
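
memchr_inv() returns a pointer to the first byte that differs from the given value, or NULL when the whole range matches, so the corrected test warns only when the calibration block really is all zeroes. A minimal usage sketch:

	/* !memchr_inv(buf, 0x00, len)  <=>  all len bytes are zero */
	if (!memchr_inv(data, 0x00, len))
		pr_warn("calibration is blank\n");
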
index 70c37f6..63fbcaa 100644
@@ -885,6 +885,9 @@ sx9324_get_default_reg(struct device *dev, int idx,
                        break;
                ret = device_property_read_u32_array(dev, prop, pin_defs,
                                                     ARRAY_SIZE(pin_defs));
+               if (ret)
+                       break;
+
                for (pin = 0; pin < SX9324_NUM_PINS; pin++)
                        raw |= (pin_defs[pin] << (2 * pin)) &
                               SX9324_REG_AFE_PH0_PIN_MASK(pin);
index 56ca0ad..4c66c3f 100644
@@ -6,7 +6,7 @@
 # Keep in alphabetical order
 config IIO_RESCALE_KUNIT_TEST
        bool "Test IIO rescale conversion functions"
-       depends on KUNIT=y && !IIO_RESCALE
+       depends on KUNIT=y && IIO_RESCALE=y
        default KUNIT_ALL_TESTS
        help
          If you want to run tests on the iio-rescale code say Y here.
index f15ae0a..880360f 100644
@@ -4,6 +4,6 @@
 #
 
 # Keep in alphabetical order
-obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o ../afe/iio-rescale.o
+obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o
 obj-$(CONFIG_IIO_TEST_FORMAT) += iio-test-format.o
 CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN)
index f1a8704..d6c5e96 100644
@@ -190,6 +190,7 @@ static int iio_sysfs_trigger_remove(int id)
        }
 
        iio_trigger_unregister(t->trig);
+       irq_work_sync(&t->work);
        iio_trigger_free(t->trig);
 
        list_del(&t->l);
index 1c107d6..b985e0d 100644
@@ -1252,8 +1252,10 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
                return ERR_CAST(cm_id_priv);
 
        err = cm_init_listen(cm_id_priv, service_id, 0);
-       if (err)
+       if (err) {
+               ib_destroy_cm_id(&cm_id_priv->id);
                return ERR_PTR(err);
+       }
 
        spin_lock_irq(&cm_id_priv->lock);
        listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
index 638bf4a..646fa86 100644
@@ -4231,10 +4231,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
        struct irdma_cm_node *cm_node;
        struct list_head teardown_list;
        struct ib_qp_attr attr;
-       struct irdma_sc_vsi *vsi = &iwdev->vsi;
-       struct irdma_sc_qp *sc_qp;
-       struct irdma_qp *qp;
-       int i;
 
        INIT_LIST_HEAD(&teardown_list);
 
@@ -4251,52 +4247,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
                        irdma_cm_disconn(cm_node->iwqp);
                irdma_rem_ref_cm_node(cm_node);
        }
-       if (!iwdev->roce_mode)
-               return;
-
-       INIT_LIST_HEAD(&teardown_list);
-       for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
-               mutex_lock(&vsi->qos[i].qos_mutex);
-               list_for_each_safe (list_node, list_core_temp,
-                                   &vsi->qos[i].qplist) {
-                       u32 qp_ip[4];
-
-                       sc_qp = container_of(list_node, struct irdma_sc_qp,
-                                            list);
-                       if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
-                               continue;
-
-                       qp = sc_qp->qp_uk.back_qp;
-                       if (!disconnect_all) {
-                               if (nfo->ipv4)
-                                       qp_ip[0] = qp->udp_info.local_ipaddr[3];
-                               else
-                                       memcpy(qp_ip,
-                                              &qp->udp_info.local_ipaddr[0],
-                                              sizeof(qp_ip));
-                       }
-
-                       if (disconnect_all ||
-                           (nfo->vlan_id == (qp->udp_info.vlan_tag & VLAN_VID_MASK) &&
-                            !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
-                               spin_lock(&iwdev->rf->qptable_lock);
-                               if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
-                                       irdma_qp_add_ref(&qp->ibqp);
-                                       list_add(&qp->teardown_entry,
-                                                &teardown_list);
-                               }
-                               spin_unlock(&iwdev->rf->qptable_lock);
-                       }
-               }
-               mutex_unlock(&vsi->qos[i].qos_mutex);
-       }
-
-       list_for_each_safe (list_node, list_core_temp, &teardown_list) {
-               qp = container_of(list_node, struct irdma_qp, teardown_entry);
-               attr.qp_state = IB_QPS_ERR;
-               irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
-               irdma_qp_rem_ref(&qp->ibqp);
-       }
 }
 
 /**
index e46fc11..50299f5 100644
@@ -201,6 +201,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
        dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;
        dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;
        dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;
+       dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M;
        dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;
        dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;
        dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;
index cf53b17..5986fd9 100644
@@ -139,6 +139,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
        dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
        dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
        dev->irq_ops = &icrdma_irq_ops;
+       dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
        dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
        dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
        dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
index 46c1233..4789e85 100644
@@ -127,6 +127,7 @@ struct irdma_hw_attrs {
        u64 max_hw_outbound_msg_size;
        u64 max_hw_inbound_msg_size;
        u64 max_mr_size;
+       u64 page_size_cap;
        u32 min_hw_qp_id;
        u32 min_hw_aeq_size;
        u32 max_hw_aeq_size;
index c4412ec..96135a2 100644
@@ -32,7 +32,7 @@ static int irdma_query_device(struct ib_device *ibdev,
        props->vendor_part_id = pcidev->device;
 
        props->hw_ver = rf->pcidev->revision;
-       props->page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+       props->page_size_cap = hw_attrs->page_size_cap;
        props->max_mr_size = hw_attrs->max_mr_size;
        props->max_qp = rf->max_qp - rf->used_qps;
        props->max_qp_wr = hw_attrs->max_qp_wr;
@@ -2781,7 +2781,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
        if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
                iwmr->page_size = ib_umem_find_best_pgsz(region,
-                                                        SZ_4K | SZ_2M | SZ_1G,
+                                                        iwdev->rf->sc_dev.hw_attrs.page_size_cap,
                                                         virt);
                if (unlikely(!iwmr->page_size)) {
                        kfree(iwmr);
index 8def88c..db9ef3e 100644
@@ -418,6 +418,7 @@ struct qedr_qp {
        u32 sq_psn;
        u32 qkey;
        u32 dest_qp_num;
+       u8 timeout;
 
        /* Relevant to qps created from kernel space only (ULPs) */
        u8 prev_wqe_size;
index f0f43b6..03ed7c0 100644
@@ -2613,6 +2613,8 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                                        1 << max_t(int, attr->timeout - 8, 0);
                else
                        qp_params.ack_timeout = 0;
+
+               qp->timeout = attr->timeout;
        }
 
        if (attr_mask & IB_QP_RETRY_CNT) {
@@ -2772,7 +2774,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
        rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
        rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
        rdma_ah_set_sl(&qp_attr->ah_attr, 0);
-       qp_attr->timeout = params.timeout;
+       qp_attr->timeout = qp->timeout;
        qp_attr->rnr_retry = params.rnr_retry;
        qp_attr->retry_cnt = params.retry_cnt;
        qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
index 3ad9870..aa45a9f 100644
@@ -900,6 +900,11 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
        } else {
                dev_warn(dev, "Unexpected ACPI resources: gpio_count %d, gpio_int_idx %d\n",
                         ts->gpio_count, ts->gpio_int_idx);
+               /*
+                * On some devices _PS0 does a reset for us and
+                * sometimes this is necessary for things to work.
+                */
+               acpi_device_fix_up_power(ACPI_COMPANION(dev));
                return -EINVAL;
        }
 
index 43c521f..3dda6ea 100644
@@ -1654,6 +1654,9 @@ static int usbtouch_probe(struct usb_interface *intf,
        if (id->driver_info == DEVTYPE_IGNORE)
                return -ENODEV;
 
+       if (id->driver_info >= ARRAY_SIZE(usbtouch_dev_info))
+               return -ENODEV;
+
        endpoint = usbtouch_get_input_endpoint(intf->cur_altsetting);
        if (!endpoint)
                return -ENXIO;
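
Here driver_info doubles as an index into the usbtouch_dev_info[] table, so the new check rejects a malformed id-table entry before it can cause an out-of-bounds read. It reduces to the usual validate-before-index guard:

	/* idx comes from the match table; validate before use */
	if (idx >= ARRAY_SIZE(usbtouch_dev_info))
		return -ENODEV;
	type = &usbtouch_dev_info[idx];
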
index 2757c77..f51ab56 100644
@@ -758,7 +758,9 @@ batt_err:
 
 static int wm97xx_mfd_remove(struct platform_device *pdev)
 {
-       return wm97xx_remove(&pdev->dev);
+       wm97xx_remove(&pdev->dev);
+
+       return 0;
 }
 
 static int __maybe_unused wm97xx_suspend(struct device *dev)
index 592c1e1..9699ca1 100644
@@ -382,7 +382,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
 
 static struct notifier_block dmar_pci_bus_nb = {
        .notifier_call = dmar_pci_bus_notifier,
-       .priority = INT_MIN,
+       .priority = 1,
 };
 
 static struct dmar_drhd_unit *
index 4401659..5c0dce7 100644
@@ -320,30 +320,6 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
-/*
- * Iterate over elements in device_domain_list and call the specified
- * callback @fn against each element.
- */
-int for_each_device_domain(int (*fn)(struct device_domain_info *info,
-                                    void *data), void *data)
-{
-       int ret = 0;
-       unsigned long flags;
-       struct device_domain_info *info;
-
-       spin_lock_irqsave(&device_domain_lock, flags);
-       list_for_each_entry(info, &device_domain_list, global) {
-               ret = fn(info, data);
-               if (ret) {
-                       spin_unlock_irqrestore(&device_domain_lock, flags);
-                       return ret;
-               }
-       }
-       spin_unlock_irqrestore(&device_domain_lock, flags);
-
-       return 0;
-}
-
 const struct iommu_ops intel_iommu_ops;
 
 static bool translation_pre_enabled(struct intel_iommu *iommu)
index cb4c1d0..17cad7c 100644
@@ -86,54 +86,6 @@ void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
 /*
  * Per device pasid table management:
  */
-static inline void
-device_attach_pasid_table(struct device_domain_info *info,
-                         struct pasid_table *pasid_table)
-{
-       info->pasid_table = pasid_table;
-       list_add(&info->table, &pasid_table->dev);
-}
-
-static inline void
-device_detach_pasid_table(struct device_domain_info *info,
-                         struct pasid_table *pasid_table)
-{
-       info->pasid_table = NULL;
-       list_del(&info->table);
-}
-
-struct pasid_table_opaque {
-       struct pasid_table      **pasid_table;
-       int                     segment;
-       int                     bus;
-       int                     devfn;
-};
-
-static int search_pasid_table(struct device_domain_info *info, void *opaque)
-{
-       struct pasid_table_opaque *data = opaque;
-
-       if (info->iommu->segment == data->segment &&
-           info->bus == data->bus &&
-           info->devfn == data->devfn &&
-           info->pasid_table) {
-               *data->pasid_table = info->pasid_table;
-               return 1;
-       }
-
-       return 0;
-}
-
-static int get_alias_pasid_table(struct pci_dev *pdev, u16 alias, void *opaque)
-{
-       struct pasid_table_opaque *data = opaque;
-
-       data->segment = pci_domain_nr(pdev->bus);
-       data->bus = PCI_BUS_NUM(alias);
-       data->devfn = alias & 0xff;
-
-       return for_each_device_domain(&search_pasid_table, data);
-}
 
 /*
  * Allocate a pasid table for @dev. It should be called in a
@@ -143,28 +95,18 @@ int intel_pasid_alloc_table(struct device *dev)
 {
        struct device_domain_info *info;
        struct pasid_table *pasid_table;
-       struct pasid_table_opaque data;
        struct page *pages;
        u32 max_pasid = 0;
-       int ret, order;
-       int size;
+       int order, size;
 
        might_sleep();
        info = dev_iommu_priv_get(dev);
        if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
                return -EINVAL;
 
-       /* DMA alias device already has a pasid table, use it: */
-       data.pasid_table = &pasid_table;
-       ret = pci_for_each_dma_alias(to_pci_dev(dev),
-                                    &get_alias_pasid_table, &data);
-       if (ret)
-               goto attach_out;
-
        pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
        if (!pasid_table)
                return -ENOMEM;
-       INIT_LIST_HEAD(&pasid_table->dev);
 
        if (info->pasid_supported)
                max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
@@ -182,9 +124,7 @@ int intel_pasid_alloc_table(struct device *dev)
        pasid_table->table = page_address(pages);
        pasid_table->order = order;
        pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
-
-attach_out:
-       device_attach_pasid_table(info, pasid_table);
+       info->pasid_table = pasid_table;
 
        return 0;
 }
@@ -202,10 +142,7 @@ void intel_pasid_free_table(struct device *dev)
                return;
 
        pasid_table = info->pasid_table;
-       device_detach_pasid_table(info, pasid_table);
-
-       if (!list_empty(&pasid_table->dev))
-               return;
+       info->pasid_table = NULL;
 
        /* Free scalable mode PASID directory tables: */
        dir = pasid_table->table;
index 583ea67..bf5b937 100644
@@ -74,7 +74,6 @@ struct pasid_table {
        void                    *table;         /* pasid table pointer */
        int                     order;          /* page order of pasid table */
        u32                     max_pasid;      /* max pasid */
-       struct list_head        dev;            /* device list */
 };
 
 /* Get PRESENT bit of a PASID directory entry. */
index 8fdb84b..1d42084 100644
@@ -987,7 +987,7 @@ static const struct of_device_id ipmmu_of_ids[] = {
                .compatible = "renesas,ipmmu-r8a779a0",
                .data = &ipmmu_features_rcar_gen4,
        }, {
-               .compatible = "renesas,rcar-gen4-ipmmu",
+               .compatible = "renesas,rcar-gen4-ipmmu-vmsa",
                .data = &ipmmu_features_rcar_gen4,
        }, {
                /* Terminator */
index 4ab1038..bbb11cb 100644
@@ -298,7 +298,7 @@ config XTENSA_MX
 
 config XILINX_INTC
        bool "Xilinx Interrupt Controller IP"
-       depends on MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP
+       depends on OF_ADDRESS
        select IRQ_DOMAIN
        help
          Support for the Xilinx Interrupt Controller IP core.
index 12dd487..1c2813a 100644
 #define AIC_TMR_EL02_PHYS      AIC_TMR_GUEST_PHYS
 #define AIC_TMR_EL02_VIRT      AIC_TMR_GUEST_VIRT
 
-DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
+static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
 
 struct aic_info {
        int version;
@@ -1035,6 +1035,7 @@ static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
                        continue;
 
                cpu = of_cpu_node_to_id(cpu_node);
+               of_node_put(cpu_node);
                if (WARN_ON(cpu < 0))
                        continue;
 
@@ -1143,6 +1144,7 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
                for_each_child_of_node(affs, chld)
                        build_fiq_affinity(irqc, chld);
        }
+       of_node_put(affs);
 
        set_handle_irq(aic_handle_irq);
        set_handle_fiq(aic_handle_fiq);
index b4c1924..38fab02 100644
@@ -57,6 +57,7 @@ realview_gic_of_init(struct device_node *node, struct device_node *parent)
 
        /* The PB11MPCore GIC needs to be configured in the syscon */
        map = syscon_node_to_regmap(np);
+       of_node_put(np);
        if (!IS_ERR(map)) {
                /* new irq mode with no DCC */
                regmap_write(map, REALVIEW_SYS_LOCK_OFFSET,
index 2be8dea..2d25bca 100644
@@ -1932,7 +1932,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
 
        gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
        if (!gic_data.ppi_descs)
-               return;
+               goto out_put_node;
 
        nr_parts = of_get_child_count(parts_node);
 
@@ -1973,12 +1973,15 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
                                continue;
 
                        cpu = of_cpu_node_to_id(cpu_node);
-                       if (WARN_ON(cpu < 0))
+                       if (WARN_ON(cpu < 0)) {
+                               of_node_put(cpu_node);
                                continue;
+                       }
 
                        pr_cont("%pOF[%d] ", cpu_node, cpu);
 
                        cpumask_set_cpu(cpu, &part->mask);
+                       of_node_put(cpu_node);
                }
 
                pr_cont("}\n");
@@ -2039,15 +2042,40 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
        vgic_set_kvm_info(&gic_v3_kvm_info);
 }
 
+static void gic_request_region(resource_size_t base, resource_size_t size,
+                              const char *name)
+{
+       if (!request_mem_region(base, size, name))
+               pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
+                            name, &base);
+}
+
+static void __iomem *gic_of_iomap(struct device_node *node, int idx,
+                                 const char *name, struct resource *res)
+{
+       void __iomem *base;
+       int ret;
+
+       ret = of_address_to_resource(node, idx, res);
+       if (ret)
+               return IOMEM_ERR_PTR(ret);
+
+       gic_request_region(res->start, resource_size(res), name);
+       base = of_iomap(node, idx);
+
+       return base ?: IOMEM_ERR_PTR(-ENOMEM);
+}
+
 static int __init gic_of_init(struct device_node *node, struct device_node *parent)
 {
        void __iomem *dist_base;
        struct redist_region *rdist_regs;
+       struct resource res;
        u64 redist_stride;
        u32 nr_redist_regions;
        int err, i;
 
-       dist_base = of_io_request_and_map(node, 0, "GICD");
+       dist_base = gic_of_iomap(node, 0, "GICD", &res);
        if (IS_ERR(dist_base)) {
                pr_err("%pOF: unable to map gic dist registers\n", node);
                return PTR_ERR(dist_base);
@@ -2070,12 +2098,8 @@ static int __init gic_of_init(struct device_node *node, struct device_node *parent)
        }
 
        for (i = 0; i < nr_redist_regions; i++) {
-               struct resource res;
-               int ret;
-
-               ret = of_address_to_resource(node, 1 + i, &res);
-               rdist_regs[i].redist_base = of_io_request_and_map(node, 1 + i, "GICR");
-               if (ret || IS_ERR(rdist_regs[i].redist_base)) {
+               rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
+               if (IS_ERR(rdist_regs[i].redist_base)) {
                        pr_err("%pOF: couldn't map region %d\n", node, i);
                        err = -ENODEV;
                        goto out_unmap_rdist;
@@ -2148,7 +2172,7 @@ gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
                pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
                return -ENOMEM;
        }
-       request_mem_region(redist->base_address, redist->length, "GICR");
+       gic_request_region(redist->base_address, redist->length, "GICR");
 
        gic_acpi_register_redist(redist->base_address, redist_base);
        return 0;
@@ -2171,7 +2195,7 @@ gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
        redist_base = ioremap(gicc->gicr_base_address, size);
        if (!redist_base)
                return -ENOMEM;
-       request_mem_region(gicc->gicr_base_address, size, "GICR");
+       gic_request_region(gicc->gicr_base_address, size, "GICR");
 
        gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
        return 0;
@@ -2373,7 +2397,7 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
                pr_err("Unable to map GICD registers\n");
                return -ENOMEM;
        }
-       request_mem_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");
+       gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");
 
        err = gic_validate_dist_version(acpi_data.dist_base);
        if (err) {
index aed8885..8d05d8b 100644
 
 #define LIOINTC_ERRATA_IRQ     10
 
+#if defined(CONFIG_MIPS)
+#define liointc_core_id get_ebase_cpunum()
+#else
+#define liointc_core_id get_csr_cpuid()
+#endif
+
 struct liointc_handler_data {
        struct liointc_priv     *priv;
        u32                     parent_int_map;
@@ -57,7 +63,7 @@ static void liointc_chained_handle_irq(struct irq_desc *desc)
        struct liointc_handler_data *handler = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct irq_chip_generic *gc = handler->priv->gc;
-       int core = cpu_logical_map(smp_processor_id()) % LIOINTC_NUM_CORES;
+       int core = liointc_core_id % LIOINTC_NUM_CORES;
        u32 pending;
 
        chained_irq_enter(chip, desc);
index 49b47e7..f289ccd 100644
@@ -66,7 +66,6 @@ static struct or1k_pic_dev or1k_pic_level = {
                .name = "or1k-PIC-level",
                .irq_unmask = or1k_pic_unmask,
                .irq_mask = or1k_pic_mask,
-               .irq_mask_ack = or1k_pic_mask_ack,
        },
        .handle = handle_level_irq,
        .flags = IRQ_LEVEL | IRQ_NOPROBE,
index 50a5682..56bf502 100644
@@ -134,9 +134,9 @@ static int __init map_interrupts(struct device_node *node, struct irq_domain *do
                if (!cpu_ictl)
                        return -EINVAL;
                ret = of_property_read_u32(cpu_ictl, "#interrupt-cells", &tmp);
+               of_node_put(cpu_ictl);
                if (ret || tmp != 1)
                        return -EINVAL;
-               of_node_put(cpu_ictl);
 
                cpu_int = be32_to_cpup(imap + 2);
                if (cpu_int > 7 || cpu_int < 2)
index 89121b3..716b1bb 100644
@@ -237,6 +237,7 @@ static const struct of_device_id uniphier_aidet_match[] = {
        { .compatible = "socionext,uniphier-ld11-aidet" },
        { .compatible = "socionext,uniphier-ld20-aidet" },
        { .compatible = "socionext,uniphier-pxs3-aidet" },
+       { .compatible = "socionext,uniphier-nx1-aidet" },
        { /* sentinel */ }
 };
 
index 54c0473..c954ff9 100644
@@ -272,6 +272,7 @@ struct dm_io {
        atomic_t io_count;
        struct mapped_device *md;
 
+       struct bio *split_bio;
        /* The three fields represent mapped part of original bio */
        struct bio *orig_bio;
        unsigned int sector_offset; /* offset to end of orig_bio */
index 1f6bf15..e92c1af 100644
@@ -1400,7 +1400,7 @@ static void start_worker(struct era *era)
 static void stop_worker(struct era *era)
 {
        atomic_set(&era->suspended, 1);
-       flush_workqueue(era->wq);
+       drain_workqueue(era->wq);
 }
 
 /*----------------------------------------------------------------
@@ -1570,6 +1570,12 @@ static void era_postsuspend(struct dm_target *ti)
        }
 
        stop_worker(era);
+
+       r = metadata_commit(era->md);
+       if (r) {
+               DMERR("%s: metadata_commit failed", __func__);
+               /* FIXME: fail mode */
+       }
 }
 
 static int era_preresume(struct dm_target *ti)
index 06f3289..0c6620e 100644
@@ -415,8 +415,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
        /*
         * Work out how many "unsigned long"s we need to hold the bitset.
         */
-       bitset_size = dm_round_up(region_count,
-                                 sizeof(*lc->clean_bits) << BYTE_SHIFT);
+       bitset_size = dm_round_up(region_count, BITS_PER_LONG);
        bitset_size >>= BYTE_SHIFT;
 
        lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
@@ -616,7 +615,7 @@ static int disk_resume(struct dm_dirty_log *log)
                        log_clear_bit(lc, lc->clean_bits, i);
 
        /* clear any old bits -- device has shrunk */
-       for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
+       for (i = lc->region_count; i % BITS_PER_LONG; i++)
                log_clear_bit(lc, lc->clean_bits, i);
 
        /* copy clean across to sync */
index 5e41fba..80c9f71 100644
@@ -1001,12 +1001,13 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
 static int validate_raid_redundancy(struct raid_set *rs)
 {
        unsigned int i, rebuild_cnt = 0;
-       unsigned int rebuilds_per_group = 0, copies;
+       unsigned int rebuilds_per_group = 0, copies, raid_disks;
        unsigned int group_size, last_group_start;
 
-       for (i = 0; i < rs->md.raid_disks; i++)
-               if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
-                   !rs->dev[i].rdev.sb_page)
+       for (i = 0; i < rs->raid_disks; i++)
+               if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
+                   ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+                     !rs->dev[i].rdev.sb_page)))
                        rebuild_cnt++;
 
        switch (rs->md.level) {
@@ -1046,8 +1047,9 @@ static int validate_raid_redundancy(struct raid_set *rs)
                 *          A    A    B    B    C
                 *          C    D    D    E    E
                 */
+               raid_disks = min(rs->raid_disks, rs->md.raid_disks);
                if (__is_raid10_near(rs->md.new_layout)) {
-                       for (i = 0; i < rs->md.raid_disks; i++) {
+                       for (i = 0; i < raid_disks; i++) {
                                if (!(i % copies))
                                        rebuilds_per_group = 0;
                                if ((!rs->dev[i].rdev.sb_page ||
@@ -1070,10 +1072,10 @@ static int validate_raid_redundancy(struct raid_set *rs)
                 * results in the need to treat the last (potentially larger)
                 * set differently.
                 */
-               group_size = (rs->md.raid_disks / copies);
-               last_group_start = (rs->md.raid_disks / group_size) - 1;
+               group_size = (raid_disks / copies);
+               last_group_start = (raid_disks / group_size) - 1;
                last_group_start *= group_size;
-               for (i = 0; i < rs->md.raid_disks; i++) {
+               for (i = 0; i < raid_disks; i++) {
                        if (!(i % copies) && !(i > last_group_start))
                                rebuilds_per_group = 0;
                        if ((!rs->dev[i].rdev.sb_page ||
@@ -1588,7 +1590,7 @@ static sector_t __rdev_sectors(struct raid_set *rs)
 {
        int i;
 
-       for (i = 0; i < rs->md.raid_disks; i++) {
+       for (i = 0; i < rs->raid_disks; i++) {
                struct md_rdev *rdev = &rs->dev[i].rdev;
 
                if (!test_bit(Journal, &rdev->flags) &&
@@ -3725,7 +3727,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
        if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
                if (mddev->sync_thread) {
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                       md_reap_sync_thread(mddev, false);
+                       md_reap_sync_thread(mddev);
                }
        } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
                return -EBUSY;
@@ -3766,13 +3768,13 @@ static int raid_iterate_devices(struct dm_target *ti,
        unsigned int i;
        int r = 0;
 
-       for (i = 0; !r && i < rs->md.raid_disks; i++)
-               if (rs->dev[i].data_dev)
-                       r = fn(ti,
-                                rs->dev[i].data_dev,
-                                0, /* No offset on data devs */
-                                rs->md.dev_sectors,
-                                data);
+       for (i = 0; !r && i < rs->raid_disks; i++) {
+               if (rs->dev[i].data_dev) {
+                       r = fn(ti, rs->dev[i].data_dev,
+                              0, /* No offset on data devs */
+                              rs->md.dev_sectors, data);
+               }
+       }
 
        return r;
 }
index d8f1618..2b75f1e 100644
@@ -555,6 +555,10 @@ static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
                unsigned long flags;
                /* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
                spin_lock_irqsave(&io->lock, flags);
+               if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
+                       spin_unlock_irqrestore(&io->lock, flags);
+                       return;
+               }
                dm_io_set_flag(io, DM_IO_ACCOUNTED);
                spin_unlock_irqrestore(&io->lock, flags);
        }
@@ -590,6 +594,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
        atomic_set(&io->io_count, 2);
        this_cpu_inc(*md->pending_io);
        io->orig_bio = bio;
+       io->split_bio = NULL;
        io->md = md;
        spin_lock_init(&io->lock);
        io->start_time = jiffies;
@@ -711,18 +716,18 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
 }
 
 static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
-                                                    int *srcu_idx, struct bio *bio)
+                                                    int *srcu_idx, unsigned bio_opf)
 {
-       if (bio->bi_opf & REQ_NOWAIT)
+       if (bio_opf & REQ_NOWAIT)
                return dm_get_live_table_fast(md);
        else
                return dm_get_live_table(md, srcu_idx);
 }
 
 static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
-                                        struct bio *bio)
+                                        unsigned bio_opf)
 {
-       if (bio->bi_opf & REQ_NOWAIT)
+       if (bio_opf & REQ_NOWAIT)
                dm_put_live_table_fast(md);
        else
                dm_put_live_table(md, srcu_idx);
@@ -883,7 +888,7 @@ static void dm_io_complete(struct dm_io *io)
 {
        blk_status_t io_error;
        struct mapped_device *md = io->md;
-       struct bio *bio = io->orig_bio;
+       struct bio *bio = io->split_bio ? io->split_bio : io->orig_bio;
 
        if (io->status == BLK_STS_DM_REQUEUE) {
                unsigned long flags;
@@ -935,9 +940,11 @@ static void dm_io_complete(struct dm_io *io)
                        if (io_error == BLK_STS_AGAIN) {
                                /* io_uring doesn't handle BLK_STS_AGAIN (yet) */
                                queue_io(md, bio);
+                               return;
                        }
                }
-               return;
+               if (io_error == BLK_STS_DM_REQUEUE)
+                       return;
        }
 
        if (bio_is_flush_with_data(bio)) {
@@ -1609,7 +1616,12 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
        ti = dm_table_find_target(ci->map, ci->sector);
        if (unlikely(!ti))
                return BLK_STS_IOERR;
-       else if (unlikely(ci->is_abnormal_io))
+
+       if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) &&
+           unlikely(!dm_target_supports_nowait(ti->type)))
+               return BLK_STS_NOTSUPP;
+
+       if (unlikely(ci->is_abnormal_io))
                return __process_abnormal_io(ci, ti);
 
        /*
@@ -1682,9 +1694,11 @@ static void dm_split_and_process_bio(struct mapped_device *md,
         * Remainder must be passed to submit_bio_noacct() so it gets handled
         * *after* bios already submitted have been completely processed.
         */
-       bio_trim(bio, io->sectors, ci.sector_count);
-       trace_block_split(bio, bio->bi_iter.bi_sector);
-       bio_inc_remaining(bio);
+       WARN_ON_ONCE(!dm_io_flagged(io, DM_IO_WAS_SPLIT));
+       io->split_bio = bio_split(bio, io->sectors, GFP_NOIO,
+                                 &md->queue->bio_split);
+       bio_chain(io->split_bio, bio);
+       trace_block_split(io->split_bio, bio->bi_iter.bi_sector);
        submit_bio_noacct(bio);
 out:
        /*
@@ -1711,8 +1725,9 @@ static void dm_submit_bio(struct bio *bio)
        struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
        int srcu_idx;
        struct dm_table *map;
+       unsigned bio_opf = bio->bi_opf;
 
-       map = dm_get_live_table_bio(md, &srcu_idx, bio);
+       map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
 
        /* If suspended, or map not yet available, queue this IO for later */
        if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
@@ -1728,7 +1743,7 @@ static void dm_submit_bio(struct bio *bio)
 
        dm_split_and_process_bio(md, map, bio);
 out:
-       dm_put_live_table_bio(md, srcu_idx, bio);
+       dm_put_live_table_bio(md, srcu_idx, bio_opf);
 }
 
 static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
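
Rather than trimming the original bio in place and bumping its remaining count, the hunk above clones the mapped prefix off with bio_split() and links completion back with bio_chain(), which is what lets dm_io_complete() pick the right bio via io->split_bio. Stripped to its essentials, the idiom looks roughly like this (bs stands in for whatever bio_set the queue provides):

	/* carve the first 'sectors' sectors off 'bio' */
	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

	bio_chain(split, bio);	/* parent completes after both halves do */
	submit_bio_noacct(bio);	/* hand the remainder back to the block layer */
	/* ... process 'split' as this iteration's work ... */
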
index 8273ac5..c7ecb0b 100644
@@ -4831,7 +4831,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
                                flush_workqueue(md_misc_wq);
                        if (mddev->sync_thread) {
                                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                               md_reap_sync_thread(mddev, true);
+                               md_reap_sync_thread(mddev);
                        }
                        mddev_unlock(mddev);
                }
@@ -6197,7 +6197,7 @@ static void __md_stop_writes(struct mddev *mddev)
                flush_workqueue(md_misc_wq);
        if (mddev->sync_thread) {
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-               md_reap_sync_thread(mddev, true);
+               md_reap_sync_thread(mddev);
        }
 
        del_timer_sync(&mddev->safemode_timer);
@@ -9303,7 +9303,7 @@ void md_check_recovery(struct mddev *mddev)
                         * ->spare_active and clear saved_raid_disk
                         */
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                       md_reap_sync_thread(mddev, true);
+                       md_reap_sync_thread(mddev);
                        clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
@@ -9338,7 +9338,7 @@ void md_check_recovery(struct mddev *mddev)
                        goto unlock;
                }
                if (mddev->sync_thread) {
-                       md_reap_sync_thread(mddev, true);
+                       md_reap_sync_thread(mddev);
                        goto unlock;
                }
                /* Set RUNNING before clearing NEEDED to avoid
@@ -9411,18 +9411,14 @@ void md_check_recovery(struct mddev *mddev)
 }
 EXPORT_SYMBOL(md_check_recovery);
 
-void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held)
+void md_reap_sync_thread(struct mddev *mddev)
 {
        struct md_rdev *rdev;
        sector_t old_dev_sectors = mddev->dev_sectors;
        bool is_reshaped = false;
 
-       if (reconfig_mutex_held)
-               mddev_unlock(mddev);
        /* resync has finished, collect result */
        md_unregister_thread(&mddev->sync_thread);
-       if (reconfig_mutex_held)
-               mddev_lock_nointr(mddev);
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
            mddev->degraded != mddev->raid_disks) {
index 5f62c46..cf2cbb1 100644
@@ -719,7 +719,7 @@ extern struct md_thread *md_register_thread(
 extern void md_unregister_thread(struct md_thread **threadp);
 extern void md_wakeup_thread(struct md_thread *thread);
 extern void md_check_recovery(struct mddev *mddev);
-extern void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held);
+extern void md_reap_sync_thread(struct mddev *mddev);
 extern int mddev_init_writes_pending(struct mddev *mddev);
 extern bool md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);
index 973e2e0..0a2e480 100644
@@ -629,9 +629,9 @@ static void ppl_do_flush(struct ppl_io_unit *io)
                if (bdev) {
                        struct bio *bio;
 
-                       bio = bio_alloc_bioset(bdev, 0, GFP_NOIO,
+                       bio = bio_alloc_bioset(bdev, 0,
                                               REQ_OP_WRITE | REQ_PREFLUSH,
-                                              &ppl_conf->flush_bs);
+                                              GFP_NOIO, &ppl_conf->flush_bs);
                        bio->bi_private = io;
                        bio->bi_end_io = ppl_flush_endio;
 
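The raid5-ppl change is purely an argument-order fix: since the bio allocation rework in recent kernels, bio_alloc_bioset() takes the operation flags before the GFP mask, and because both parameters are plain integer types the swapped call compiled without a warning. The signature, roughly, for the kernel version at hand:

	struct bio *bio_alloc_bioset(struct block_device *bdev,
				     unsigned short nr_vecs,
				     unsigned int opf,	/* REQ_OP_* | REQ_* */
				     gfp_t gfp_mask,
				     struct bio_set *bs);
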
index 5d09256..c8539d0 100644
@@ -7304,7 +7304,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                goto abort;
        conf->mddev = mddev;
 
-       if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
+       ret = -ENOMEM;
+       conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!conf->stripe_hashtbl)
                goto abort;
 
        /* We init hash_locks[0] separately to that it can be used
@@ -7933,7 +7935,7 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
        int err = 0;
        int number = rdev->raid_disk;
        struct md_rdev __rcu **rdevp;
-       struct disk_info *p = conf->disks + number;
+       struct disk_info *p;
        struct md_rdev *tmp;
 
        print_raid5_conf(conf);
@@ -7952,6 +7954,9 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
                log_exit(conf);
                return 0;
        }
+       if (unlikely(number >= conf->pool_size))
+               return 0;
+       p = conf->disks + number;
        if (rdev == rcu_access_pointer(p->rdev))
                rdevp = &p->rdev;
        else if (rdev == rcu_access_pointer(p->replacement))
@@ -8062,6 +8067,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
         */
        if (rdev->saved_raid_disk >= 0 &&
            rdev->saved_raid_disk >= first &&
+           rdev->saved_raid_disk <= last &&
            conf->disks[rdev->saved_raid_disk].rdev == NULL)
                first = rdev->saved_raid_disk;
 
index b7800b3..ac1a411 100644
@@ -105,6 +105,7 @@ config TI_EMIF
 config OMAP_GPMC
        tristate "Texas Instruments OMAP SoC GPMC driver"
        depends on OF_ADDRESS
+       depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
        select GPIOLIB
        help
          This driver is for the General Purpose Memory Controller (GPMC)
index 86a3d34..4c5154e 100644
@@ -404,13 +404,16 @@ static int mtk_smi_device_link_common(struct device *dev, struct device **com_dev)
        of_node_put(smi_com_node);
        if (smi_com_pdev) {
                /* smi common is the supplier, make sure it is ready before use */
-               if (!platform_get_drvdata(smi_com_pdev))
+               if (!platform_get_drvdata(smi_com_pdev)) {
+                       put_device(&smi_com_pdev->dev);
                        return -EPROBE_DEFER;
+               }
                smi_com_dev = &smi_com_pdev->dev;
                link = device_link_add(dev, smi_com_dev,
                                       DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
                if (!link) {
                        dev_err(dev, "Unable to link smi-common dev\n");
+                       put_device(&smi_com_pdev->dev);
                        return -ENODEV;
                }
                *com_dev = smi_com_dev;
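
The smi-common platform device here is obtained with of_find_device_by_node(), which returns it with an elevated reference count, so both early returns now drop that reference. A sketch of the lookup/cleanup pairing:

	struct platform_device *pdev = of_find_device_by_node(np);

	of_node_put(np);			/* node ref no longer needed */
	if (!pdev)
		return -EPROBE_DEFER;

	if (!platform_get_drvdata(pdev)) {	/* supplier not probed yet */
		put_device(&pdev->dev);		/* balance the lookup's get */
		return -EPROBE_DEFER;
	}
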
index 4733e78..c491cd5 100644
@@ -1187,33 +1187,39 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc)
 
        dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
                                             sizeof(u32), GFP_KERNEL);
-       if (!dmc->timing_row)
-               return -ENOMEM;
+       if (!dmc->timing_row) {
+               ret = -ENOMEM;
+               goto put_node;
+       }
 
        dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
                                              sizeof(u32), GFP_KERNEL);
-       if (!dmc->timing_data)
-               return -ENOMEM;
+       if (!dmc->timing_data) {
+               ret = -ENOMEM;
+               goto put_node;
+       }
 
        dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
                                               sizeof(u32), GFP_KERNEL);
-       if (!dmc->timing_power)
-               return -ENOMEM;
+       if (!dmc->timing_power) {
+               ret = -ENOMEM;
+               goto put_node;
+       }
 
        dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev,
                                                 DDR_TYPE_LPDDR3,
                                                 &dmc->timings_arr_size);
        if (!dmc->timings) {
-               of_node_put(np_ddr);
                dev_warn(dmc->dev, "could not get timings from DT\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto put_node;
        }
 
        dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev);
        if (!dmc->min_tck) {
-               of_node_put(np_ddr);
                dev_warn(dmc->dev, "could not get tck from DT\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto put_node;
        }
 
        /* Sorted array of OPPs with frequency ascending */
@@ -1227,13 +1233,14 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc)
                                             clk_period_ps);
        }
 
-       of_node_put(np_ddr);
 
        /* Take the highest frequency's timings as 'bypass' */
        dmc->bypass_timing_row = dmc->timing_row[idx - 1];
        dmc->bypass_timing_data = dmc->timing_data[idx - 1];
        dmc->bypass_timing_power = dmc->timing_power[idx - 1];
 
+put_node:
+       of_node_put(np_ddr);
        return ret;
 }
 
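All of the early returns above used to leak (or individually drop) the np_ddr reference; routing them through one put_node label is the conventional goto-style cleanup for device-tree node references. A compact sketch of the shape, assuming a hypothetical phandle lookup:

	struct device_node *np;
	void *buf;
	int ret = 0;

	np = of_parse_phandle(dev->of_node, "ddr", 0);	/* illustrative */
	if (!np)
		return -ENODEV;

	buf = devm_kmalloc(dev, len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto put_node;
	}
	/* ... further setup that uses np ... */
put_node:
	of_node_put(np);	/* balanced on every exit path */
	return ret;
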
index d6cd553..69f9b03 100644
@@ -232,9 +232,9 @@ static int ssc_probe(struct platform_device *pdev)
        clk_disable_unprepare(ssc->clk);
 
        ssc->irq = platform_get_irq(pdev, 0);
-       if (!ssc->irq) {
+       if (ssc->irq < 0) {
                dev_dbg(&pdev->dev, "could not get irq\n");
-               return -ENXIO;
+               return ssc->irq;
        }
 
        mutex_lock(&user_lock);
index 749cc5a..b1e7603 100644
@@ -407,6 +407,8 @@ static void rts5261_init_from_hw(struct rtsx_pcr *pcr)
                // default
                setting_reg1 = PCR_SETTING_REG1;
                setting_reg2 = PCR_SETTING_REG2;
+       } else {
+               return;
        }
 
        pci_read_config_dword(pdev, setting_reg2, &lval2);
index 1ef9b61..f150d87 100644
@@ -631,16 +631,20 @@ static int rtsx_usb_probe(struct usb_interface *intf,
 
        ucr->pusb_dev = usb_dev;
 
-       ucr->iobuf = usb_alloc_coherent(ucr->pusb_dev, IOBUF_SIZE,
-                       GFP_KERNEL, &ucr->iobuf_dma);
-       if (!ucr->iobuf)
+       ucr->cmd_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL);
+       if (!ucr->cmd_buf)
                return -ENOMEM;
 
+       ucr->rsp_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL);
+       if (!ucr->rsp_buf) {
+               ret = -ENOMEM;
+               goto out_free_cmd_buf;
+       }
+
        usb_set_intfdata(intf, ucr);
 
        ucr->vendor_id = id->idVendor;
        ucr->product_id = id->idProduct;
-       ucr->cmd_buf = ucr->rsp_buf = ucr->iobuf;
 
        mutex_init(&ucr->dev_mutex);
 
@@ -668,8 +672,11 @@ static int rtsx_usb_probe(struct usb_interface *intf,
 
 out_init_fail:
        usb_set_intfdata(ucr->pusb_intf, NULL);
-       usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
-                       ucr->iobuf_dma);
+       kfree(ucr->rsp_buf);
+       ucr->rsp_buf = NULL;
+out_free_cmd_buf:
+       kfree(ucr->cmd_buf);
+       ucr->cmd_buf = NULL;
        return ret;
 }
 
@@ -682,8 +689,12 @@ static void rtsx_usb_disconnect(struct usb_interface *intf)
        mfd_remove_devices(&intf->dev);
 
        usb_set_intfdata(ucr->pusb_intf, NULL);
-       usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
-                       ucr->iobuf_dma);
+
+       kfree(ucr->cmd_buf);
+       ucr->cmd_buf = NULL;
+
+       kfree(ucr->rsp_buf);
+       ucr->rsp_buf = NULL;
 }
 
 #ifdef CONFIG_PM
index 8d169a3..bdffc65 100644
@@ -79,6 +79,10 @@ static int at25_ee_read(void *priv, unsigned int offset,
 {
        struct at25_data *at25 = priv;
        char *buf = val;
+       size_t max_chunk = spi_max_transfer_size(at25->spi);
+       unsigned int msg_offset = offset;
+       size_t bytes_left = count;
+       size_t segment;
        u8                      *cp;
        ssize_t                 status;
        struct spi_transfer     t[2];
@@ -92,54 +96,58 @@ static int at25_ee_read(void *priv, unsigned int offset,
        if (unlikely(!count))
                return -EINVAL;
 
-       cp = at25->command;
+       do {
+               segment = min(bytes_left, max_chunk);
+               cp = at25->command;
 
-       instr = AT25_READ;
-       if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
-               if (offset >= BIT(at25->addrlen * 8))
-                       instr |= AT25_INSTR_BIT3;
+               instr = AT25_READ;
+               if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
+                       if (msg_offset >= BIT(at25->addrlen * 8))
+                               instr |= AT25_INSTR_BIT3;
 
-       mutex_lock(&at25->lock);
+               mutex_lock(&at25->lock);
 
-       *cp++ = instr;
-
-       /* 8/16/24-bit address is written MSB first */
-       switch (at25->addrlen) {
-       default:        /* case 3 */
-               *cp++ = offset >> 16;
-               fallthrough;
-       case 2:
-               *cp++ = offset >> 8;
-               fallthrough;
-       case 1:
-       case 0: /* can't happen: for better code generation */
-               *cp++ = offset >> 0;
-       }
+               *cp++ = instr;
 
-       spi_message_init(&m);
-       memset(t, 0, sizeof(t));
+               /* 8/16/24-bit address is written MSB first */
+               switch (at25->addrlen) {
+               default:        /* case 3 */
+                       *cp++ = msg_offset >> 16;
+                       fallthrough;
+               case 2:
+                       *cp++ = msg_offset >> 8;
+                       fallthrough;
+               case 1:
+               case 0: /* can't happen: for better code generation */
+                       *cp++ = msg_offset >> 0;
+               }
 
-       t[0].tx_buf = at25->command;
-       t[0].len = at25->addrlen + 1;
-       spi_message_add_tail(&t[0], &m);
+               spi_message_init(&m);
+               memset(t, 0, sizeof(t));
 
-       t[1].rx_buf = buf;
-       t[1].len = count;
-       spi_message_add_tail(&t[1], &m);
+               t[0].tx_buf = at25->command;
+               t[0].len = at25->addrlen + 1;
+               spi_message_add_tail(&t[0], &m);
 
-       /*
-        * Read it all at once.
-        *
-        * REVISIT that's potentially a problem with large chips, if
-        * other devices on the bus need to be accessed regularly or
-        * this chip is clocked very slowly.
-        */
-       status = spi_sync(at25->spi, &m);
-       dev_dbg(&at25->spi->dev, "read %zu bytes at %d --> %zd\n",
-               count, offset, status);
+               t[1].rx_buf = buf;
+               t[1].len = segment;
+               spi_message_add_tail(&t[1], &m);
 
-       mutex_unlock(&at25->lock);
-       return status;
+               status = spi_sync(at25->spi, &m);
+
+               mutex_unlock(&at25->lock);
+
+               if (status)
+                       return status;
+
+               msg_offset += segment;
+               buf += segment;
+               bytes_left -= segment;
+       } while (bytes_left > 0);
+
+       dev_dbg(&at25->spi->dev, "read %zu bytes at %d\n",
+               count, offset);
+       return 0;
 }
 
 /* Read extra registers as ID or serial number */
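
With this change the read path, like the write path, never issues a single SPI message larger than spi_max_transfer_size() allows; it loops, advancing the EEPROM address and destination buffer by one clamped segment per message. The chunking skeleton, reduced to its core (read_one_chunk() is a hypothetical stand-in for building and running one spi_message):

	size_t max_chunk = spi_max_transfer_size(spi);
	size_t left = count, segment;

	do {
		segment = min(left, max_chunk);
		/* hypothetical helper: one bounded SPI transaction */
		status = read_one_chunk(spi, offset, buf, segment);
		if (status)
			return status;
		offset += segment;
		buf += segment;
		left -= segment;
	} while (left > 0);
	return 0;
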
@@ -190,6 +198,7 @@ ATTRIBUTE_GROUPS(sernum);
 static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
 {
        struct at25_data *at25 = priv;
+       size_t maxsz = spi_max_transfer_size(at25->spi);
        const char *buf = val;
        int                     status = 0;
        unsigned                buf_size;
@@ -218,7 +227,7 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
        do {
                unsigned long   timeout, retries;
                unsigned        segment;
-               unsigned        offset = (unsigned) off;
+               unsigned        offset = off;
                u8              *cp = bounce;
                int             sr;
                u8              instr;
@@ -253,6 +262,8 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
                segment = buf_size - (offset % buf_size);
                if (segment > count)
                        segment = count;
+               if (segment > maxsz)
+                       segment = maxsz;
                memcpy(cp, buf, segment);
                status = spi_write(at25->spi, bounce,
                                segment + at25->addrlen + 1);
index 2e0aa74..95ef971 100644
@@ -13,10 +13,13 @@ lkdtm-$(CONFIG_LKDTM)               += cfi.o
 lkdtm-$(CONFIG_LKDTM)          += fortify.o
 lkdtm-$(CONFIG_PPC_64S_HASH_MMU)       += powerpc.o
 
-KASAN_SANITIZE_rodata.o                := n
 KASAN_SANITIZE_stackleak.o     := n
-KCOV_INSTRUMENT_rodata.o       := n
-CFLAGS_REMOVE_rodata.o         += $(CC_FLAGS_LTO)
+
+KASAN_SANITIZE_rodata.o                        := n
+KCSAN_SANITIZE_rodata.o                        := n
+KCOV_INSTRUMENT_rodata.o               := n
+OBJECT_FILES_NON_STANDARD_rodata.o     := y
+CFLAGS_REMOVE_rodata.o                 += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)
 
 OBJCOPYFLAGS :=
 OBJCOPYFLAGS_rodata_objcopy.o  := \
index cebcca6..cf2b826 100644
@@ -1351,7 +1351,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 
                if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
                    dev->hbm_state != MEI_HBM_CAP_SETUP) {
-                       if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+                       if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+                           dev->dev_state == MEI_DEV_POWERING_DOWN) {
                                dev_dbg(dev->dev, "hbm: capabilities response: on shutdown, ignoring\n");
                                return 0;
                        }
index 64ce3f8..15e8e2b 100644
 #define MEI_DEV_ID_ADP_P      0x51E0  /* Alder Lake Point P */
 #define MEI_DEV_ID_ADP_N      0x54E0  /* Alder Lake Point N */
 
+#define MEI_DEV_ID_RPL_S      0x7A68  /* Raptor Lake Point S */
+
 /*
  * MEI HW Section
  */
index 9870bf7..befa491 100644
@@ -1154,6 +1154,8 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
                        ret = mei_me_d0i3_exit_sync(dev);
                        if (ret)
                                return ret;
+               } else {
+                       hw->pg_state = MEI_PG_OFF;
                }
        }
 
index 33e5882..5435604 100644
@@ -116,6 +116,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
 
+       {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+
        /* required last entry */
        {0, }
 };
index 195dc89..9da4489 100644
@@ -1356,7 +1356,7 @@ static void msdc_data_xfer_next(struct msdc_host *host, struct mmc_request *mrq)
                msdc_request_done(host, mrq);
 }
 
-static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
+static void msdc_data_xfer_done(struct msdc_host *host, u32 events,
                                struct mmc_request *mrq, struct mmc_data *data)
 {
        struct mmc_command *stop;
@@ -1376,7 +1376,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
        spin_unlock_irqrestore(&host->lock, flags);
 
        if (done)
-               return true;
+               return;
        stop = data->stop;
 
        if (check_data || (stop && stop->error)) {
@@ -1385,12 +1385,15 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
                sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP,
                                1);
 
+               ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CTRL, val,
+                                               !(val & MSDC_DMA_CTRL_STOP), 1, 20000);
+               if (ret)
+                       dev_dbg(host->dev, "DMA stop timed out\n");
+
                ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CFG, val,
                                                !(val & MSDC_DMA_CFG_STS), 1, 20000);
-               if (ret) {
-                       dev_dbg(host->dev, "DMA stop timed out\n");
-                       return false;
-               }
+               if (ret)
+                       dev_dbg(host->dev, "DMA inactive timed out\n");
 
                sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask);
                dev_dbg(host->dev, "DMA stop\n");
@@ -1415,9 +1418,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
                }
 
                msdc_data_xfer_next(host, mrq);
-               done = true;
        }
-       return done;
 }
 
 static void msdc_set_buswidth(struct msdc_host *host, u32 width)
@@ -2416,6 +2417,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
        if (recovery) {
                sdr_set_field(host->base + MSDC_DMA_CTRL,
                              MSDC_DMA_CTRL_STOP, 1);
+               if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CTRL, val,
+                       !(val & MSDC_DMA_CTRL_STOP), 1, 3000)))
+                       return;
                if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val,
                        !(val & MSDC_DMA_CFG_STS), 1, 3000)))
                        return;
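
The polls above use readl_poll_timeout_atomic() to busy-wait, with a bounded
timeout, until the controller clears a bit. A hedged sketch of the call
pattern (MSDC_DMA_CTRL and MSDC_DMA_CTRL_STOP are the driver's own macros;
the wrapper function is illustrative):

    #include <linux/iopoll.h>

    /*
     * Poll every 1 us, for at most 20 ms, until the DMA stop bit
     * clears; returns 0 on success or -ETIMEDOUT. The _atomic
     * variant may run with interrupts off or under a spinlock.
     */
    static int wait_dma_stopped(void __iomem *base)
    {
            u32 val;

            return readl_poll_timeout_atomic(base + MSDC_DMA_CTRL, val,
                                             !(val & MSDC_DMA_CTRL_STOP),
                                             1, 20000);
    }
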
index 86e867f..033be55 100644
@@ -1298,8 +1298,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
        /*
         * omap_device_pm_domain has callbacks to enable the main
         * functional clock, interface clock and also configure the
-        * SYSCONFIG register of omap devices. The callback will be invoked
-        * as part of pm_runtime_get_sync.
+        * SYSCONFIG register to clear any boot loader set voltage
+        * capabilities before calling sdhci_setup_host(). The
+        * callback will be invoked as part of pm_runtime_get_sync.
         */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, 50);
@@ -1441,7 +1442,8 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
 
-       sdhci_runtime_suspend_host(host);
+       if (omap_host->con != -EINVAL)
+               sdhci_runtime_suspend_host(host);
 
        sdhci_omap_context_save(omap_host);
 
@@ -1458,10 +1460,10 @@ static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
 
        pinctrl_pm_select_default_state(dev);
 
-       if (omap_host->con != -EINVAL)
+       if (omap_host->con != -EINVAL) {
                sdhci_omap_context_restore(omap_host);
-
-       sdhci_runtime_resume_host(host, 0);
+               sdhci_runtime_resume_host(host, 0);
+       }
 
        return 0;
 }
index 92c20cb..0d4d343 100644
@@ -152,6 +152,8 @@ static int sdhci_o2_get_cd(struct mmc_host *mmc)
 
        if (!(sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1) & O2_PLL_LOCK_STATUS))
                sdhci_o2_enable_internal_clock(host);
+       else
+               sdhci_o2_wait_card_detect_stable(host);
 
        return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
 }
index 0b68d05..93da236 100644
@@ -850,9 +850,10 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
        unsigned int tRP_ps;
        bool use_half_period;
        int sample_delay_ps, sample_delay_factor;
-       u16 busy_timeout_cycles;
+       unsigned int busy_timeout_cycles;
        u8 wrn_dly_sel;
        unsigned long clk_rate, min_rate;
+       u64 busy_timeout_ps;
 
        if (sdr->tRC_min >= 30000) {
                /* ONFI non-EDO modes [0-3] */
@@ -885,12 +886,13 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
        addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
        data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
        data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
-       busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
+       busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
+       busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
 
        hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
                      BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
                      BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
-       hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
+       hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096));
 
        /*
         * Derive NFC ideal delay from {3}:
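
The BUSY_TIMEOUT field counts in blocks of 4096 GPMI clock cycles, so the
old code's multiplication by 4096 went the wrong way, and the u16 cycle
count could overflow for the long tBERS/tPROG worst cases the hunk now
uses. A worked sketch of the corrected unit conversion (helper name
illustrative; the 4096 granularity is taken from the hunk above):

    #include <linux/math.h>   /* DIV_ROUND_UP_ULL */
    #include <linux/types.h>

    /*
     * Convert a worst-case busy time in picoseconds into the
     * BUSY_TIMEOUT register value, which counts blocks of 4096
     * GPMI clock cycles. Rounds up so the timeout never fires
     * early.
     */
    static u32 busy_timeout_field(u64 busy_timeout_ps, u64 period_ps)
    {
            u64 cycles = DIV_ROUND_UP_ULL(busy_timeout_ps, period_ps);

            return DIV_ROUND_UP_ULL(cycles, 4096);
    }
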
index 88c2440..dacc552 100644
@@ -29,9 +29,6 @@ struct nand_flash_dev nand_flash_ids[] = {
        {"TC58NVG0S3E 1G 3.3V 8-bit",
                { .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
                  SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512), },
-       {"TC58NVG0S3HTA00 1G 3.3V 8-bit",
-               { .id = {0x98, 0xf1, 0x80, 0x15} },
-                 SZ_2K, SZ_128, SZ_128K, 0, 4, 128, NAND_ECC_INFO(8, SZ_512), },
        {"TC58NVG2S0F 4G 3.3V 8-bit",
                { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
                  SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
index b2a4f99..8c1eeb5 100644
@@ -94,6 +94,7 @@ config WIREGUARD
        select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
        select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
        select CRYPTO_POLY1305_MIPS if MIPS
+       select CRYPTO_CHACHA_S390 if S390
        help
          WireGuard is a secure, fast, and easy to use replacement for IPSec
          that uses modern cryptography and clever networking tricks. It's
index be2719a..e019526 100644
@@ -563,7 +563,7 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
        ihv3->nsrcs     = 0;
        ihv3->resv      = 0;
        ihv3->suppress  = false;
-       ihv3->qrv       = amt->net->ipv4.sysctl_igmp_qrv;
+       ihv3->qrv       = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
        ihv3->csum      = 0;
        csum            = &ihv3->csum;
        csum_start      = (void *)ihv3;
@@ -577,14 +577,14 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
        return skb;
 }
 
-static void __amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
-                                  bool validate)
+static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
+                                bool validate)
 {
        if (validate && amt->status >= status)
                return;
        netdev_dbg(amt->dev, "Update GW status %s -> %s",
                   status_str[amt->status], status_str[status]);
-       amt->status = status;
+       WRITE_ONCE(amt->status, status);
 }
 
 static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
@@ -600,14 +600,6 @@ static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
        tunnel->status = status;
 }
 
-static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
-                                bool validate)
-{
-       spin_lock_bh(&amt->lock);
-       __amt_update_gw_status(amt, status, validate);
-       spin_unlock_bh(&amt->lock);
-}
-
 static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
                                    enum amt_status status, bool validate)
 {
@@ -700,9 +692,7 @@ static void amt_send_discovery(struct amt_dev *amt)
        if (unlikely(net_xmit_eval(err)))
                amt->dev->stats.tx_errors++;
 
-       spin_lock_bh(&amt->lock);
-       __amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
-       spin_unlock_bh(&amt->lock);
+       amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
 out:
        rcu_read_unlock();
 }
@@ -900,6 +890,28 @@ static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
 }
 #endif
 
+static bool amt_queue_event(struct amt_dev *amt, enum amt_event event,
+                           struct sk_buff *skb)
+{
+       int index;
+
+       spin_lock_bh(&amt->lock);
+       if (amt->nr_events >= AMT_MAX_EVENTS) {
+               spin_unlock_bh(&amt->lock);
+               return 1;
+       }
+
+       index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS;
+       amt->events[index].event = event;
+       amt->events[index].skb = skb;
+       amt->nr_events++;
+       amt->event_idx %= AMT_MAX_EVENTS;
+       queue_work(amt_wq, &amt->event_wq);
+       spin_unlock_bh(&amt->lock);
+
+       return 0;
+}
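
amt_queue_event() is the producer side of a fixed-size, spinlock-protected
ring: entries go in at (event_idx + nr_events) % AMT_MAX_EVENTS and the
worker, amt_event_work() further below, consumes from event_idx. Note the
driver returns nonzero when the queue is full. A stripped-down sketch of
the same pattern (names and sizes simplified):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define RING_MAX 16

    struct ring {
            spinlock_t lock;
            int head;               /* consumer index (event_idx) */
            int count;              /* queued entries (nr_events) */
            void *slot[RING_MAX];
    };

    /* Producer: returns false when the ring is full. */
    static bool ring_push(struct ring *r, void *item)
    {
            bool ok = false;

            spin_lock_bh(&r->lock);
            if (r->count < RING_MAX) {
                    r->slot[(r->head + r->count) % RING_MAX] = item;
                    r->count++;
                    ok = true;
            }
            spin_unlock_bh(&r->lock);
            return ok;
    }

    /* Consumer: returns NULL when the ring is empty. */
    static void *ring_pop(struct ring *r)
    {
            void *item = NULL;

            spin_lock_bh(&r->lock);
            if (r->count) {
                    item = r->slot[r->head];
                    r->head = (r->head + 1) % RING_MAX;
                    r->count--;
            }
            spin_unlock_bh(&r->lock);
            return item;
    }
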
+
 static void amt_secret_work(struct work_struct *work)
 {
        struct amt_dev *amt = container_of(to_delayed_work(work),
@@ -913,58 +925,72 @@ static void amt_secret_work(struct work_struct *work)
                         msecs_to_jiffies(AMT_SECRET_TIMEOUT));
 }
 
-static void amt_discovery_work(struct work_struct *work)
+static void amt_event_send_discovery(struct amt_dev *amt)
 {
-       struct amt_dev *amt = container_of(to_delayed_work(work),
-                                          struct amt_dev,
-                                          discovery_wq);
-
-       spin_lock_bh(&amt->lock);
        if (amt->status > AMT_STATUS_SENT_DISCOVERY)
                goto out;
        get_random_bytes(&amt->nonce, sizeof(__be32));
-       spin_unlock_bh(&amt->lock);
 
        amt_send_discovery(amt);
-       spin_lock_bh(&amt->lock);
 out:
        mod_delayed_work(amt_wq, &amt->discovery_wq,
                         msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
-       spin_unlock_bh(&amt->lock);
 }
 
-static void amt_req_work(struct work_struct *work)
+static void amt_discovery_work(struct work_struct *work)
 {
        struct amt_dev *amt = container_of(to_delayed_work(work),
                                           struct amt_dev,
-                                          req_wq);
+                                          discovery_wq);
+
+       if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL))
+               mod_delayed_work(amt_wq, &amt->discovery_wq,
+                                msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
+}
+
+static void amt_event_send_request(struct amt_dev *amt)
+{
        u32 exp;
 
-       spin_lock_bh(&amt->lock);
        if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
                goto out;
 
        if (amt->req_cnt > AMT_MAX_REQ_COUNT) {
                netdev_dbg(amt->dev, "Gateway is not ready");
                amt->qi = AMT_INIT_REQ_TIMEOUT;
-               amt->ready4 = false;
-               amt->ready6 = false;
+               WRITE_ONCE(amt->ready4, false);
+               WRITE_ONCE(amt->ready6, false);
                amt->remote_ip = 0;
-               __amt_update_gw_status(amt, AMT_STATUS_INIT, false);
+               amt_update_gw_status(amt, AMT_STATUS_INIT, false);
                amt->req_cnt = 0;
+               amt->nonce = 0;
                goto out;
        }
-       spin_unlock_bh(&amt->lock);
+
+       if (!amt->req_cnt) {
+               WRITE_ONCE(amt->ready4, false);
+               WRITE_ONCE(amt->ready6, false);
+               get_random_bytes(&amt->nonce, sizeof(__be32));
+       }
 
        amt_send_request(amt, false);
        amt_send_request(amt, true);
-       spin_lock_bh(&amt->lock);
-       __amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
+       amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
        amt->req_cnt++;
 out:
        exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
        mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
-       spin_unlock_bh(&amt->lock);
+}
+
+static void amt_req_work(struct work_struct *work)
+{
+       struct amt_dev *amt = container_of(to_delayed_work(work),
+                                          struct amt_dev,
+                                          req_wq);
+
+       if (amt_queue_event(amt, AMT_EVENT_SEND_REQUEST, NULL))
+               mod_delayed_work(amt_wq, &amt->req_wq,
+                                msecs_to_jiffies(100));
 }
 
 static bool amt_send_membership_update(struct amt_dev *amt,
@@ -1220,7 +1246,8 @@ static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
                /* Gateway only passes IGMP/MLD packets */
                if (!report)
                        goto free;
-               if ((!v6 && !amt->ready4) || (v6 && !amt->ready6))
+               if ((!v6 && !READ_ONCE(amt->ready4)) ||
+                   (v6 && !READ_ONCE(amt->ready6)))
                        goto free;
                if (amt_send_membership_update(amt, skb,  v6))
                        goto free;
@@ -2236,6 +2263,10 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
            ipv4_is_zeronet(amta->ip4))
                return true;
 
+       if (amt->status != AMT_STATUS_SENT_DISCOVERY ||
+           amt->nonce != amta->nonce)
+               return true;
+
        amt->remote_ip = amta->ip4;
        netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
        mod_delayed_work(amt_wq, &amt->req_wq, 0);
@@ -2251,6 +2282,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
        struct ethhdr *eth;
        struct iphdr *iph;
 
+       if (READ_ONCE(amt->status) != AMT_STATUS_SENT_UPDATE)
+               return true;
+
        hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
        if (!pskb_may_pull(skb, hdr_size))
                return true;
@@ -2325,6 +2359,9 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
        if (amtmq->reserved || amtmq->version)
                return true;
 
+       if (amtmq->nonce != amt->nonce)
+               return true;
+
        hdr_size -= sizeof(*eth);
        if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
                return true;
@@ -2339,6 +2376,9 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
 
        iph = ip_hdr(skb);
        if (iph->version == 4) {
+               if (READ_ONCE(amt->ready4))
+                       return true;
+
                if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
                                   sizeof(*ihv3)))
                        return true;
@@ -2349,12 +2389,10 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
                ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
                skb_reset_transport_header(skb);
                skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
-               spin_lock_bh(&amt->lock);
-               amt->ready4 = true;
+               WRITE_ONCE(amt->ready4, true);
                amt->mac = amtmq->response_mac;
                amt->req_cnt = 0;
                amt->qi = ihv3->qqic;
-               spin_unlock_bh(&amt->lock);
                skb->protocol = htons(ETH_P_IP);
                eth->h_proto = htons(ETH_P_IP);
                ip_eth_mc_map(iph->daddr, eth->h_dest);
@@ -2363,6 +2401,9 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
                struct mld2_query *mld2q;
                struct ipv6hdr *ip6h;
 
+               if (READ_ONCE(amt->ready6))
+                       return true;
+
                if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
                                   sizeof(*mld2q)))
                        return true;
@@ -2374,12 +2415,10 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
                mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
                skb_reset_transport_header(skb);
                skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
-               spin_lock_bh(&amt->lock);
-               amt->ready6 = true;
+               WRITE_ONCE(amt->ready6, true);
                amt->mac = amtmq->response_mac;
                amt->req_cnt = 0;
                amt->qi = mld2q->mld2q_qqic;
-               spin_unlock_bh(&amt->lock);
                skb->protocol = htons(ETH_P_IPV6);
                eth->h_proto = htons(ETH_P_IPV6);
                ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
@@ -2392,12 +2431,14 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
        skb->pkt_type = PACKET_MULTICAST;
        skb->ip_summed = CHECKSUM_NONE;
        len = skb->len;
+       local_bh_disable();
        if (__netif_rx(skb) == NET_RX_SUCCESS) {
                amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
                dev_sw_netstats_rx_add(amt->dev, len);
        } else {
                amt->dev->stats.rx_dropped++;
        }
+       local_bh_enable();
 
        return false;
 }
@@ -2638,7 +2679,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
                if (tunnel->ip4 == iph->saddr)
                        goto send;
 
+       spin_lock_bh(&amt->lock);
        if (amt->nr_tunnels >= amt->max_tunnels) {
+               spin_unlock_bh(&amt->lock);
                icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
                return true;
        }
@@ -2646,8 +2689,10 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
        tunnel = kzalloc(sizeof(*tunnel) +
                         (sizeof(struct hlist_head) * amt->hash_buckets),
                         GFP_ATOMIC);
-       if (!tunnel)
+       if (!tunnel) {
+               spin_unlock_bh(&amt->lock);
                return true;
+       }
 
        tunnel->source_port = udph->source;
        tunnel->ip4 = iph->saddr;
@@ -2660,10 +2705,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
 
        INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);
 
-       spin_lock_bh(&amt->lock);
        list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
        tunnel->key = amt->key;
-       amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
+       __amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
        amt->nr_tunnels++;
        mod_delayed_work(amt_wq, &tunnel->gc_wq,
                         msecs_to_jiffies(amt_gmi(amt)));
@@ -2688,6 +2732,38 @@ send:
        return false;
 }
 
+static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb)
+{
+       int type = amt_parse_type(skb);
+       int err = 1;
+
+       if (type == -1)
+               goto drop;
+
+       if (amt->mode == AMT_MODE_GATEWAY) {
+               switch (type) {
+               case AMT_MSG_ADVERTISEMENT:
+                       err = amt_advertisement_handler(amt, skb);
+                       break;
+               case AMT_MSG_MEMBERSHIP_QUERY:
+                       err = amt_membership_query_handler(amt, skb);
+                       if (!err)
+                               return;
+                       break;
+               default:
+                       netdev_dbg(amt->dev, "Invalid type of Gateway\n");
+                       break;
+               }
+       }
+drop:
+       if (err) {
+               amt->dev->stats.rx_dropped++;
+               kfree_skb(skb);
+       } else {
+               consume_skb(skb);
+       }
+}
+
 static int amt_rcv(struct sock *sk, struct sk_buff *skb)
 {
        struct amt_dev *amt;
@@ -2719,8 +2795,12 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
                                err = true;
                                goto drop;
                        }
-                       err = amt_advertisement_handler(amt, skb);
-                       break;
+                       if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
+                               netdev_dbg(amt->dev, "AMT Event queue full\n");
+                               err = true;
+                               goto drop;
+                       }
+                       goto out;
                case AMT_MSG_MULTICAST_DATA:
                        if (iph->saddr != amt->remote_ip) {
                                netdev_dbg(amt->dev, "Invalid Relay IP\n");
@@ -2738,11 +2818,12 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
                                err = true;
                                goto drop;
                        }
-                       err = amt_membership_query_handler(amt, skb);
-                       if (err)
+                       if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
+                               netdev_dbg(amt->dev, "AMT Event queue full\n");
+                               err = true;
                                goto drop;
-                       else
-                               goto out;
+                       }
+                       goto out;
                default:
                        err = true;
                        netdev_dbg(amt->dev, "Invalid type of Gateway\n");
@@ -2780,6 +2861,46 @@ out:
        return 0;
 }
 
+static void amt_event_work(struct work_struct *work)
+{
+       struct amt_dev *amt = container_of(work, struct amt_dev, event_wq);
+       struct sk_buff *skb;
+       u8 event;
+       int i;
+
+       for (i = 0; i < AMT_MAX_EVENTS; i++) {
+               spin_lock_bh(&amt->lock);
+               if (amt->nr_events == 0) {
+                       spin_unlock_bh(&amt->lock);
+                       return;
+               }
+               event = amt->events[amt->event_idx].event;
+               skb = amt->events[amt->event_idx].skb;
+               amt->events[amt->event_idx].event = AMT_EVENT_NONE;
+               amt->events[amt->event_idx].skb = NULL;
+               amt->nr_events--;
+               amt->event_idx++;
+               amt->event_idx %= AMT_MAX_EVENTS;
+               spin_unlock_bh(&amt->lock);
+
+               switch (event) {
+               case AMT_EVENT_RECEIVE:
+                       amt_gw_rcv(amt, skb);
+                       break;
+               case AMT_EVENT_SEND_DISCOVERY:
+                       amt_event_send_discovery(amt);
+                       break;
+               case AMT_EVENT_SEND_REQUEST:
+                       amt_event_send_request(amt);
+                       break;
+               default:
+                       if (skb)
+                               kfree_skb(skb);
+                       break;
+               }
+       }
+}
+
 static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
 {
        struct amt_dev *amt;
@@ -2804,7 +2925,7 @@ static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
                break;
        case AMT_MSG_REQUEST:
        case AMT_MSG_MEMBERSHIP_UPDATE:
-               if (amt->status >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
+               if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
                        mod_delayed_work(amt_wq, &amt->req_wq, 0);
                break;
        default:
@@ -2867,6 +2988,8 @@ static int amt_dev_open(struct net_device *dev)
 
        amt->ready4 = false;
        amt->ready6 = false;
+       amt->event_idx = 0;
+       amt->nr_events = 0;
 
        err = amt_socket_create(amt);
        if (err)
@@ -2874,6 +2997,7 @@ static int amt_dev_open(struct net_device *dev)
 
        amt->req_cnt = 0;
        amt->remote_ip = 0;
+       amt->nonce = 0;
        get_random_bytes(&amt->key, sizeof(siphash_key_t));
 
        amt->status = AMT_STATUS_INIT;
@@ -2892,6 +3016,8 @@ static int amt_dev_stop(struct net_device *dev)
        struct amt_dev *amt = netdev_priv(dev);
        struct amt_tunnel_list *tunnel, *tmp;
        struct socket *sock;
+       struct sk_buff *skb;
+       int i;
 
        cancel_delayed_work_sync(&amt->req_wq);
        cancel_delayed_work_sync(&amt->discovery_wq);
@@ -2904,6 +3030,15 @@ static int amt_dev_stop(struct net_device *dev)
        if (sock)
                udp_tunnel_sock_release(sock);
 
+       cancel_work_sync(&amt->event_wq);
+       for (i = 0; i < AMT_MAX_EVENTS; i++) {
+               skb = amt->events[i].skb;
+               if (skb)
+                       kfree_skb(skb);
+               amt->events[i].event = AMT_EVENT_NONE;
+               amt->events[i].skb = NULL;
+       }
+
        amt->ready4 = false;
        amt->ready6 = false;
        amt->req_cnt = 0;
@@ -3095,7 +3230,7 @@ static int amt_newlink(struct net *net, struct net_device *dev,
                goto err;
        }
        if (amt->mode == AMT_MODE_RELAY) {
-               amt->qrv = amt->net->ipv4.sysctl_igmp_qrv;
+               amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
                amt->qri = 10;
                dev->needed_headroom = amt->stream_dev->needed_headroom +
                                       AMT_RELAY_HLEN;
@@ -3146,8 +3281,8 @@ static int amt_newlink(struct net *net, struct net_device *dev,
        INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
        INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
        INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
+       INIT_WORK(&amt->event_wq, amt_event_work);
        INIT_LIST_HEAD(&amt->tunnel_list);
-
        return 0;
 err:
        dev_put(amt->stream_dev);
@@ -3280,7 +3415,7 @@ static int __init amt_init(void)
        if (err < 0)
                goto unregister_notifier;
 
-       amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1);
+       amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0);
        if (!amt_wq) {
                err = -ENOMEM;
                goto rtnl_unregister;
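
Several hunks in this file trade spinlock-protected accesses to
amt->status, amt->ready4 and amt->ready6 for READ_ONCE()/WRITE_ONCE().
These annotations provide no mutual exclusion; they only guarantee a
single untorn load or store that the compiler may not duplicate, fuse or
elide, which suffices for simple flag handoffs between the event worker
and the datapath. A minimal sketch of the idiom (illustrative flag, not
the amt structures):

    #include <linux/compiler.h>
    #include <linux/types.h>

    struct dev_state {
            bool ready;
    };

    /* Writer: publish the flag with one plain store. */
    static void set_ready(struct dev_state *s, bool ready)
    {
            WRITE_ONCE(s->ready, ready);
    }

    /* Reader (possibly concurrent, lockless): one untorn load. */
    static bool is_ready(const struct dev_state *s)
    {
            return READ_ONCE(s->ready);
    }
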
index a86b1f7..d7fb33c 100644
@@ -2228,7 +2228,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
                                temp_aggregator->num_of_ports--;
                                if (__agg_active_ports(temp_aggregator) == 0) {
                                        select_new_active_agg = temp_aggregator->is_active;
-                                       ad_clear_agg(temp_aggregator);
+                                       if (temp_aggregator->num_of_ports == 0)
+                                               ad_clear_agg(temp_aggregator);
                                        if (select_new_active_agg) {
                                                slave_info(bond->dev, slave->dev, "Removing an active aggregator\n");
                                                /* select new active aggregator */
index 303c8d3..007d43e 100644
@@ -1302,12 +1302,12 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
                return res;
 
        if (rlb_enabled) {
-               bond->alb_info.rlb_enabled = 1;
                res = rlb_initialize(bond);
                if (res) {
                        tlb_deinitialize(bond);
                        return res;
                }
+               bond->alb_info.rlb_enabled = 1;
        } else {
                bond->alb_info.rlb_enabled = 0;
        }
index f85372a..6ba4c83 100644
@@ -3684,9 +3684,11 @@ re_arm:
                if (!rtnl_trylock())
                        return;
 
-               if (should_notify_peers)
+               if (should_notify_peers) {
+                       bond->send_peer_notif--;
                        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
                                                 bond->dev);
+               }
                if (should_notify_rtnl) {
                        bond_slave_state_notify(bond);
                        bond_slave_link_notify(bond);
index 5458f57..0b0f234 100644
@@ -722,13 +722,21 @@ static int cfv_probe(struct virtio_device *vdev)
        /* Carrier is off until netdevice is opened */
        netif_carrier_off(netdev);
 
+       /* serialize netdev register + virtio_device_ready() with ndo_open() */
+       rtnl_lock();
+
        /* register Netdev */
-       err = register_netdev(netdev);
+       err = register_netdevice(netdev);
        if (err) {
+               rtnl_unlock();
                dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
                goto err;
        }
 
+       virtio_device_ready(vdev);
+
+       rtnl_unlock();
+
        debugfs_init(cfv);
 
        return 0;
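
register_netdev() takes RTNL internally; switching to register_netdevice()
under an explicit rtnl_lock() lets the probe keep holding the lock across
virtio_device_ready(), so a racing ndo_open() (which also runs under RTNL)
cannot see a device that is registered but not yet ready. A condensed
sketch of the ordering (error paths trimmed):

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>
    #include <linux/virtio_config.h>

    static int register_and_ready(struct net_device *netdev,
                                  struct virtio_device *vdev)
    {
            int err;

            rtnl_lock();
            err = register_netdevice(netdev); /* caller holds RTNL */
            if (err) {
                    rtnl_unlock();
                    return err;
            }
            virtio_device_ready(vdev);        /* open() may now run */
            rtnl_unlock();
            return 0;
    }
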
index 76df480..4c47c10 100644
@@ -1646,7 +1646,6 @@ static int grcan_probe(struct platform_device *ofdev)
         */
        sysid_parent = of_find_node_by_path("/ambapp0");
        if (sysid_parent) {
-               of_node_get(sysid_parent);
                err = of_property_read_u32(sysid_parent, "systemid", &sysid);
                if (!err && ((sysid & GRLIB_VERSION_MASK) >=
                             GRCAN_TXBUG_SAFE_GRLIB_VERSION))
index 5d0c82d..7931f9c 100644
@@ -529,7 +529,7 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
        /* acknowledge rx fifo 0 */
        m_can_write(cdev, M_CAN_RXF0A, fgi);
 
-       timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc);
+       timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16;
 
        m_can_receive_skb(cdev, skb, timestamp);
 
@@ -1030,7 +1030,7 @@ static int m_can_echo_tx_event(struct net_device *dev)
                }
 
                msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
-               timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe);
+               timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16;
 
                /* ack txe element */
                m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
@@ -1351,7 +1351,9 @@ static void m_can_chip_config(struct net_device *dev)
        /* enable internal timestamp generation, with a prescaler of 16. The
         * prescaler is applied to the nominal bit timing
         */
-       m_can_write(cdev, M_CAN_TSCC, FIELD_PREP(TSCC_TCP_MASK, 0xf));
+       m_can_write(cdev, M_CAN_TSCC,
+                   FIELD_PREP(TSCC_TCP_MASK, 0xf) |
+                   FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));
 
        m_can_config_endisable(cdev, false);
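
The M_CAN timestamp counter is only 16 bits wide. A plausible reading of
the << 16 in both hunks: the rx-offload helpers order frames by subtracting
u32 timestamps, so placing the hardware value in the upper half keeps
32-bit modular arithmetic meaningful across a 16-bit counter wrap. The
TSCC hunk additionally selects the internal counter (TSCC_TSS_INTERNAL)
instead of only setting the prescaler. A tiny sketch of the widening step
(helper name illustrative):

    #include <linux/types.h>

    /*
     * Place a 16-bit hardware timestamp in the top half of a u32 so
     * that (a - b) comparisons stay meaningful across counter wrap.
     */
    static u32 widen_ts(u16 hw_ts)
    {
            return (u32)hw_ts << 16;
    }
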
 
index 40a1144..cb0321e 100644
@@ -1332,7 +1332,10 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
                cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
                       RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
 
-               rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
+               if (is_v3u(gpriv))
+                       rcar_canfd_write(priv->base, RCANFD_V3U_DCFG(ch), cfg);
+               else
+                       rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
                netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
                           brp, sjw, tseg1, tseg2);
        } else {
@@ -1840,6 +1843,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
                of_child = of_get_child_by_name(pdev->dev.of_node, name);
                if (of_child && of_device_is_available(of_child))
                        channels_mask |= BIT(i);
+               of_node_put(of_child);
        }
 
        if (chip_id != RENESAS_RZG2L) {
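
of_get_child_by_name() returns the child with its refcount elevated, so
every lookup needs a matching of_node_put(); the grcan hunk earlier
likewise drops an of_node_get() that double-counted the reference already
taken by of_find_node_by_path(). A minimal balanced-reference sketch
(names illustrative):

    #include <linux/of.h>

    /*
     * Look up a child node, test it, and drop the reference the
     * lookup took. of_node_put(NULL) is a no-op, so the put is
     * safe even when no child was found.
     */
    static bool child_available(struct device_node *parent,
                                const char *name)
    {
            struct device_node *child;
            bool avail;

            child = of_get_child_by_name(parent, name);
            avail = child && of_device_is_available(child);
            of_node_put(child);
            return avail;
    }
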
index b212523..bc65185 100644
@@ -12,6 +12,7 @@
 // Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
 //
 
+#include <asm/unaligned.h>
 #include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/device.h>
@@ -1650,6 +1651,7 @@ static int mcp251xfd_stop(struct net_device *ndev)
        netif_stop_queue(ndev);
        set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
        hrtimer_cancel(&priv->rx_irq_timer);
+       hrtimer_cancel(&priv->tx_irq_timer);
        mcp251xfd_chip_interrupts_disable(priv);
        free_irq(ndev->irq, priv);
        can_rx_offload_disable(&priv->offload);
@@ -1688,8 +1690,8 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
        u32 osc;
        int err;
 
-       /* The OSC_LPMEN is only supported on MCP2518FD, so use it to
-        * autodetect the model.
+       /* The OSC_LPMEN is only supported on MCP2518FD and MCP251863,
+        * so use it to autodetect the model.
         */
        err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
                                 MCP251XFD_REG_OSC_LPMEN,
@@ -1701,10 +1703,18 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
        if (err)
                return err;
 
-       if (osc & MCP251XFD_REG_OSC_LPMEN)
-               devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
-       else
+       if (osc & MCP251XFD_REG_OSC_LPMEN) {
+               /* We cannot distinguish between MCP2518FD and
+                * MCP251863. If firmware specifies MCP251863, keep
+                * it, otherwise set to MCP2518FD.
+                */
+               if (mcp251xfd_is_251863(priv))
+                       devtype_data = &mcp251xfd_devtype_data_mcp251863;
+               else
+                       devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
+       } else {
                devtype_data = &mcp251xfd_devtype_data_mcp2517fd;
+       }
 
        if (!mcp251xfd_is_251XFD(priv) &&
            priv->devtype_data.model != devtype_data->model) {
@@ -1777,7 +1787,7 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
        xfer[0].len = sizeof(buf_tx->cmd);
        xfer[0].speed_hz = priv->spi_max_speed_hz_slow;
        xfer[1].rx_buf = buf_rx->data;
-       xfer[1].len = sizeof(dev_id);
+       xfer[1].len = sizeof(*dev_id);
        xfer[1].speed_hz = priv->spi_max_speed_hz_fast;
 
        mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
@@ -1786,7 +1796,7 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
        if (err)
                goto out_kfree_buf_tx;
 
-       *dev_id = be32_to_cpup((__be32 *)buf_rx->data);
+       *dev_id = get_unaligned_le32(buf_rx->data);
        *effective_speed_hz_slow = xfer[0].effective_speed_hz;
        *effective_speed_hz_fast = xfer[1].effective_speed_hz;
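
Two classic bugs fixed in one hunk: sizeof(dev_id) measured the pointer
(8 bytes on 64-bit) rather than the 4-byte register, and the DEVID bytes
arrive little-endian with no alignment guarantee, so get_unaligned_le32()
is the right accessor instead of be32_to_cpup(). A compact illustration
(hypothetical helper):

    #include <asm/unaligned.h>
    #include <linux/types.h>

    /*
     * sizeof(dev_id) is the size of the pointer; sizeof(*dev_id)
     * is the size of the pointed-to u32. Using the former as a
     * transfer length over-reads on 64-bit systems.
     */
    static void decode_dev_id(const u8 *rx, u32 *dev_id)
    {
            /* bytes are little-endian and possibly unaligned */
            *dev_id = get_unaligned_le32(rx);
    }
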
 
index 217510c..92b7bc7 100644
@@ -334,19 +334,21 @@ mcp251xfd_regmap_crc_read(void *context,
                 * register. It increments once per SYS clock tick,
                 * which is 20 or 40 MHz.
                 *
-                * Observation shows that if the lowest byte (which is
-                * transferred first on the SPI bus) of that register
-                * is 0x00 or 0x80 the calculated CRC doesn't always
-                * match the transferred one.
+                * Observation on the mcp2518fd shows that if the
+                * lowest byte (which is transferred first on the SPI
+                * bus) of that register is 0x00 or 0x80 the
+                * calculated CRC doesn't always match the transferred
+                * one. On the mcp2517fd this problem is not limited
+                * to the first byte being 0x00 or 0x80.
                 *
                 * If the highest bit in the lowest byte is flipped
                 * the transferred CRC matches the calculated one. We
-                * assume for now the CRC calculation in the chip
-                * works on wrong data and the transferred data is
-                * correct.
+                * assume for now the CRC operates on the correct
+                * data.
                 */
                if (reg == MCP251XFD_REG_TBC &&
-                   (buf_rx->data[0] == 0x0 || buf_rx->data[0] == 0x80)) {
+                   ((buf_rx->data[0] & 0xf8) == 0x0 ||
+                    (buf_rx->data[0] & 0xf8) == 0x80)) {
                        /* Flip highest bit in lowest byte of le32 */
                        buf_rx->data[0] ^= 0x80;
 
@@ -356,10 +358,8 @@ mcp251xfd_regmap_crc_read(void *context,
                                                                  val_len);
                        if (!err) {
                                /* If CRC is now correct, assume
-                                * transferred data was OK, flip bit
-                                * back to original value.
+                                * flipped data is OK.
                                 */
-                               buf_rx->data[0] ^= 0x80;
                                goto out;
                        }
                }
index b29ba91..d3a658b 100644
@@ -268,6 +268,8 @@ struct gs_can {
 
        struct usb_anchor tx_submitted;
        atomic_t active_tx_urbs;
+       void *rxbuf[GS_MAX_RX_URBS];
+       dma_addr_t rxbuf_dma[GS_MAX_RX_URBS];
 };
 
 /* usb interface struct */
@@ -742,6 +744,7 @@ static int gs_can_open(struct net_device *netdev)
                for (i = 0; i < GS_MAX_RX_URBS; i++) {
                        struct urb *urb;
                        u8 *buf;
+                       dma_addr_t buf_dma;
 
                        /* alloc rx urb */
                        urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -752,7 +755,7 @@ static int gs_can_open(struct net_device *netdev)
                        buf = usb_alloc_coherent(dev->udev,
                                                 dev->parent->hf_size_rx,
                                                 GFP_KERNEL,
-                                                &urb->transfer_dma);
+                                                &buf_dma);
                        if (!buf) {
                                netdev_err(netdev,
                                           "No memory left for USB buffer\n");
@@ -760,6 +763,8 @@ static int gs_can_open(struct net_device *netdev)
                                return -ENOMEM;
                        }
 
+                       urb->transfer_dma = buf_dma;
+
                        /* fill, anchor, and submit rx urb */
                        usb_fill_bulk_urb(urb,
                                          dev->udev,
@@ -781,10 +786,17 @@ static int gs_can_open(struct net_device *netdev)
                                           "usb_submit failed (err=%d)\n", rc);
 
                                usb_unanchor_urb(urb);
+                               usb_free_coherent(dev->udev,
+                                                 sizeof(struct gs_host_frame),
+                                                 buf,
+                                                 buf_dma);
                                usb_free_urb(urb);
                                break;
                        }
 
+                       dev->rxbuf[i] = buf;
+                       dev->rxbuf_dma[i] = buf_dma;
+
                        /* Drop reference,
                         * USB core will take care of freeing it
                         */
@@ -842,13 +854,20 @@ static int gs_can_close(struct net_device *netdev)
        int rc;
        struct gs_can *dev = netdev_priv(netdev);
        struct gs_usb *parent = dev->parent;
+       unsigned int i;
 
        netif_stop_queue(netdev);
 
        /* Stop polling */
        parent->active_channels--;
-       if (!parent->active_channels)
+       if (!parent->active_channels) {
                usb_kill_anchored_urbs(&parent->rx_submitted);
+               for (i = 0; i < GS_MAX_RX_URBS; i++)
+                       usb_free_coherent(dev->udev,
+                                         sizeof(struct gs_host_frame),
+                                         dev->rxbuf[i],
+                                         dev->rxbuf_dma[i]);
+       }
 
        /* Stop sending URBs */
        usb_kill_anchored_urbs(&dev->tx_submitted);
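
Buffers from usb_alloc_coherent() are not released by the USB core when an
URB is freed, so the driver must remember each (buffer, DMA handle) pair
and free it on close, which is exactly the bookkeeping these hunks add. A
reduced sketch of the lifecycle (array size and names illustrative):

    #include <linux/usb.h>

    #define NR_RX_URBS 30

    struct rx_bufs {
            void *buf[NR_RX_URBS];
            dma_addr_t dma[NR_RX_URBS];
    };

    /* Allocation: keep the (cpu pointer, dma handle) pair. */
    static int alloc_rx_buf(struct usb_device *udev, size_t len,
                            struct rx_bufs *r, int i, struct urb *urb)
    {
            r->buf[i] = usb_alloc_coherent(udev, len, GFP_KERNEL,
                                           &r->dma[i]);
            if (!r->buf[i])
                    return -ENOMEM;
            urb->transfer_dma = r->dma[i];
            return 0;
    }

    /* Teardown: free every buffer that was tracked. */
    static void free_rx_bufs(struct usb_device *udev, size_t len,
                             struct rx_bufs *r)
    {
            int i;

            for (i = 0; i < NR_RX_URBS; i++)
                    if (r->buf[i])
                            usb_free_coherent(udev, len, r->buf[i],
                                              r->dma[i]);
    }
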
index 3a49257..eefcbe3 100644
 #define KVASER_USB_RX_BUFFER_SIZE              3072
 #define KVASER_USB_MAX_NET_DEVICES             5
 
-/* USB devices features */
-#define KVASER_USB_HAS_SILENT_MODE             BIT(0)
-#define KVASER_USB_HAS_TXRX_ERRORS             BIT(1)
+/* Kvaser USB device quirks */
+#define KVASER_USB_QUIRK_HAS_SILENT_MODE       BIT(0)
+#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS       BIT(1)
+#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ       BIT(2)
 
 /* Device capabilities */
 #define KVASER_USB_CAP_BERR_CAP                        0x01
@@ -65,12 +66,7 @@ struct kvaser_usb_dev_card_data_hydra {
 struct kvaser_usb_dev_card_data {
        u32 ctrlmode_supported;
        u32 capabilities;
-       union {
-               struct {
-                       enum kvaser_usb_leaf_family family;
-               } leaf;
-               struct kvaser_usb_dev_card_data_hydra hydra;
-       };
+       struct kvaser_usb_dev_card_data_hydra hydra;
 };
 
 /* Context for an outstanding, not yet ACKed, transmission */
@@ -83,7 +79,7 @@ struct kvaser_usb {
        struct usb_device *udev;
        struct usb_interface *intf;
        struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES];
-       const struct kvaser_usb_dev_ops *ops;
+       const struct kvaser_usb_driver_info *driver_info;
        const struct kvaser_usb_dev_cfg *cfg;
 
        struct usb_endpoint_descriptor *bulk_in, *bulk_out;
@@ -165,6 +161,12 @@ struct kvaser_usb_dev_ops {
                                  u16 transid);
 };
 
+struct kvaser_usb_driver_info {
+       u32 quirks;
+       enum kvaser_usb_leaf_family family;
+       const struct kvaser_usb_dev_ops *ops;
+};
+
 struct kvaser_usb_dev_cfg {
        const struct can_clock clock;
        const unsigned int timestamp_freq;
@@ -184,4 +186,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
                              int len);
 
 int kvaser_usb_can_rx_over_error(struct net_device *netdev);
+
+extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const;
+
 #endif /* KVASER_USB_H */
index e67658b..f211bfc 100644
@@ -61,8 +61,6 @@
 #define USB_USBCAN_R_V2_PRODUCT_ID             294
 #define USB_LEAF_LIGHT_R_V2_PRODUCT_ID         295
 #define USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID   296
-#define USB_LEAF_PRODUCT_ID_END \
-       USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID
 
 /* Kvaser USBCan-II devices product ids */
 #define USB_USBCAN_REVB_PRODUCT_ID             2
 #define USB_USBCAN_PRO_4HS_PRODUCT_ID          276
 #define USB_HYBRID_CANLIN_PRODUCT_ID           277
 #define USB_HYBRID_PRO_CANLIN_PRODUCT_ID       278
-#define USB_HYDRA_PRODUCT_ID_END \
-       USB_HYBRID_PRO_CANLIN_PRODUCT_ID
 
-static inline bool kvaser_is_leaf(const struct usb_device_id *id)
-{
-       return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
-               id->idProduct <= USB_CAN_R_PRODUCT_ID) ||
-               (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID &&
-                id->idProduct <= USB_LEAF_PRODUCT_ID_END);
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
+       .quirks = 0,
+       .ops = &kvaser_usb_hydra_dev_ops,
+};
 
-static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
-{
-       return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
-              id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_usbcan = {
+       .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+                 KVASER_USB_QUIRK_HAS_SILENT_MODE,
+       .family = KVASER_USBCAN,
+       .ops = &kvaser_usb_leaf_dev_ops,
+};
 
-static inline bool kvaser_is_hydra(const struct usb_device_id *id)
-{
-       return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID &&
-              id->idProduct <= USB_HYDRA_PRODUCT_ID_END;
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf = {
+       .quirks = KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+       .family = KVASER_LEAF,
+       .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err = {
+       .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+                 KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+       .family = KVASER_LEAF,
+       .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_listen = {
+       .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+                 KVASER_USB_QUIRK_HAS_SILENT_MODE |
+                 KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+       .family = KVASER_LEAF,
+       .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
+       .quirks = 0,
+       .ops = &kvaser_usb_leaf_dev_ops,
+};
 
 static const struct usb_device_id kvaser_usb_table[] = {
-       /* Leaf USB product IDs */
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
+       /* Leaf M32C USB product IDs */
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
-                              KVASER_USB_HAS_SILENT_MODE },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
-                              KVASER_USB_HAS_SILENT_MODE },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
-                              KVASER_USB_HAS_SILENT_MODE },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
-                              KVASER_USB_HAS_SILENT_MODE },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
-                              KVASER_USB_HAS_SILENT_MODE },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
-                              KVASER_USB_HAS_SILENT_MODE },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
-                              KVASER_USB_HAS_SILENT_MODE },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
-                              KVASER_USB_HAS_SILENT_MODE },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
-                              KVASER_USB_HAS_SILENT_MODE },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
-                              KVASER_USB_HAS_SILENT_MODE },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID) },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
+
+       /* Leaf i.MX28 USB product IDs */
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
 
        /* USBCANII USB product IDs */
        { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
-               .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
 
        /* Minihydra USB product IDs */
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
-       { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID),
+               .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
        { }
 };
 MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
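
Rather than inferring the device family from product-ID ranges, each
usb_device_id entry now carries a pointer to a kvaser_usb_driver_info
bundling quirks, family and the ops vtable; probe() reads it back from
id->driver_info. A generic sketch of this pattern (IDs and structure
contents illustrative):

    #include <linux/module.h>
    #include <linux/usb.h>

    struct my_driver_info {
            u32 quirks;
            const char *name;
    };

    static const struct my_driver_info info_variant_a = {
            .quirks = 0,
            .name   = "variant-a",
    };

    static const struct usb_device_id my_table[] = {
            { USB_DEVICE(0x1234, 0x0001), /* illustrative IDs */
              .driver_info = (kernel_ulong_t)&info_variant_a },
            { }
    };
    MODULE_DEVICE_TABLE(usb, my_table);

    static int my_probe(struct usb_interface *intf,
                        const struct usb_device_id *id)
    {
            const struct my_driver_info *info =
                    (const struct my_driver_info *)id->driver_info;

            if (!info)
                    return -ENODEV;
            /* configure the device from info->quirks / info->name */
            return 0;
    }
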
@@ -285,6 +320,7 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev)
 static void kvaser_usb_read_bulk_callback(struct urb *urb)
 {
        struct kvaser_usb *dev = urb->context;
+       const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
        int err;
        unsigned int i;
 
@@ -301,8 +337,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
                goto resubmit_urb;
        }
 
-       dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
-                                        urb->actual_length);
+       ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
+                                   urb->actual_length);
 
 resubmit_urb:
        usb_fill_bulk_urb(urb, dev->udev,
@@ -396,6 +432,7 @@ static int kvaser_usb_open(struct net_device *netdev)
 {
        struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
        struct kvaser_usb *dev = priv->dev;
+       const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
        int err;
 
        err = open_candev(netdev);
@@ -406,11 +443,11 @@ static int kvaser_usb_open(struct net_device *netdev)
        if (err)
                goto error;
 
-       err = dev->ops->dev_set_opt_mode(priv);
+       err = ops->dev_set_opt_mode(priv);
        if (err)
                goto error;
 
-       err = dev->ops->dev_start_chip(priv);
+       err = ops->dev_start_chip(priv);
        if (err) {
                netdev_warn(netdev, "Cannot start device, error %d\n", err);
                goto error;
@@ -467,22 +504,23 @@ static int kvaser_usb_close(struct net_device *netdev)
 {
        struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
        struct kvaser_usb *dev = priv->dev;
+       const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
        int err;
 
        netif_stop_queue(netdev);
 
-       err = dev->ops->dev_flush_queue(priv);
+       err = ops->dev_flush_queue(priv);
        if (err)
                netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
 
-       if (dev->ops->dev_reset_chip) {
-               err = dev->ops->dev_reset_chip(dev, priv->channel);
+       if (ops->dev_reset_chip) {
+               err = ops->dev_reset_chip(dev, priv->channel);
                if (err)
                        netdev_warn(netdev, "Cannot reset card, error %d\n",
                                    err);
        }
 
-       err = dev->ops->dev_stop_chip(priv);
+       err = ops->dev_stop_chip(priv);
        if (err)
                netdev_warn(netdev, "Cannot stop device, error %d\n", err);
 
@@ -521,6 +559,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 {
        struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
        struct kvaser_usb *dev = priv->dev;
+       const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
        struct net_device_stats *stats = &netdev->stats;
        struct kvaser_usb_tx_urb_context *context = NULL;
        struct urb *urb;
@@ -563,8 +602,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                goto freeurb;
        }
 
-       buf = dev->ops->dev_frame_to_cmd(priv, skb, &cmd_len,
-                                        context->echo_index);
+       buf = ops->dev_frame_to_cmd(priv, skb, &cmd_len, context->echo_index);
        if (!buf) {
                stats->tx_dropped++;
                dev_kfree_skb(skb);
@@ -648,15 +686,16 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
        }
 }
 
-static int kvaser_usb_init_one(struct kvaser_usb *dev,
-                              const struct usb_device_id *id, int channel)
+static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
 {
        struct net_device *netdev;
        struct kvaser_usb_net_priv *priv;
+       const struct kvaser_usb_driver_info *driver_info = dev->driver_info;
+       const struct kvaser_usb_dev_ops *ops = driver_info->ops;
        int err;
 
-       if (dev->ops->dev_reset_chip) {
-               err = dev->ops->dev_reset_chip(dev, channel);
+       if (ops->dev_reset_chip) {
+               err = ops->dev_reset_chip(dev, channel);
                if (err)
                        return err;
        }
@@ -685,20 +724,19 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev,
        priv->can.state = CAN_STATE_STOPPED;
        priv->can.clock.freq = dev->cfg->clock.freq;
        priv->can.bittiming_const = dev->cfg->bittiming_const;
-       priv->can.do_set_bittiming = dev->ops->dev_set_bittiming;
-       priv->can.do_set_mode = dev->ops->dev_set_mode;
-       if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) ||
+       priv->can.do_set_bittiming = ops->dev_set_bittiming;
+       priv->can.do_set_mode = ops->dev_set_mode;
+       if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) ||
            (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
-               priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter;
-       if (id->driver_info & KVASER_USB_HAS_SILENT_MODE)
+               priv->can.do_get_berr_counter = ops->dev_get_berr_counter;
+       if (driver_info->quirks & KVASER_USB_QUIRK_HAS_SILENT_MODE)
                priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
 
        priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
 
        if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
                priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
-               priv->can.do_set_data_bittiming =
-                                       dev->ops->dev_set_data_bittiming;
+               priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming;
        }
 
        netdev->flags |= IFF_ECHO;
@@ -729,29 +767,22 @@ static int kvaser_usb_probe(struct usb_interface *intf,
        struct kvaser_usb *dev;
        int err;
        int i;
+       const struct kvaser_usb_driver_info *driver_info;
+       const struct kvaser_usb_dev_ops *ops;
+
+       driver_info = (const struct kvaser_usb_driver_info *)id->driver_info;
+       if (!driver_info)
+               return -ENODEV;
 
        dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
 
-       if (kvaser_is_leaf(id)) {
-               dev->card_data.leaf.family = KVASER_LEAF;
-               dev->ops = &kvaser_usb_leaf_dev_ops;
-       } else if (kvaser_is_usbcan(id)) {
-               dev->card_data.leaf.family = KVASER_USBCAN;
-               dev->ops = &kvaser_usb_leaf_dev_ops;
-       } else if (kvaser_is_hydra(id)) {
-               dev->ops = &kvaser_usb_hydra_dev_ops;
-       } else {
-               dev_err(&intf->dev,
-                       "Product ID (%d) is not a supported Kvaser USB device\n",
-                       id->idProduct);
-               return -ENODEV;
-       }
-
        dev->intf = intf;
+       dev->driver_info = driver_info;
+       ops = driver_info->ops;
 
-       err = dev->ops->dev_setup_endpoints(dev);
+       err = ops->dev_setup_endpoints(dev);
        if (err) {
                dev_err(&intf->dev, "Cannot get usb endpoint(s)");
                return err;
@@ -765,22 +796,22 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 
        dev->card_data.ctrlmode_supported = 0;
        dev->card_data.capabilities = 0;
-       err = dev->ops->dev_init_card(dev);
+       err = ops->dev_init_card(dev);
        if (err) {
                dev_err(&intf->dev,
                        "Failed to initialize card, error %d\n", err);
                return err;
        }
 
-       err = dev->ops->dev_get_software_info(dev);
+       err = ops->dev_get_software_info(dev);
        if (err) {
                dev_err(&intf->dev,
                        "Cannot get software info, error %d\n", err);
                return err;
        }
 
-       if (dev->ops->dev_get_software_details) {
-               err = dev->ops->dev_get_software_details(dev);
+       if (ops->dev_get_software_details) {
+               err = ops->dev_get_software_details(dev);
                if (err) {
                        dev_err(&intf->dev,
                                "Cannot get software details, error %d\n", err);
@@ -798,14 +829,14 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 
        dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
 
-       err = dev->ops->dev_get_card_info(dev);
+       err = ops->dev_get_card_info(dev);
        if (err) {
                dev_err(&intf->dev, "Cannot get card info, error %d\n", err);
                return err;
        }
 
-       if (dev->ops->dev_get_capabilities) {
-               err = dev->ops->dev_get_capabilities(dev);
+       if (ops->dev_get_capabilities) {
+               err = ops->dev_get_capabilities(dev);
                if (err) {
                        dev_err(&intf->dev,
                                "Cannot get capabilities, error %d\n", err);
@@ -815,7 +846,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
        }
 
        for (i = 0; i < dev->nchannels; i++) {
-               err = kvaser_usb_init_one(dev, id, i);
+               err = kvaser_usb_init_one(dev, i);
                if (err) {
                        kvaser_usb_remove_interfaces(dev);
                        return err;
index a26823c..5d70844 100644 (file)
@@ -375,7 +375,7 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
        .brp_inc = 1,
 };
 
-static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = {
+const struct can_bittiming_const kvaser_usb_flexc_bittiming_const = {
        .name = "kvaser_usb_flex",
        .tseg1_min = 4,
        .tseg1_max = 16,
@@ -2052,7 +2052,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
                .freq = 24 * MEGA /* Hz */,
        },
        .timestamp_freq = 1,
-       .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c,
+       .bittiming_const = &kvaser_usb_flexc_bittiming_const,
 };
 
 static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = {
index c805b99..cc809ec 100644 (file)
 #define USBCAN_ERROR_STATE_RX_ERROR    BIT(1)
 #define USBCAN_ERROR_STATE_BUSERROR    BIT(2)
 
-/* bittiming parameters */
-#define KVASER_USB_TSEG1_MIN           1
-#define KVASER_USB_TSEG1_MAX           16
-#define KVASER_USB_TSEG2_MIN           1
-#define KVASER_USB_TSEG2_MAX           8
-#define KVASER_USB_SJW_MAX             4
-#define KVASER_USB_BRP_MIN             1
-#define KVASER_USB_BRP_MAX             64
-#define KVASER_USB_BRP_INC             1
-
 /* ctrl modes */
 #define KVASER_CTRL_MODE_NORMAL                1
 #define KVASER_CTRL_MODE_SILENT                2
@@ -343,48 +333,68 @@ struct kvaser_usb_err_summary {
        };
 };
 
-static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
-       .name = "kvaser_usb",
-       .tseg1_min = KVASER_USB_TSEG1_MIN,
-       .tseg1_max = KVASER_USB_TSEG1_MAX,
-       .tseg2_min = KVASER_USB_TSEG2_MIN,
-       .tseg2_max = KVASER_USB_TSEG2_MAX,
-       .sjw_max = KVASER_USB_SJW_MAX,
-       .brp_min = KVASER_USB_BRP_MIN,
-       .brp_max = KVASER_USB_BRP_MAX,
-       .brp_inc = KVASER_USB_BRP_INC,
+static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = {
+       .name = "kvaser_usb_ucii",
+       .tseg1_min = 4,
+       .tseg1_max = 16,
+       .tseg2_min = 2,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 16,
+       .brp_inc = 1,
+};
+
+static const struct can_bittiming_const kvaser_usb_leaf_m32c_bittiming_const = {
+       .name = "kvaser_usb_leaf",
+       .tseg1_min = 3,
+       .tseg1_max = 16,
+       .tseg2_min = 2,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 2,
+       .brp_max = 128,
+       .brp_inc = 2,
 };
 
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = {
        .clock = {
                .freq = 8 * MEGA /* Hz */,
        },
        .timestamp_freq = 1,
-       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+       .bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = {
+       .clock = {
+               .freq = 16 * MEGA /* Hz */,
+       },
+       .timestamp_freq = 1,
+       .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
 };
 
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = {
        .clock = {
                .freq = 16 * MEGA /* Hz */,
        },
        .timestamp_freq = 1,
-       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+       .bittiming_const = &kvaser_usb_flexc_bittiming_const,
 };
 
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = {
        .clock = {
                .freq = 24 * MEGA /* Hz */,
        },
        .timestamp_freq = 1,
-       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+       .bittiming_const = &kvaser_usb_flexc_bittiming_const,
 };
 
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
        .clock = {
                .freq = 32 * MEGA /* Hz */,
        },
        .timestamp_freq = 1,
-       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+       .bittiming_const = &kvaser_usb_flexc_bittiming_const,
 };
 
 static void *
@@ -404,7 +414,7 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
                                      sizeof(struct kvaser_cmd_tx_can);
                cmd->u.tx_can.channel = priv->channel;
 
-               switch (dev->card_data.leaf.family) {
+               switch (dev->driver_info->family) {
                case KVASER_LEAF:
                        cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags;
                        break;
@@ -524,16 +534,23 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
        dev->fw_version = le32_to_cpu(softinfo->fw_version);
        dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
 
-       switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
-       case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
-               dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
-               break;
-       case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
-               dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
-               break;
-       case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
-               dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
-               break;
+       if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
+               /* Firmware expects bittiming parameters calculated for a
+                * 16 MHz clock, regardless of the actual clock.
+                */
+               dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg;
+       } else {
+               switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+               case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+                       dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_16mhz;
+                       break;
+               case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+                       dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_24mhz;
+                       break;
+               case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+                       dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_32mhz;
+                       break;
+               }
        }
 }
 
@@ -550,7 +567,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
        if (err)
                return err;
 
-       switch (dev->card_data.leaf.family) {
+       switch (dev->driver_info->family) {
        case KVASER_LEAF:
                kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
                break;
@@ -558,7 +575,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
                dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
                dev->max_tx_urbs =
                        le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
-               dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
+               dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg;
                break;
        }
 
@@ -597,7 +614,7 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
 
        dev->nchannels = cmd.u.cardinfo.nchannels;
        if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES ||
-           (dev->card_data.leaf.family == KVASER_USBCAN &&
+           (dev->driver_info->family == KVASER_USBCAN &&
             dev->nchannels > MAX_USBCAN_NET_DEVICES))
                return -EINVAL;
 
@@ -730,7 +747,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
            new_state < CAN_STATE_BUS_OFF)
                priv->can.can_stats.restarts++;
 
-       switch (dev->card_data.leaf.family) {
+       switch (dev->driver_info->family) {
        case KVASER_LEAF:
                if (es->leaf.error_factor) {
                        priv->can.can_stats.bus_error++;
@@ -809,7 +826,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
                }
        }
 
-       switch (dev->card_data.leaf.family) {
+       switch (dev->driver_info->family) {
        case KVASER_LEAF:
                if (es->leaf.error_factor) {
                        cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
@@ -999,7 +1016,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
        stats = &priv->netdev->stats;
 
        if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
-           (dev->card_data.leaf.family == KVASER_LEAF &&
+           (dev->driver_info->family == KVASER_LEAF &&
             cmd->id == CMD_LEAF_LOG_MESSAGE)) {
                kvaser_usb_leaf_leaf_rx_error(dev, cmd);
                return;
@@ -1015,7 +1032,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
                return;
        }
 
-       switch (dev->card_data.leaf.family) {
+       switch (dev->driver_info->family) {
        case KVASER_LEAF:
                rx_data = cmd->u.leaf.rx_can.data;
                break;
@@ -1030,7 +1047,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
                return;
        }
 
-       if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id ==
+       if (dev->driver_info->family == KVASER_LEAF && cmd->id ==
            CMD_LEAF_LOG_MESSAGE) {
                cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id);
                if (cf->can_id & KVASER_EXTENDED_FRAME)
@@ -1128,14 +1145,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
                break;
 
        case CMD_LEAF_LOG_MESSAGE:
-               if (dev->card_data.leaf.family != KVASER_LEAF)
+               if (dev->driver_info->family != KVASER_LEAF)
                        goto warn;
                kvaser_usb_leaf_rx_can_msg(dev, cmd);
                break;
 
        case CMD_CHIP_STATE_EVENT:
        case CMD_CAN_ERROR_EVENT:
-               if (dev->card_data.leaf.family == KVASER_LEAF)
+               if (dev->driver_info->family == KVASER_LEAF)
                        kvaser_usb_leaf_leaf_rx_error(dev, cmd);
                else
                        kvaser_usb_leaf_usbcan_rx_error(dev, cmd);
@@ -1147,12 +1164,12 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
 
        /* Ignored commands */
        case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
-               if (dev->card_data.leaf.family != KVASER_USBCAN)
+               if (dev->driver_info->family != KVASER_USBCAN)
                        goto warn;
                break;
 
        case CMD_FLUSH_QUEUE_REPLY:
-               if (dev->card_data.leaf.family != KVASER_LEAF)
+               if (dev->driver_info->family != KVASER_LEAF)
                        goto warn;
                break;
 
index 8a3b7b1..e179d31 100644 (file)
@@ -258,7 +258,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
        .tseg2_min = 1,
        .tseg2_max = 128,
        .sjw_max = 128,
-       .brp_min = 2,
+       .brp_min = 1,
        .brp_max = 256,
        .brp_inc = 1,
 };
@@ -271,7 +271,7 @@ static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
        .tseg2_min = 1,
        .tseg2_max = 16,
        .sjw_max = 16,
-       .brp_min = 2,
+       .brp_min = 1,
        .brp_max = 256,
        .brp_inc = 1,
 };
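For context on why relaxing brp_min from 2 to 1 matters: the nominal bit rate follows from the standard CAN relation bitrate = can_clk / (brp * (1 + tseg1 + tseg2)), where the leading 1 is the sync segment. A small illustrative helper, not driver code (the 100 MHz clock below is an assumed example):

/* Illustrative only: standard CAN bit-timing arithmetic. */
unsigned int can_bitrate(unsigned int can_clk, unsigned int brp,
			 unsigned int tseg1, unsigned int tseg2)
{
	return can_clk / (brp * (1 + tseg1 + tseg2));
}

/* With can_clk = 100 MHz, brp = 1, tseg1 = 15, tseg2 = 4:
 * 100000000 / (1 * 20) = 5 Mbit/s, using 20 time quanta per bit;
 * with brp_min = 2 the same rate is only reachable with 10 quanta
 * per bit, halving the sample-point resolution.
 */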
index 87e81c6..be0edfa 100644 (file)
@@ -878,6 +878,11 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
                if (duplex == DUPLEX_FULL)
                        reg |= DUPLX_MODE;
 
+               if (tx_pause)
+                       reg |= TXFLOW_CNTL;
+               if (rx_pause)
+                       reg |= RXFLOW_CNTL;
+
                core_writel(priv, reg, offset);
        }
 
index 2572c60..b28baab 100644 (file)
@@ -300,6 +300,7 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek)
        const char *label, *state;
        int ret = -EINVAL;
 
+       of_node_get(hellcreek->dev->of_node);
        leds = of_find_node_by_name(hellcreek->dev->of_node, "leds");
        if (!leds) {
                dev_err(hellcreek->dev, "No LEDs specified in device tree!\n");
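The added of_node_get() balances a subtlety of the OF API: of_find_node_by_name() calls of_node_put() on its `from` argument, so a caller that must keep the starting node alive has to take an extra reference first. The pattern, sketched under the assumption that `np` is a node the caller does not own a disposable reference to:

struct device_node *leds;

of_node_get(np);			/* consumed by the lookup below */
leds = of_find_node_by_name(np, "leds");
if (leds) {
	/* ... use the "leds" node ... */
	of_node_put(leds);		/* balance the returned reference */
}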
index 9ca8c8d..92a500e 100644 (file)
@@ -1038,18 +1038,21 @@ int ksz_switch_register(struct ksz_device *dev,
                ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports");
                if (!ports)
                        ports = of_get_child_by_name(dev->dev->of_node, "ports");
-               if (ports)
+               if (ports) {
                        for_each_available_child_of_node(ports, port) {
                                if (of_property_read_u32(port, "reg",
                                                         &port_num))
                                        continue;
                                if (!(dev->port_mask & BIT(port_num))) {
                                        of_node_put(port);
+                                       of_node_put(ports);
                                        return -EINVAL;
                                }
                                of_get_phy_mode(port,
                                                &dev->ports[port_num].interface);
                        }
+                       of_node_put(ports);
+               }
                dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
                                                         "microchip,synclko-125");
                dev->synclko_disable = of_property_read_bool(dev->dev->of_node,
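This hunk plugs the complementary leak: of_get_child_by_name() returns `ports` with an elevated refcount, and for_each_available_child_of_node() only manages the per-child reference, so both the early-return path and the normal loop exit need an explicit put on `ports`. A condensed sketch of the balanced pattern (error_condition() is a hypothetical placeholder):

struct device_node *ports, *port;

ports = of_get_child_by_name(np, "ethernet-ports");
if (ports) {
	for_each_available_child_of_node(ports, port) {
		if (error_condition(port)) {	/* hypothetical check */
			of_node_put(port);	/* leaving the iterator early */
			of_node_put(ports);
			return -EINVAL;
		}
	}
	of_node_put(ports);			/* normal exit */
}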
index 570d020..9c27b9b 100644 (file)
@@ -1886,6 +1886,8 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
 static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
                                      struct felix_stream_filter_counters *counters)
 {
+       mutex_lock(&ocelot->stats_lock);
+
        ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
                   SYS_STAT_CFG_STAT_VIEW_M,
                   SYS_STAT_CFG);
@@ -1900,6 +1902,8 @@ static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
                     SYS_STAT_CFG_STAT_VIEW(index) |
                     SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
                     SYS_STAT_CFG);
+
+       mutex_unlock(&ocelot->stats_lock);
 }
 
 static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
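The locking rationale, as far as it can be inferred from the register names: SYS_STAT_CFG_STAT_VIEW selects which counter block the shared statistics window exposes, and other readers (such as the driver's periodic stats worker) program the same field, so an unserialized PSFP read could sample counters from the wrong view. The critical section, in outline:

mutex_lock(&ocelot->stats_lock);	/* assumed shared with other STAT_VIEW users */
/* 1. select the view for this stream filter index */
/* 2. read the match/drop counters that depend on it */
/* 3. arm clear-on-read for the next query */
mutex_unlock(&ocelot->stats_lock);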
index 2727d31..1cbb05b 100644 (file)
@@ -2334,6 +2334,7 @@ static int
 qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
 {
        struct qca8k_priv *priv = ds->priv;
+       int ret;
 
        /* We only have a general MTU setting.
         * DSA always sets the CPU port's MTU to the largest MTU of the slave
@@ -2344,8 +2345,27 @@ qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
        if (!dsa_is_cpu_port(ds, port))
                return 0;
 
+       /* To change the MAX_FRAME_SIZE the CPU ports must be off, or
+        * the switch panics.
+        * Turn off both CPU ports before applying the new value to
+        * prevent this.
+        */
+       if (priv->port_enabled_map & BIT(0))
+               qca8k_port_set_status(priv, 0, 0);
+
+       if (priv->port_enabled_map & BIT(6))
+               qca8k_port_set_status(priv, 6, 0);
+
        /* Include L2 header / FCS length */
-       return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
+       ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
+
+       if (priv->port_enabled_map & BIT(0))
+               qca8k_port_set_status(priv, 0, 1);
+
+       if (priv->port_enabled_map & BIT(6))
+               qca8k_port_set_status(priv, 6, 1);
+
+       return ret;
 }
 
 static int
index 04408e1..ec58d0e 100644 (file)
@@ -15,7 +15,7 @@
 
 #define QCA8K_ETHERNET_MDIO_PRIORITY                   7
 #define QCA8K_ETHERNET_PHY_PRIORITY                    6
-#define QCA8K_ETHERNET_TIMEOUT                         100
+#define QCA8K_ETHERNET_TIMEOUT                         5
 
 #define QCA8K_NUM_PORTS                                        7
 #define QCA8K_NUM_CPU_PORTS                            2
index 72b6fc1..698c7d1 100644 (file)
@@ -3382,12 +3382,28 @@ static const struct of_device_id sja1105_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
 
+static const struct spi_device_id sja1105_spi_ids[] = {
+       { "sja1105e" },
+       { "sja1105t" },
+       { "sja1105p" },
+       { "sja1105q" },
+       { "sja1105r" },
+       { "sja1105s" },
+       { "sja1110a" },
+       { "sja1110b" },
+       { "sja1110c" },
+       { "sja1110d" },
+       { },
+};
+MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
+
 static struct spi_driver sja1105_driver = {
        .driver = {
                .name  = "sja1105",
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(sja1105_dt_ids),
        },
+       .id_table = sja1105_spi_ids,
        .probe  = sja1105_probe,
        .remove = sja1105_remove,
        .shutdown = sja1105_shutdown,
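Why an OF-only driver grows a legacy ID table: the SPI core derives a device's modalias from its compatible string with the vendor prefix stripped, and consults the spi_device_id table for module autoloading (and to avoid the core's "no spi_device_id" warning). Illustrative mapping, not driver code:

/* compatible = "nxp,sja1105e"  ->  MODALIAS=spi:sja1105e
 * so each { "sja1105e" } entry above matches one compatible with
 * its vendor prefix removed.
 */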
index 3110895..97a92e6 100644 (file)
@@ -205,10 +205,20 @@ static const struct of_device_id vsc73xx_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
 
+static const struct spi_device_id vsc73xx_spi_ids[] = {
+       { "vsc7385" },
+       { "vsc7388" },
+       { "vsc7395" },
+       { "vsc7398" },
+       { },
+};
+MODULE_DEVICE_TABLE(spi, vsc73xx_spi_ids);
+
 static struct spi_driver vsc73xx_spi_driver = {
        .probe = vsc73xx_spi_probe,
        .remove = vsc73xx_spi_remove,
        .shutdown = vsc73xx_spi_shutdown,
+       .id_table = vsc73xx_spi_ids,
        .driver = {
                .name = "vsc73xx-spi",
                .of_match_table = vsc73xx_of_match,
index 4ebd241..4d790a8 100644 (file)
@@ -338,7 +338,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
                 *   the PHY resources listed last
                 */
                phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
-               phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
+               phy_irqnum = platform_irq_count(pdev) - 1;
                dma_irqnum = 1;
                dma_irqend = phy_irqnum;
        } else {
@@ -348,7 +348,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
                phy_memnum = 0;
                phy_irqnum = 0;
                dma_irqnum = 1;
-               dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ);
+               dma_irqend = platform_irq_count(pdev);
        }
 
        /* Obtain the mmio areas for the device */
index 8318339..8647125 100644 (file)
@@ -379,7 +379,7 @@ static void aq_pci_shutdown(struct pci_dev *pdev)
        }
 }
 
-static int aq_suspend_common(struct device *dev, bool deep)
+static int aq_suspend_common(struct device *dev)
 {
        struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
 
@@ -392,17 +392,15 @@ static int aq_suspend_common(struct device *dev, bool deep)
        if (netif_running(nic->ndev))
                aq_nic_stop(nic);
 
-       if (deep) {
-               aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
-               aq_nic_set_power(nic);
-       }
+       aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+       aq_nic_set_power(nic);
 
        rtnl_unlock();
 
        return 0;
 }
 
-static int atl_resume_common(struct device *dev, bool deep)
+static int atl_resume_common(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct aq_nic_s *nic;
@@ -415,11 +413,6 @@ static int atl_resume_common(struct device *dev, bool deep)
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
 
-       if (deep) {
-               /* Reinitialize Nic/Vecs objects */
-               aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
-       }
-
        if (netif_running(nic->ndev)) {
                ret = aq_nic_init(nic);
                if (ret)
@@ -444,22 +437,22 @@ err_exit:
 
 static int aq_pm_freeze(struct device *dev)
 {
-       return aq_suspend_common(dev, true);
+       return aq_suspend_common(dev);
 }
 
 static int aq_pm_suspend_poweroff(struct device *dev)
 {
-       return aq_suspend_common(dev, true);
+       return aq_suspend_common(dev);
 }
 
 static int aq_pm_thaw(struct device *dev)
 {
-       return atl_resume_common(dev, true);
+       return atl_resume_common(dev);
 }
 
 static int aq_pm_resume_restore(struct device *dev)
 {
-       return atl_resume_common(dev, true);
+       return atl_resume_common(dev);
 }
 
 static const struct dev_pm_ops aq_pm_ops = {
index e6f4878..02bd3cf 100644 (file)
@@ -332,7 +332,6 @@ static void bgmac_remove(struct bcma_device *core)
        bcma_mdio_mii_unregister(bgmac->mii_bus);
        bgmac_enet_remove(bgmac);
        bcma_set_drvdata(core, NULL);
-       kfree(bgmac);
 }
 
 static struct bcma_driver bgmac_bcma_driver = {
index 56b46b8..cf9b005 100644 (file)
@@ -7790,7 +7790,7 @@ hwrm_dbg_qcaps_exit:
 
 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
 
-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
        int rc;
 
@@ -10065,7 +10065,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
 
        if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
                resc_reinit = true;
-       if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
+       if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
+           test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
                fw_reset = true;
        else
                bnxt_remap_fw_health_regs(bp);
index a1dca8c..075c620 100644 (file)
@@ -2314,6 +2314,7 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
+int bnxt_hwrm_func_qcaps(struct bnxt *bp);
 int bnxt_hwrm_fw_set_time(struct bnxt *);
 int bnxt_open_nic(struct bnxt *, bool, bool);
 int bnxt_half_open_nic(struct bnxt *bp);
index 3528ce9..6b3d4f4 100644 (file)
@@ -979,9 +979,11 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
        if (rc)
                return rc;
 
-       rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
-       if (rc)
-               return rc;
+       if (BNXT_CHIP_P5(bp)) {
+               rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
+               if (rc)
+                       return rc;
+       }
        return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH);
 
 }
index 562f8f6..7f3c087 100644 (file)
@@ -76,14 +76,23 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
                            u64 *ns)
 {
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       u32 high_before, high_now, low;
 
        if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
                return -EIO;
 
+       high_before = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
        ptp_read_system_prets(sts);
-       *ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+       low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
        ptp_read_system_postts(sts);
-       *ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32;
+       high_now = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
+       if (high_now != high_before) {
+               ptp_read_system_prets(sts);
+               low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+               ptp_read_system_postts(sts);
+       }
+       *ns = ((u64)high_now << 32) | low;
+
        return 0;
 }
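This is the classic split-counter read: a free-running clock exposed as two 32-bit registers can wrap its low word between the two reads, yielding a timestamp off by 2^32 ns. Re-reading the high word and retrying the low read when it changed closes the race, assuming at most one rollover during the sequence. A generic sketch of the technique:

static u64 read_split_counter(void __iomem *lo_reg, void __iomem *hi_reg)
{
	u32 hi = readl(hi_reg);
	u32 lo = readl(lo_reg);
	u32 hi2 = readl(hi_reg);

	if (hi2 != hi)			/* low word wrapped in between */
		lo = readl(lo_reg);	/* re-sample; now pairs with hi2 */

	return ((u64)hi2 << 32) | lo;
}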
 
index ddf2f39..a1a2c7a 100644 (file)
@@ -823,8 +823,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
                goto err_out2;
 
        rc = pci_enable_sriov(bp->pdev, *num_vfs);
-       if (rc)
+       if (rc) {
+               bnxt_ulp_sriov_cfg(bp, 0);
                goto err_out2;
+       }
 
        return 0;
 
@@ -832,6 +834,9 @@ err_out2:
        /* Free the resources reserved for various VF's */
        bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
 
+       /* Restore the max resources */
+       bnxt_hwrm_func_qcaps(bp);
+
 err_out1:
        bnxt_free_vf_resources(bp);
 
index f02fe90..f53387e 100644 (file)
@@ -28,7 +28,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
                                   struct xdp_buff *xdp)
 {
        struct skb_shared_info *sinfo;
-       struct bnxt_sw_tx_bd *tx_buf, *first_buf;
+       struct bnxt_sw_tx_bd *tx_buf;
        struct tx_bd *txbd;
        int num_frags = 0;
        u32 flags;
@@ -43,13 +43,14 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
        /* fill up the first buffer */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
-       first_buf = tx_buf;
        tx_buf->nr_frags = num_frags;
        if (xdp)
                tx_buf->page = virt_to_head_page(xdp->data);
 
        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
-       flags = ((len) << TX_BD_LEN_SHIFT) | ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT);
+       flags = (len << TX_BD_LEN_SHIFT) |
+               ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
+               bnxt_lhint_arr[len >> 9];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        txbd->tx_bd_opaque = prod;
        txbd->tx_bd_haddr = cpu_to_le64(mapping);
@@ -82,7 +83,6 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
 
                flags = frag_len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
-               txbd->tx_bd_opaque = prod;
                txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
 
                len = frag_len;
@@ -96,7 +96,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
        prod = NEXT_TX(prod);
        txr->tx_prod = prod;
 
-       return first_buf;
+       return tx_buf;
 }
 
 static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
index 4af5561..ddfe920 100644 (file)
@@ -1236,8 +1236,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
        csk->sndbuf = newsk->sk_sndbuf;
        csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
        RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
-                                          sock_net(newsk)->
-                                               ipv4.sysctl_tcp_window_scaling,
+                                          READ_ONCE(sock_net(newsk)->
+                                                    ipv4.sysctl_tcp_window_scaling),
                                           tp->window_clamp);
        neigh_release(n);
        inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@@ -1384,7 +1384,7 @@ static void chtls_pass_accept_request(struct sock *sk,
 #endif
        }
        if (req->tcpopt.wsf <= 14 &&
-           sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
+           READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
                inet_rsk(oreq)->wscale_ok = 1;
                inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
        }
@@ -1392,7 +1392,7 @@ static void chtls_pass_accept_request(struct sock *sk,
        th_ecn = tcph->ece && tcph->cwr;
        if (th_ecn) {
                ect = !INET_ECN_is_not_ect(ip_dsfield);
-               ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
+               ecn_ok = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn);
                if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
                        inet_rsk(oreq)->ecn_ok = 1;
        }
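These conversions annotate lockless reads of sysctls that can be flipped at any time via /proc. READ_ONCE() adds no ordering; it only guarantees a single, untorn load, and is assumed to pair with WRITE_ONCE() on the sysctl update side. The pattern in miniature (enable_window_scaling() is a hypothetical consumer):

/* reader, no lock held */
if (READ_ONCE(net->ipv4.sysctl_tcp_window_scaling))
	enable_window_scaling();

/* writer, e.g. the sysctl handler */
WRITE_ONCE(net->ipv4.sysctl_tcp_window_scaling, val);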
index 528eb0f..b4f5e57 100644 (file)
@@ -2287,7 +2287,7 @@ err:
 
 /* Uses sync mcc */
 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
-                                     u8 page_num, u8 *data)
+                                     u8 page_num, u32 off, u32 len, u8 *data)
 {
        struct be_dma_mem cmd;
        struct be_mcc_wrb *wrb;
@@ -2321,10 +2321,10 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
        req->port = cpu_to_le32(adapter->hba_port_num);
        req->page_num = cpu_to_le32(page_num);
        status = be_mcc_notify_wait(adapter);
-       if (!status) {
+       if (!status && len > 0) {
                struct be_cmd_resp_port_type *resp = cmd.va;
 
-               memcpy(data, resp->page_data, PAGE_DATA_LEN);
+               memcpy(data, resp->page_data + off, len);
        }
 err:
        mutex_unlock(&adapter->mcc_lock);
@@ -2415,7 +2415,7 @@ int be_cmd_query_cable_type(struct be_adapter *adapter)
        int status;
 
        status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
-                                                  page_data);
+                                                  0, PAGE_DATA_LEN, page_data);
        if (!status) {
                switch (adapter->phy.interface_type) {
                case PHY_TYPE_QSFP:
@@ -2440,7 +2440,7 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter)
        int status;
 
        status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
-                                                  page_data);
+                                                  0, PAGE_DATA_LEN, page_data);
        if (!status) {
                strlcpy(adapter->phy.vendor_name, page_data +
                        SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
index db1f3b9..e2085c6 100644 (file)
@@ -2427,7 +2427,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
                            u32 *state);
 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
-                                     u8 page_num, u8 *data);
+                                     u8 page_num, u32 off, u32 len, u8 *data);
 int be_cmd_query_cable_type(struct be_adapter *adapter);
 int be_cmd_query_sfp_info(struct be_adapter *adapter);
 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
index dfa7843..bd0df18 100644 (file)
@@ -1344,7 +1344,7 @@ static int be_get_module_info(struct net_device *netdev,
                return -EOPNOTSUPP;
 
        status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
-                                                  page_data);
+                                                  0, PAGE_DATA_LEN, page_data);
        if (!status) {
                if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
                        modinfo->type = ETH_MODULE_SFF_8079;
@@ -1362,25 +1362,32 @@ static int be_get_module_eeprom(struct net_device *netdev,
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;
+       u32 begin, end;
 
        if (!check_privilege(adapter, MAX_PRIVILEGES))
                return -EOPNOTSUPP;
 
-       status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
-                                                  data);
-       if (status)
-               goto err;
+       begin = eeprom->offset;
+       end = eeprom->offset + eeprom->len;
+
+       if (begin < PAGE_DATA_LEN) {
+               status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin,
+                                                          min_t(u32, end, PAGE_DATA_LEN) - begin,
+                                                          data);
+               if (status)
+                       goto err;
+
+               data += PAGE_DATA_LEN - begin;
+               begin = PAGE_DATA_LEN;
+       }
 
-       if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) {
-               status = be_cmd_read_port_transceiver_data(adapter,
-                                                          TR_PAGE_A2,
-                                                          data +
-                                                          PAGE_DATA_LEN);
+       if (end > PAGE_DATA_LEN) {
+               status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2,
+                                                          begin - PAGE_DATA_LEN,
+                                                          end - begin, data);
                if (status)
                        goto err;
        }
-       if (eeprom->offset)
-               memcpy(data, data + eeprom->offset, eeprom->len);
 err:
        return be_cmd_status(status);
 }
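The rewrite splits an ethtool EEPROM request across the two transceiver pages at the PAGE_DATA_LEN boundary (256 bytes for these pages) instead of always reading both pages whole and shuffling the result afterwards. A worked example with illustrative numbers:

/* eeprom->offset = 200, eeprom->len = 100  =>  begin = 200, end = 300
 *
 * page A0: off = 200, len = min(end, 256) - begin = 56 bytes
 *          then data += 56 and begin = 256
 * page A2: off = begin - 256 = 0, len = end - begin = 44 bytes
 *
 * Exactly eeprom->len bytes land in the caller's buffer, with no
 * read past the end of either page.
 */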
index 5231818..c036637 100644 (file)
@@ -1764,6 +1764,19 @@ cleanup_clk:
        return rc;
 }
 
+static bool ftgmac100_has_child_node(struct device_node *np, const char *name)
+{
+       struct device_node *child_np = of_get_child_by_name(np, name);
+       bool ret = false;
+
+       if (child_np) {
+               ret = true;
+               of_node_put(child_np);
+       }
+
+       return ret;
+}
+
 static int ftgmac100_probe(struct platform_device *pdev)
 {
        struct resource *res;
@@ -1883,7 +1896,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
 
                /* Display what we found */
                phy_attached_info(phy);
-       } else if (np && !of_get_child_by_name(np, "mdio")) {
+       } else if (np && !ftgmac100_has_child_node(np, "mdio")) {
                /* Support legacy ASPEED devicetree descriptions that describe a
                 * MAC with an embedded MDIO controller but have no "mdio"
                 * child node. Automatically scan the MDIO bus for available
index 0f6a549..29a6c2e 100644 (file)
@@ -142,6 +142,7 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
                         int ref_ok, struct funeth_txq *xdp_q)
 {
        struct bpf_prog *xdp_prog;
+       struct xdp_frame *xdpf;
        struct xdp_buff xdp;
        u32 act;
 
@@ -163,7 +164,9 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
        case XDP_TX:
                if (unlikely(!ref_ok))
                        goto pass;
-               if (!fun_xdp_tx(xdp_q, xdp.data, xdp.data_end - xdp.data))
+
+               xdpf = xdp_convert_buff_to_frame(&xdp);
+               if (!xdpf || !fun_xdp_tx(xdp_q, xdpf))
                        goto xdp_error;
                FUN_QSTAT_INC(q, xdp_tx);
                q->xdp_flush |= FUN_XDP_FLUSH_TX;
index ff6e292..2f6698b 100644 (file)
@@ -466,7 +466,7 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
 
                do {
                        fun_xdp_unmap(q, reclaim_idx);
-                       page_frag_free(q->info[reclaim_idx].vaddr);
+                       xdp_return_frame(q->info[reclaim_idx].xdpf);
 
                        trace_funeth_tx_free(q, reclaim_idx, 1, head);
 
@@ -479,11 +479,11 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
        return npkts;
 }
 
-bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
 {
        struct fun_eth_tx_req *req;
        struct fun_dataop_gl *gle;
-       unsigned int idx;
+       unsigned int idx, len;
        dma_addr_t dma;
 
        if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
@@ -494,7 +494,8 @@ bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
                return false;
        }
 
-       dma = dma_map_single(q->dma_dev, data, len, DMA_TO_DEVICE);
+       len = xdpf->len;
+       dma = dma_map_single(q->dma_dev, xdpf->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(q->dma_dev, dma))) {
                FUN_QSTAT_INC(q, tx_map_err);
                return false;
@@ -514,7 +515,7 @@ bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
        gle = (struct fun_dataop_gl *)req->dataop.imm;
        fun_dataop_gl_init(gle, 0, 0, len, dma);
 
-       q->info[idx].vaddr = data;
+       q->info[idx].xdpf = xdpf;
 
        u64_stats_update_begin(&q->syncp);
        q->stats.tx_bytes += len;
@@ -545,12 +546,9 @@ int fun_xdp_xmit_frames(struct net_device *dev, int n,
        if (unlikely(q_idx >= fp->num_xdpqs))
                return -ENXIO;
 
-       for (q = xdpqs[q_idx], i = 0; i < n; i++) {
-               const struct xdp_frame *xdpf = frames[i];
-
-               if (!fun_xdp_tx(q, xdpf->data, xdpf->len))
+       for (q = xdpqs[q_idx], i = 0; i < n; i++)
+               if (!fun_xdp_tx(q, frames[i]))
                        break;
-       }
 
        if (unlikely(flags & XDP_XMIT_FLUSH))
                fun_txq_wr_db(q);
@@ -577,7 +575,7 @@ static void fun_xdpq_purge(struct funeth_txq *q)
                unsigned int idx = q->cons_cnt & q->mask;
 
                fun_xdp_unmap(q, idx);
-               page_frag_free(q->info[idx].vaddr);
+               xdp_return_frame(q->info[idx].xdpf);
                q->cons_cnt++;
        }
 }
index 04c9f91..8708e28 100644 (file)
@@ -95,8 +95,8 @@ struct funeth_txq_stats {  /* per Tx queue SW counters */
 
 struct funeth_tx_info {      /* per Tx descriptor state */
        union {
-               struct sk_buff *skb; /* associated packet */
-               void *vaddr;         /* start address for XDP */
+               struct sk_buff *skb;    /* associated packet (sk_buff path) */
+               struct xdp_frame *xdpf; /* associated XDP frame (XDP path) */
        };
 };
 
@@ -245,7 +245,7 @@ static inline int fun_irq_node(const struct fun_irq *p)
 int fun_rxq_napi_poll(struct napi_struct *napi, int budget);
 int fun_txq_napi_poll(struct napi_struct *napi, int budget);
 netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len);
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf);
 int fun_xdp_xmit_frames(struct net_device *dev, int n,
                        struct xdp_frame **frames, u32 flags);
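The funeth changes move the XDP_TX path from queuing raw buffer addresses to queuing struct xdp_frame, so completion can hand buffers back through the XDP memory model rather than page_frag_free(). The lifecycle, sketched with the names from the hunks above:

/* transmit: freeze the xdp_buff into a frame */
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(&xdp);

if (!xdpf || !fun_xdp_tx(xdp_q, xdpf))
	goto xdp_error;			/* conversion failed or ring full */

/* completion: return the buffer via the frame's memory type */
xdp_return_frame(q->info[idx].xdpf);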
 
index 8a3a446..94f80e1 100644 (file)
@@ -769,6 +769,7 @@ struct hnae3_tc_info {
        u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
        u16 tqp_count[HNAE3_MAX_TC];
        u16 tqp_offset[HNAE3_MAX_TC];
+       u8 max_tc; /* Total number of TCs */
        u8 num_tc; /* Total number of enabled TCs */
        bool mqprio_active;
 };
index 6d20974..4c7988e 100644 (file)
@@ -1129,7 +1129,7 @@ hns3_is_ringparam_changed(struct net_device *ndev,
        if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num &&
            old_ringparam->rx_desc_num == new_ringparam->rx_desc_num &&
            old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) {
-               netdev_info(ndev, "ringparam not changed\n");
+               netdev_info(ndev, "descriptor number and rx buffer length not changed\n");
                return false;
        }
 
index 1ebad0e..fae7976 100644 (file)
@@ -3268,7 +3268,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
 static int hclge_update_port_info(struct hclge_dev *hdev)
 {
        struct hclge_mac *mac = &hdev->hw.mac;
-       int speed = HCLGE_MAC_SPEED_UNKNOWN;
+       int speed;
        int ret;
 
        /* get the port info from SFP cmd if not copper port */
@@ -3279,10 +3279,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
        if (!hdev->support_sfp_query)
                return 0;
 
-       if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+       if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+               speed = mac->speed;
                ret = hclge_get_sfp_info(hdev, mac);
-       else
+       } else {
+               speed = HCLGE_MAC_SPEED_UNKNOWN;
                ret = hclge_get_sfp_speed(hdev, &speed);
+       }
 
        if (ret == -EOPNOTSUPP) {
                hdev->support_sfp_query = false;
@@ -3294,6 +3297,8 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
        if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
                if (mac->speed_type == QUERY_ACTIVE_SPEED) {
                        hclge_update_port_capability(hdev, mac);
+                       if (mac->speed != speed)
+                               (void)hclge_tm_port_shaper_cfg(hdev);
                        return 0;
                }
                return hclge_cfg_mac_speed_dup(hdev, mac->speed,
@@ -3376,6 +3381,12 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
        link_state_old = vport->vf_info.link_state;
        vport->vf_info.link_state = link_state;
 
+       /* Return success directly if the VF is not alive; the VF will
+        * query the link state itself when it starts work.
+        */
+       if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+               return 0;
+
        ret = hclge_push_vf_link_status(vport);
        if (ret) {
                vport->vf_info.link_state = link_state_old;
@@ -10117,6 +10128,7 @@ static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
        if (ret)
                return ret;
 
+       vport->port_base_vlan_cfg.tbl_sta = false;
        /* remove old VLAN tag */
        if (old_info->vlan_tag == 0)
                ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
index 1f87a8a..2f33b03 100644 (file)
@@ -282,8 +282,8 @@ static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
        return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
-static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
-                                     u16 qs_id, u8 pri)
+static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
+                                     bool link_vld)
 {
        struct hclge_qs_to_pri_link_cmd *map;
        struct hclge_desc desc;
@@ -294,7 +294,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
 
        map->qs_id = cpu_to_le16(qs_id);
        map->priority = pri;
-       map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
+       map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;
 
        return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
@@ -420,7 +420,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
        return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
-static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
+int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
 {
        struct hclge_port_shapping_cmd *shap_cfg_cmd;
        struct hclge_shaper_ir_para ir_para;
@@ -642,11 +642,13 @@ static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
         * one TC for each VF for simplicity. VF's vport_id is non-zero.
         */
        if (vport->vport_id) {
+               kinfo->tc_info.max_tc = 1;
                kinfo->tc_info.num_tc = 1;
                vport->qs_offset = HNAE3_MAX_TC +
                                   vport->vport_id - HCLGE_VF_VPORT_START_NUM;
                vport_max_rss_size = hdev->vf_rss_size_max;
        } else {
+               kinfo->tc_info.max_tc = hdev->tc_max;
                kinfo->tc_info.num_tc =
                        min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
                vport->qs_offset = 0;
@@ -679,7 +681,9 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
        kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
        vport->dwrr = 100;  /* 100 percent as init */
        vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
-       hdev->rss_cfg.rss_size = kinfo->rss_size;
+
+       if (vport->vport_id == PF_VPORT_ID)
+               hdev->rss_cfg.rss_size = kinfo->rss_size;
 
        /* when enable mqprio, the tc_info has been updated. */
        if (kinfo->tc_info.mqprio_active)
@@ -714,14 +718,22 @@ static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
 
 static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
 {
-       u8 i;
+       u8 i, tc_sch_mode;
+       u32 bw_limit;
+
+       for (i = 0; i < hdev->tc_max; i++) {
+               if (i < hdev->tm_info.num_tc) {
+                       tc_sch_mode = HCLGE_SCH_MODE_DWRR;
+                       bw_limit = hdev->tm_info.pg_info[0].bw_limit;
+               } else {
+                       tc_sch_mode = HCLGE_SCH_MODE_SP;
+                       bw_limit = 0;
+               }
 
-       for (i = 0; i < hdev->tm_info.num_tc; i++) {
                hdev->tm_info.tc_info[i].tc_id = i;
-               hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
+               hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
                hdev->tm_info.tc_info[i].pgid = 0;
-               hdev->tm_info.tc_info[i].bw_limit =
-                       hdev->tm_info.pg_info[0].bw_limit;
+               hdev->tm_info.tc_info[i].bw_limit = bw_limit;
        }
 
        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
@@ -926,10 +938,13 @@ static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
        for (k = 0; k < hdev->num_alloc_vport; k++) {
                struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
 
-               for (i = 0; i < kinfo->tc_info.num_tc; i++) {
+               for (i = 0; i < kinfo->tc_info.max_tc; i++) {
+                       u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
+                       bool link_vld = i < kinfo->tc_info.num_tc;
+
                        ret = hclge_tm_qs_to_pri_map_cfg(hdev,
                                                         vport[k].qs_offset + i,
-                                                        i);
+                                                        pri, link_vld);
                        if (ret)
                                return ret;
                }
@@ -949,7 +964,7 @@ static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
                for (i = 0; i < HNAE3_MAX_TC; i++) {
                        ret = hclge_tm_qs_to_pri_map_cfg(hdev,
                                                         vport[k].qs_offset + i,
-                                                        k);
+                                                        k, true);
                        if (ret)
                                return ret;
                }
@@ -989,33 +1004,39 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
 {
        u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
        struct hclge_shaper_ir_para ir_para;
-       u32 shaper_para;
+       u32 shaper_para_c, shaper_para_p;
        int ret;
        u32 i;
 
-       for (i = 0; i < hdev->tm_info.num_tc; i++) {
+       for (i = 0; i < hdev->tc_max; i++) {
                u32 rate = hdev->tm_info.tc_info[i].bw_limit;
 
-               ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
-                                            &ir_para, max_tm_rate);
-               if (ret)
-                       return ret;
+               if (rate) {
+                       ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
+                                                    &ir_para, max_tm_rate);
+                       if (ret)
+                               return ret;
+
+                       shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
+                                                                  HCLGE_SHAPER_BS_U_DEF,
+                                                                  HCLGE_SHAPER_BS_S_DEF);
+                       shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
+                                                                  ir_para.ir_u,
+                                                                  ir_para.ir_s,
+                                                                  HCLGE_SHAPER_BS_U_DEF,
+                                                                  HCLGE_SHAPER_BS_S_DEF);
+               } else {
+                       shaper_para_c = 0;
+                       shaper_para_p = 0;
+               }
 
-               shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
-                                                        HCLGE_SHAPER_BS_U_DEF,
-                                                        HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
-                                               shaper_para, rate);
+                                               shaper_para_c, rate);
                if (ret)
                        return ret;
 
-               shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
-                                                        ir_para.ir_u,
-                                                        ir_para.ir_s,
-                                                        HCLGE_SHAPER_BS_U_DEF,
-                                                        HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
-                                               shaper_para, rate);
+                                               shaper_para_p, rate);
                if (ret)
                        return ret;
        }
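
The hunk above widens the shaper loop to all hdev->tc_max priorities and, for a TC whose bw_limit is zero, writes zeroed shaper words instead of running the rate through the IR calculator. A minimal standalone sketch of that select-then-program split, with stub names and a placeholder burst constant rather than the hclge API:

    #include <stdint.h>
    #include <stdio.h>

    #define BURST_DEFAULTS 0x500000u /* placeholder for the BS_U/BS_S default fields */

    /* stand-in for hclge_shaper_para_calc()/hclge_tm_get_shapping_para() */
    static uint32_t calc_peak_para(uint32_t rate) { return rate / 1000u + 1u; }

    static void pick_shaper(uint32_t rate, uint32_t *para_c, uint32_t *para_p)
    {
            if (rate) {
                    *para_c = BURST_DEFAULTS;                  /* burst defaults, no rate bits */
                    *para_p = BURST_DEFAULTS | calc_peak_para(rate);
            } else {
                    *para_c = 0;                               /* zero rate: TC fully disabled */
                    *para_p = 0;
            }
    }

    int main(void)
    {
            uint32_t c, p;

            pick_shaper(0, &c, &p);                            /* unused TC -> both words zero */
            printf("c=%u p=%u\n", c, p);
            return 0;
    }
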
@@ -1125,7 +1146,7 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
        int ret;
        u32 i, k;
 
-       for (i = 0; i < hdev->tm_info.num_tc; i++) {
+       for (i = 0; i < hdev->tc_max; i++) {
                pg_info =
                        &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
                dwrr = pg_info->tc_dwrr[i];
@@ -1135,9 +1156,15 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
                        return ret;
 
                for (k = 0; k < hdev->num_alloc_vport; k++) {
+                       struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
+
+                       if (i >= kinfo->tc_info.max_tc)
+                               continue;
+
+                       dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
                        ret = hclge_tm_qs_weight_cfg(
                                hdev, vport[k].qs_offset + i,
-                               vport[k].dwrr);
+                               dwrr);
                        if (ret)
                                return ret;
                }
@@ -1303,6 +1330,7 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
 {
        struct hclge_vport *vport = hdev->vport;
        int ret;
+       u8 mode;
        u16 i;
 
        ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
@@ -1310,9 +1338,16 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
                return ret;
 
        for (i = 0; i < hdev->num_alloc_vport; i++) {
+               struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;
+
+               if (pri_id >= kinfo->tc_info.max_tc)
+                       continue;
+
+               mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
+                      HCLGE_SCH_MODE_SP;
                ret = hclge_tm_qs_schd_mode_cfg(hdev,
                                                vport[i].qs_offset + pri_id,
-                                               HCLGE_SCH_MODE_DWRR);
+                                               mode);
                if (ret)
                        return ret;
        }
@@ -1353,7 +1388,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
        u8 i;
 
        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
-               for (i = 0; i < hdev->tm_info.num_tc; i++) {
+               for (i = 0; i < hdev->tc_max; i++) {
                        ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
                        if (ret)
                                return ret;
index 619cc30..d943943 100644 (file)
@@ -237,6 +237,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
 void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
 void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
 int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
+int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev);
 int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
 int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
 int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
index 60ae8bf..1749d26 100644 (file)
@@ -43,9 +43,7 @@ static bool check_image_valid(struct hinic_devlink_priv *priv, const u8 *buf,
 
        for (i = 0; i < fw_image->fw_info.fw_section_cnt; i++) {
                len += fw_image->fw_section_info[i].fw_section_len;
-               memcpy(&host_image->image_section_info[i],
-                      &fw_image->fw_section_info[i],
-                      sizeof(struct fw_section_info_st));
+               host_image->image_section_info[i] = fw_image->fw_section_info[i];
        }
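
Swapping the memcpy() for a plain struct assignment is behavior-preserving for identically typed elements, and it lets the compiler check the types and size instead of trusting a hand-written byte count. A self-contained illustration:

    #include <string.h>
    #include <stdio.h>

    struct section { unsigned int offset, len; };

    int main(void)
    {
            struct section src = { 0x1000, 512 }, a, b;

            memcpy(&a, &src, sizeof(a)); /* old style: size must be kept in sync by hand */
            b = src;                     /* assignment: type-checked, size implied */
            printf("%u %u\n", a.len, b.len);
            return 0;
    }
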
 
        if (len != fw_image->fw_len ||
index 7e7fe5b..5ab7c0f 100644 (file)
@@ -5981,6 +5981,15 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
                        release_sub_crqs(adapter, 0);
                        rc = init_sub_crqs(adapter);
                } else {
+                       /* no need to reinitialize completely, but we do
+                        * need to clean up transmits that were in flight
+                        * when we processed the reset.  Failure to do so
+                        * will confound the upper layer, usually TCP, by
+                        * creating the illusion of transmits that are
+                        * awaiting completion.
+                        */
+                       clean_tx_pools(adapter);
+
                        rc = reset_sub_crq_queues(adapter);
                }
        } else {
index 13382df..bcf680e 100644 (file)
@@ -630,7 +630,6 @@ struct e1000_phy_info {
        bool disable_polarity_correction;
        bool is_mdix;
        bool polarity_correction;
-       bool reset_disable;
        bool speed_downgraded;
        bool autoneg_wait_to_complete;
 };
index e6c8e6d..9466f65 100644 (file)
@@ -2050,10 +2050,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
        bool blocked = false;
        int i = 0;
 
-       /* Check the PHY (LCD) reset flag */
-       if (hw->phy.reset_disable)
-               return true;
-
        while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
               (i++ < 30))
                usleep_range(10000, 11000);
index 638a3dd..2504b11 100644 (file)
 #define I217_CGFREG_ENABLE_MTA_RESET   0x0002
 #define I217_MEMPWR                    PHY_REG(772, 26)
 #define I217_MEMPWR_DISABLE_SMB_RELEASE        0x0010
-#define I217_MEMPWR_MOEM               0x1000
 
 /* Receive Address Initial CRC Calculation */
 #define E1000_PCH_RAICC(_n)    (0x05F50 + ((_n) * 4))
index fa06f68..f172994 100644 (file)
@@ -6494,6 +6494,10 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
 
        if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
            hw->mac.type >= e1000_pch_adp) {
+               /* Keep the GPT clock enabled for CSME */
+               mac_data = er32(FEXTNVM);
+               mac_data |= BIT(3);
+               ew32(FEXTNVM, mac_data);
                /* Request ME unconfigure the device from S0ix */
                mac_data = er32(H2ME);
                mac_data &= ~E1000_H2ME_START_DPG;
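
The four added FEXTNVM lines are a textbook read-modify-write: fetch the register, OR in exactly one bit, write the whole word back so every other bit survives. A userspace model of the pattern (the register store and accessors here are stand-ins, not the e1000e macros):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    static uint32_t fextnvm = 0x11;              /* pretend register backing store */
    static uint32_t er32(void) { return fextnvm; }
    static void ew32(uint32_t v) { fextnvm = v; }

    int main(void)
    {
            uint32_t v = er32();

            v |= BIT(3);                         /* set only bit 3, preserve the rest */
            ew32(v);
            printf("0x%x\n", fextnvm);           /* 0x19 */
            return 0;
    }
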
@@ -6987,21 +6991,8 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev = to_pci_dev(dev);
-       struct e1000_hw *hw = &adapter->hw;
-       u16 phy_data;
        int rc;
 
-       if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
-           hw->mac.type >= e1000_pch_adp) {
-               /* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */
-               e1e_rphy(hw, I217_MEMPWR, &phy_data);
-               phy_data |= I217_MEMPWR_MOEM;
-               e1e_wphy(hw, I217_MEMPWR, phy_data);
-
-               /* Disable LCD reset */
-               hw->phy.reset_disable = true;
-       }
-
        e1000e_flush_lpic(pdev);
 
        e1000e_pm_freeze(dev);
@@ -7023,8 +7014,6 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev = to_pci_dev(dev);
-       struct e1000_hw *hw = &adapter->hw;
-       u16 phy_data;
        int rc;
 
        /* Introduce S0ix implementation */
@@ -7035,17 +7024,6 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
        if (rc)
                return rc;
 
-       if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
-           hw->mac.type >= e1000_pch_adp) {
-               /* Unmask OEM Bits / Gig Disable / Restart AN 772_26[12] = 0 */
-               e1e_rphy(hw, I217_MEMPWR, &phy_data);
-               phy_data &= ~I217_MEMPWR_MOEM;
-               e1e_wphy(hw, I217_MEMPWR, phy_data);
-
-               /* Enable LCD reset */
-               hw->phy.reset_disable = false;
-       }
-
        return e1000e_pm_thaw(dev);
 }
 
index 18558a0..407fe8f 100644 (file)
@@ -37,6 +37,7 @@
 #include <net/tc_act/tc_mirred.h>
 #include <net/udp_tunnel.h>
 #include <net/xdp_sock.h>
+#include <linux/bitfield.h>
 #include "i40e_type.h"
 #include "i40e_prototype.h"
 #include <linux/net/intel/i40e_client.h>
@@ -1092,6 +1093,21 @@ static inline void i40e_write_fd_input_set(struct i40e_pf *pf,
                          (u32)(val & 0xFFFFFFFFULL));
 }
 
+/**
+ * i40e_get_pf_count - get PCI PF count.
+ * @hw: pointer to a hw.
+ *
+ * Reports the function number of the highest PCI physical
+ * function plus 1 as it is loaded from the NVM.
+ *
+ * Return: PCI PF count.
+ **/
+static inline u32 i40e_get_pf_count(struct i40e_hw *hw)
+{
+       return FIELD_GET(I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK,
+                        rd32(hw, I40E_GLGEN_PCIFCNCNT));
+}
+
 /* needed by i40e_ethtool.c */
 int i40e_up(struct i40e_vsi *vsi);
 void i40e_down(struct i40e_vsi *vsi);
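
i40e_get_pf_count() leans on FIELD_GET(), which masks the register and shifts right by the mask's lowest set bit. A standalone equivalent using the field layout from the new I40E_GLGEN_PCIFCNCNT defines, with a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    /* userspace stand-in for the kernel's FIELD_GET() */
    #define FIELD_GET(mask, val) (((val) & (mask)) >> __builtin_ctz(mask))

    #define PCIPFCNT_MASK 0x0000001Fu   /* bits 4:0, as in I40E_GLGEN_PCIFCNCNT */
    #define PCIVFCNT_MASK 0x00FF0000u   /* bits 23:16 */

    int main(void)
    {
            uint32_t reg = (64u << 16) | 8u;  /* example: 64 VFs and 8 PFs in one register */

            printf("pf=%u vf=%u\n",
                   (unsigned)FIELD_GET(PCIPFCNT_MASK, reg),
                   (unsigned)FIELD_GET(PCIVFCNT_MASK, reg));
            return 0;
    }
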
index 610f00c..19704f5 100644 (file)
@@ -2586,15 +2586,16 @@ static void i40e_diag_test(struct net_device *netdev,
 
                set_bit(__I40E_TESTING, pf->state);
 
+               if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+                   test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
+                       dev_warn(&pf->pdev->dev,
+                                "Cannot start offline testing when PF is in reset state.\n");
+                       goto skip_ol_tests;
+               }
+
                if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
                        dev_warn(&pf->pdev->dev,
                                 "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
-                       data[I40E_ETH_TEST_REG]         = 1;
-                       data[I40E_ETH_TEST_EEPROM]      = 1;
-                       data[I40E_ETH_TEST_INTR]        = 1;
-                       data[I40E_ETH_TEST_LINK]        = 1;
-                       eth_test->flags |= ETH_TEST_FL_FAILED;
-                       clear_bit(__I40E_TESTING, pf->state);
                        goto skip_ol_tests;
                }
 
@@ -2641,9 +2642,17 @@ static void i40e_diag_test(struct net_device *netdev,
                data[I40E_ETH_TEST_INTR] = 0;
        }
 
-skip_ol_tests:
-
        netif_info(pf, drv, netdev, "testing finished\n");
+       return;
+
+skip_ol_tests:
+       data[I40E_ETH_TEST_REG]         = 1;
+       data[I40E_ETH_TEST_EEPROM]      = 1;
+       data[I40E_ETH_TEST_INTR]        = 1;
+       data[I40E_ETH_TEST_LINK]        = 1;
+       eth_test->flags |= ETH_TEST_FL_FAILED;
+       clear_bit(__I40E_TESTING, pf->state);
+       netif_info(pf, drv, netdev, "testing failed\n");
 }
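
After the restructure, every early bail-out funnels through the one skip_ol_tests label, which marks all four results failed and drops the TESTING bit in a single place instead of repeating that bookkeeping at each exit. The shape of the pattern, reduced to a sketch:

    #include <stdio.h>

    enum { TEST_REG, TEST_EEPROM, TEST_INTR, TEST_LINK, TEST_MAX };

    static int run_selftest(int in_reset, long long *data)
    {
            if (in_reset)
                    goto skip_tests;        /* one exit path owns the failure bookkeeping */

            for (int i = 0; i < TEST_MAX; i++)
                    data[i] = 0;            /* all tests pass */
            return 0;

    skip_tests:
            for (int i = 0; i < TEST_MAX; i++)
                    data[i] = 1;            /* mark every test failed in one place */
            return -1;
    }

    int main(void)
    {
            long long data[TEST_MAX];

            printf("%d\n", run_selftest(1, data));
            return 0;
    }
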
 
 static void i40e_get_wol(struct net_device *netdev,
index 332a608..685556e 100644 (file)
@@ -551,6 +551,47 @@ void i40e_pf_reset_stats(struct i40e_pf *pf)
 }
 
 /**
+ * i40e_compute_pci_to_hw_id - compute index from PCI function.
+ * @vsi: ptr to the VSI to read from.
+ * @hw: ptr to the hardware info.
+ **/
+static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
+{
+       int pf_count = i40e_get_pf_count(hw);
+
+       if (vsi->type == I40E_VSI_SRIOV)
+               return (hw->port * BIT(7)) / pf_count + vsi->vf_id;
+
+       return hw->port + BIT(7);
+}
+
+/**
+ * i40e_stat_update64 - read and update a 64 bit stat from the chip.
+ * @hw: ptr to the hardware info.
+ * @hireg: the high 32 bit reg to read.
+ * @loreg: the low 32 bit reg to read.
+ * @offset_loaded: has the initial offset been loaded yet.
+ * @offset: ptr to current offset value.
+ * @stat: ptr to the stat.
+ *
+ * Since the device stats are not reset at PFReset, they will not
+ * be zeroed when the driver starts.  We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.
+ **/
+static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
+                              bool offset_loaded, u64 *offset, u64 *stat)
+{
+       u64 new_data;
+
+       new_data = rd64(hw, loreg);
+
+       if (!offset_loaded || new_data < *offset)
+               *offset = new_data;
+       *stat = new_data - *offset;
+}
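
Since the hardware counters survive a PF reset, the first raw read is latched as an offset and subtracted from every later read; if the raw value ever falls below the offset, the baseline is re-latched. The same arithmetic in standalone form:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    static void stat_update64(uint64_t raw, bool offset_loaded,
                              uint64_t *offset, uint64_t *stat)
    {
            if (!offset_loaded || raw < *offset)
                    *offset = raw;          /* (re-)latch the baseline */
            *stat = raw - *offset;          /* report a count that starts at zero */
    }

    int main(void)
    {
            uint64_t off = 0, stat = 0;

            stat_update64(1000, false, &off, &stat);  /* first read: stat = 0 */
            stat_update64(1500, true, &off, &stat);   /* stat = 500 */
            printf("%llu\n", (unsigned long long)stat);
            return 0;
    }
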
+
+/**
  * i40e_stat_update48 - read and update a 48 bit stat from the chip
  * @hw: ptr to the hardware info
  * @hireg: the high 32 bit reg to read
@@ -622,6 +663,34 @@ static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
 }
 
 /**
+ * i40e_stats_update_rx_discards - update rx_discards.
+ * @vsi: ptr to the VSI to be updated.
+ * @hw: ptr to the hardware info.
+ * @stat_idx: VSI's stat_counter_idx.
+ * @offset_loaded: ptr to the VSI's stat_offsets_loaded.
+ * @stat_offset: ptr to stat_offset to store first read of specific register.
+ * @stat: ptr to VSI's stat to be updated.
+ **/
+static void
+i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
+                             int stat_idx, bool offset_loaded,
+                             struct i40e_eth_stats *stat_offset,
+                             struct i40e_eth_stats *stat)
+{
+       u64 rx_rdpc, rx_rxerr;
+
+       i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
+                          &stat_offset->rx_discards, &rx_rdpc);
+       i40e_stat_update64(hw,
+                          I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
+                          I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
+                          offset_loaded, &stat_offset->rx_discards_other,
+                          &rx_rxerr);
+
+       stat->rx_discards = rx_rdpc + rx_rxerr;
+}
+
+/**
  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
  * @vsi: the VSI to be updated
  **/
@@ -680,6 +749,10 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
                           I40E_GLV_BPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_broadcast, &es->tx_broadcast);
+
+       i40e_stats_update_rx_discards(vsi, hw, stat_idx,
+                                     vsi->stat_offsets_loaded, oes, es);
+
        vsi->stat_offsets_loaded = true;
 }
 
@@ -1852,11 +1925,15 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                 * non-zero req_queue_pairs says that user requested a new
                 * queue count via ethtool's set_channels, so use this
                 * value for queues distribution across traffic classes
+                * We need at least one queue pair for the interface
+                * to be usable, as handled in the else branch below.
                 */
                if (vsi->req_queue_pairs > 0)
                        vsi->num_queue_pairs = vsi->req_queue_pairs;
                else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                        vsi->num_queue_pairs = pf->num_lan_msix;
+               else
+                       vsi->num_queue_pairs = 1;
        }
 
        /* Number of queues per enabled TC */
@@ -8542,6 +8619,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
                return -EOPNOTSUPP;
        }
 
+       if (!tc) {
+               dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
+               return -EINVAL;
+       }
+
        if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
            test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
                return -EBUSY;
@@ -10572,7 +10654,7 @@ static int i40e_reset(struct i40e_pf *pf)
  **/
 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
 {
-       int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
+       const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret;
@@ -10580,13 +10662,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
        int v;
 
        if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
-           i40e_check_recovery_mode(pf)) {
+           is_recovery_mode_reported)
                i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
-       }
 
        if (test_bit(__I40E_DOWN, pf->state) &&
-           !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
-           !old_recovery_mode_bit)
+           !test_bit(__I40E_RECOVERY_MODE, pf->state))
                goto clear_recovery;
        dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
 
@@ -10613,13 +10693,12 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
         * accordingly with regard to resources initialization
         * and deinitialization
         */
-       if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
-           old_recovery_mode_bit) {
+       if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
                if (i40e_get_capabilities(pf,
                                          i40e_aqc_opc_list_func_capabilities))
                        goto end_unlock;
 
-               if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+               if (is_recovery_mode_reported) {
                        /* we're staying in recovery mode so we'll reinitialize
                         * misc vector here
                         */
index 1908eed..7339003 100644 (file)
 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
 #define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
 #define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT                0x001C0AB4 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK  I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK  I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
 #define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
 #define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
 #define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
 #define I40E_VFQF_HKEY1_MAX_INDEX 12
 #define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_GL_RXERR1H(_i)             (0x00318004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1H_MAX_INDEX       143
+#define I40E_GL_RXERR1H_RXERR1H_SHIFT   0
+#define I40E_GL_RXERR1H_RXERR1H_MASK    I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1H_RXERR1H_SHIFT)
+#define I40E_GL_RXERR1L(_i)             (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1L_MAX_INDEX       143
+#define I40E_GL_RXERR1L_RXERR1L_SHIFT   0
+#define I40E_GL_RXERR1L_RXERR1L_MASK    I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1L_RXERR1L_SHIFT)
 #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
index 36a4ca1..7b3f30b 100644 (file)
@@ -1172,6 +1172,7 @@ struct i40e_eth_stats {
        u64 tx_broadcast;               /* bptc */
        u64 tx_discards;                /* tdpc */
        u64 tx_errors;                  /* tepc */
+       u64 rx_discards_other;          /* rxerr1 */
 };
 
 /* Statistics collected per VEB per TC */
index 2606e8f..86b0f21 100644 (file)
@@ -2147,6 +2147,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
                /* VFs only use TC 0 */
                vfres->vsi_res[0].qset_handle
                                          = le16_to_cpu(vsi->info.qs_handle[0]);
+               if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
+                       i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
+                       eth_zero_addr(vf->default_lan_addr.addr);
+               }
                ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
                                vf->default_lan_addr.addr);
        }
@@ -2282,7 +2286,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
        }
 
        if (vf->adq_enabled) {
-               for (i = 0; i < I40E_MAX_VF_VSI; i++)
+               for (i = 0; i < vf->num_tc; i++)
                        num_qps_all += vf->ch[i].num_qps;
                if (num_qps_all != qci->num_queue_pairs) {
                        aq_ret = I40E_ERR_PARAM;
index 49aed3e..0ea0361 100644 (file)
@@ -64,7 +64,6 @@ struct iavf_vsi {
        u16 id;
        DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
        int base_vector;
-       u16 work_limit;
        u16 qs_handle;
        void *priv;     /* client driver data reference. */
 };
@@ -159,8 +158,12 @@ struct iavf_vlan {
 struct iavf_vlan_filter {
        struct list_head list;
        struct iavf_vlan vlan;
-       bool remove;            /* filter needs to be removed */
-       bool add;               /* filter needs to be added */
+       struct {
+               u8 is_new_vlan:1;       /* filter is new, wait for PF answer */
+               u8 remove:1;            /* filter needs to be removed */
+               u8 add:1;               /* filter needs to be added */
+               u8 padding:5;
+       };
 };
 
 #define IAVF_MAX_TRAFFIC_CLASS 4
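
The iavf_vlan_filter change trades two bools for one byte of bitfields, shrinking the entry while adding the new is_new_vlan state. A compilable look at the packing (the struct is named here for brevity; the driver uses an anonymous one):

    #include <stdint.h>
    #include <stdio.h>

    struct vlan_filter_state {
            uint8_t is_new_vlan:1;  /* filter sent to PF, answer pending */
            uint8_t remove:1;       /* filter needs to be removed */
            uint8_t add:1;          /* filter needs to be added */
            uint8_t padding:5;
    };

    int main(void)
    {
            struct vlan_filter_state s = { .add = 1 };

            s.is_new_vlan = 1;
            s.add = 0;              /* request sent, waiting for the PF's answer */
            printf("size=%zu new=%u add=%u\n", sizeof(s), s.is_new_vlan, s.add);
            return 0;
    }
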
@@ -461,6 +464,10 @@ static inline const char *iavf_state_str(enum iavf_state_t state)
                return "__IAVF_INIT_VERSION_CHECK";
        case __IAVF_INIT_GET_RESOURCES:
                return "__IAVF_INIT_GET_RESOURCES";
+       case __IAVF_INIT_EXTENDED_CAPS:
+               return "__IAVF_INIT_EXTENDED_CAPS";
+       case __IAVF_INIT_CONFIG_ADAPTER:
+               return "__IAVF_INIT_CONFIG_ADAPTER";
        case __IAVF_INIT_SW:
                return "__IAVF_INIT_SW";
        case __IAVF_INIT_FAILED:
@@ -520,6 +527,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter);
 int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
 int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
 void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
+u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter);
 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
 void iavf_configure_queues(struct iavf_adapter *adapter);
 void iavf_deconfigure_queues(struct iavf_adapter *adapter);
index 3bb5671..e535d4c 100644 (file)
@@ -692,12 +692,8 @@ static int __iavf_get_coalesce(struct net_device *netdev,
                               struct ethtool_coalesce *ec, int queue)
 {
        struct iavf_adapter *adapter = netdev_priv(netdev);
-       struct iavf_vsi *vsi = &adapter->vsi;
        struct iavf_ring *rx_ring, *tx_ring;
 
-       ec->tx_max_coalesced_frames = vsi->work_limit;
-       ec->rx_max_coalesced_frames = vsi->work_limit;
-
        /* Rx and Tx usecs per queue value. If user doesn't specify the
         * queue, return queue 0's value to represent.
         */
@@ -825,12 +821,8 @@ static int __iavf_set_coalesce(struct net_device *netdev,
                               struct ethtool_coalesce *ec, int queue)
 {
        struct iavf_adapter *adapter = netdev_priv(netdev);
-       struct iavf_vsi *vsi = &adapter->vsi;
        int i;
 
-       if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
-               vsi->work_limit = ec->tx_max_coalesced_frames_irq;
-
        if (ec->rx_coalesce_usecs == 0) {
                if (ec->use_adaptive_rx_coalesce)
                        netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
@@ -1969,8 +1961,6 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
 
 static const struct ethtool_ops iavf_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
-                                    ETHTOOL_COALESCE_MAX_FRAMES |
-                                    ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE,
        .get_drvinfo            = iavf_get_drvinfo,
        .get_link               = ethtool_op_get_link,
index 7dfcf78..2e2c153 100644 (file)
@@ -843,7 +843,7 @@ static void iavf_restore_filters(struct iavf_adapter *adapter)
  * iavf_get_num_vlans_added - get number of VLANs added
  * @adapter: board private structure
  */
-static u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
+u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
 {
        return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
                bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
@@ -906,11 +906,6 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
        if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
                return -ENOMEM;
 
-       if (proto == cpu_to_be16(ETH_P_8021Q))
-               set_bit(vid, adapter->vsi.active_cvlans);
-       else
-               set_bit(vid, adapter->vsi.active_svlans);
-
        return 0;
 }
 
@@ -984,7 +979,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
                list_add_tail(&f->list, &adapter->mac_filter_list);
                f->add = true;
                f->is_new_mac = true;
-               f->is_primary = false;
+               f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
        } else {
                f->remove = false;
@@ -2245,7 +2240,6 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
 
        adapter->vsi.back = adapter;
        adapter->vsi.base_vector = 1;
-       adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
        vsi->netdev = adapter->netdev;
        vsi->qs_handle = adapter->vsi_res->qset_handle;
        if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
@@ -2956,6 +2950,9 @@ continue_reset:
        adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
        iavf_misc_irq_enable(adapter);
 
+       bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
+       bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
+
        mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
 
        /* We were running when the reset started, so we need to restore some
index 978f651..06d1879 100644 (file)
@@ -194,7 +194,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
        struct iavf_tx_buffer *tx_buf;
        struct iavf_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
-       unsigned int budget = vsi->work_limit;
+       unsigned int budget = IAVF_DEFAULT_IRQ_WORK;
 
        tx_buf = &tx_ring->tx_bi[i];
        tx_desc = IAVF_TX_DESC(tx_ring, i);
@@ -1285,11 +1285,10 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
 {
        struct iavf_rx_buffer *rx_buffer;
 
-       if (!size)
-               return NULL;
-
        rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
        prefetchw(rx_buffer->page);
+       if (!size)
+               return rx_buffer;
 
        /* we are reusing so sync this buffer for CPU use */
        dma_sync_single_range_for_cpu(rx_ring->dev,
index 782450d..1603e99 100644 (file)
@@ -627,6 +627,33 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter)
 }
 
 /**
+ * iavf_vlan_add_reject
+ * @adapter: adapter structure
+ *
+ * Remove VLAN filters from list based on PF response.
+ **/
+static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
+{
+       struct iavf_vlan_filter *f, *ftmp;
+
+       spin_lock_bh(&adapter->mac_vlan_list_lock);
+       list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+               if (f->is_new_vlan) {
+                       if (f->vlan.tpid == ETH_P_8021Q)
+                               clear_bit(f->vlan.vid,
+                                         adapter->vsi.active_cvlans);
+                       else
+                               clear_bit(f->vlan.vid,
+                                         adapter->vsi.active_svlans);
+
+                       list_del(&f->list);
+                       kfree(f);
+               }
+       }
+       spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
  * iavf_add_vlans
  * @adapter: adapter structure
  *
@@ -683,6 +710,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
                                vvfl->vlan_id[i] = f->vlan.vid;
                                i++;
                                f->add = false;
+                               f->is_new_vlan = true;
                                if (i == count)
                                        break;
                        }
@@ -695,10 +723,18 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
                iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
                kfree(vvfl);
        } else {
+               u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters;
+               u16 current_vlans = iavf_get_num_vlans_added(adapter);
                struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
 
                adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;
 
+               if ((count + current_vlans) > max_vlans &&
+                   current_vlans < max_vlans) {
+                       count = max_vlans - iavf_get_num_vlans_added(adapter);
+                       more = true;
+               }
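
When the queued adds would exceed the PF's max_filters budget, the batch is clamped to the remaining headroom and `more` is set so the leftovers go out in a later message. The clamp arithmetic in isolation:

    #include <stdio.h>
    #include <stdbool.h>

    int main(void)
    {
            unsigned int max_vlans = 16, current_vlans = 12, count = 10;
            bool more = false;

            /* only clamp while there is still headroom left */
            if (count + current_vlans > max_vlans && current_vlans < max_vlans) {
                    count = max_vlans - current_vlans;  /* send what still fits */
                    more = true;                        /* revisit the rest later */
            }
            printf("count=%u more=%d\n", count, more);  /* count=4 more=1 */
            return 0;
    }
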
+
                len = sizeof(*vvfl_v2) + ((count - 1) *
                                          sizeof(struct virtchnl_vlan_filter));
                if (len > IAVF_MAX_AQ_BUF_SIZE) {
@@ -725,6 +761,9 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
                                        &adapter->vlan_v2_caps.filtering.filtering_support;
                                struct virtchnl_vlan *vlan;
 
+                               if (i == count)
+                                       break;
+
                                /* give priority over outer if it's enabled */
                                if (filtering_support->outer)
                                        vlan = &vvfl_v2->filters[i].outer;
@@ -736,8 +775,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
 
                                i++;
                                f->add = false;
-                               if (i == count)
-                                       break;
+                               f->is_new_vlan = true;
                        }
                }
 
@@ -2080,6 +2118,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                         */
                        iavf_netdev_features_vlan_strip_set(netdev, true);
                        break;
+               case VIRTCHNL_OP_ADD_VLAN_V2:
+                       iavf_vlan_add_reject(adapter);
+                       dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
+                                iavf_stat_str(&adapter->hw, v_retval));
+                       break;
                default:
                        dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
                                v_retval, iavf_stat_str(&adapter->hw, v_retval),
@@ -2332,6 +2375,24 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                spin_unlock_bh(&adapter->adv_rss_lock);
                }
                break;
+       case VIRTCHNL_OP_ADD_VLAN_V2: {
+               struct iavf_vlan_filter *f;
+
+               spin_lock_bh(&adapter->mac_vlan_list_lock);
+               list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+                       if (f->is_new_vlan) {
+                               f->is_new_vlan = false;
+                               if (f->vlan.tpid == ETH_P_8021Q)
+                                       set_bit(f->vlan.vid,
+                                               adapter->vsi.active_cvlans);
+                               else
+                                       set_bit(f->vlan.vid,
+                                               adapter->vsi.active_svlans);
+                       }
+               }
+               spin_unlock_bh(&adapter->mac_vlan_list_lock);
+               }
+               break;
        case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
                /* PF enabled vlan strip on this VF.
                 * Update netdev->features if needed to be in sync with ethtool.
index 61dd2f1..b41bc3d 100644 (file)
@@ -5,6 +5,7 @@
 #define _ICE_DEVIDS_H_
 
 /* Device IDs */
+#define ICE_DEV_ID_E822_SI_DFLT         0x1888
 /* Intel(R) Ethernet Connection E823-L for backplane */
 #define ICE_DEV_ID_E823L_BACKPLANE     0x124C
 /* Intel(R) Ethernet Connection E823-L for SFP */
index 3991d62..3337314 100644 (file)
@@ -814,6 +814,8 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf)
        devlink_port_unregister(devlink_port);
 }
 
+#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
+
 /**
  * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
  * @devlink: the devlink instance
@@ -840,8 +842,9 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
        struct ice_pf *pf = devlink_priv(devlink);
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_hw *hw = &pf->hw;
-       void *nvm_data;
-       u32 nvm_size;
+       u8 *nvm_data, *tmp, i;
+       u32 nvm_size, left;
+       s8 num_blks;
        int status;
 
        nvm_size = hw->flash.flash_size;
@@ -849,26 +852,44 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
        if (!nvm_data)
                return -ENOMEM;
 
-       status = ice_acquire_nvm(hw, ICE_RES_READ);
-       if (status) {
-               dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
-                       status, hw->adminq.sq_last_status);
-               NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
-               vfree(nvm_data);
-               return status;
-       }
 
-       status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
-       if (status) {
-               dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
-                       nvm_size, status, hw->adminq.sq_last_status);
-               NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+       num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE);
+       tmp = nvm_data;
+       left = nvm_size;
+
+       /* Some systems take longer to read the NVM than others which causes the
+        * FW to reclaim the NVM lock before the entire NVM has been read. Fix
+        * this by breaking the reads of the NVM into smaller chunks that will
+        * probably not take as long. This has some overhead since we are
+        * increasing the number of AQ commands, but it should always work.
+        */
+       for (i = 0; i < num_blks; i++) {
+               u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left);
+
+               status = ice_acquire_nvm(hw, ICE_RES_READ);
+               if (status) {
+                       dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+                               status, hw->adminq.sq_last_status);
+                       NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+                       vfree(nvm_data);
+                       return -EIO;
+               }
+
+               status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE,
+                                          &read_sz, tmp, false);
+               if (status) {
+                       dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
+                               read_sz, status, hw->adminq.sq_last_status);
+                       NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+                       ice_release_nvm(hw);
+                       vfree(nvm_data);
+                       return -EIO;
+               }
                ice_release_nvm(hw);
-               vfree(nvm_data);
-               return status;
-       }
 
-       ice_release_nvm(hw);
+               tmp += read_sz;
+               left -= read_sz;
+       }
 
        *data = nvm_data;
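
The reworked snapshot path acquires and releases the NVM semaphore once per 1 MiB block, so a slow read can no longer outlive the firmware's lock timeout, and min_t() trims the final partial block. The loop skeleton with the hardware calls stubbed out:

    #include <stdint.h>
    #include <stdio.h>

    #define BLK_SIZE (1024u * 1024u)
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            uint32_t nvm_size = (5u * 1024u * 1024u) / 2u;  /* 2.5 MiB example image */
            uint32_t left = nvm_size, offset = 0;

            while (left) {
                    uint32_t read_sz = MIN(BLK_SIZE, left);

                    /* acquire lock, read [offset, offset + read_sz), release lock */
                    printf("read %u bytes at %u\n",
                           (unsigned)read_sz, (unsigned)offset);
                    offset += read_sz;
                    left -= read_sz;
            }
            return 0;
    }
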
 
index 1e71b70..4efa5e5 100644 (file)
@@ -658,7 +658,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
                rx_desc = ICE_RX_DESC(rx_ring, i);
 
                if (!(rx_desc->wb.status_error0 &
-                   cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
+                   (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
+                    cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
                        continue;
 
                rx_buf = &rx_ring->rx_buf[i];
@@ -2190,6 +2191,42 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
 }
 
 /**
+ * ice_set_phy_type_from_speed - set phy_types based on speeds
+ * and advertised modes
+ * @ks: ethtool link ksettings struct
+ * @phy_type_low: pointer to the lower part of phy_type
+ * @phy_type_high: pointer to the higher part of phy_type
+ * @adv_link_speed: targeted link speeds bitmap
+ */
+static void
+ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks,
+                           u64 *phy_type_low, u64 *phy_type_high,
+                           u16 adv_link_speed)
+{
+       /* Handle 1000M speed in a special way because ice_update_phy_type
+        * enables all link modes, but having mixed copper and optical
+        * standards is not supported.
+        */
+       adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB;
+
+       if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+                                                 1000baseT_Full))
+               *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T |
+                                ICE_PHY_TYPE_LOW_1G_SGMII;
+
+       if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+                                                 1000baseKX_Full))
+               *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX;
+
+       if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+                                                 1000baseX_Full))
+               *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX |
+                                ICE_PHY_TYPE_LOW_1000BASE_LX;
+
+       ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed);
+}
+
+/**
  * ice_set_link_ksettings - Set Speed and Duplex
  * @netdev: network interface device structure
  * @ks: ethtool ksettings
@@ -2320,7 +2357,8 @@ ice_set_link_ksettings(struct net_device *netdev,
                adv_link_speed = curr_link_speed;
 
        /* Convert the advertise link speeds to their corresponded PHY_TYPE */
-       ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed);
+       ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high,
+                                   adv_link_speed);
 
        if (!autoneg_changed && adv_link_speed == curr_link_speed) {
                netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -3470,6 +3508,16 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
        new_rx = ch->combined_count + ch->rx_count;
        new_tx = ch->combined_count + ch->tx_count;
 
+       if (new_rx < vsi->tc_cfg.numtc) {
+               netdev_err(dev, "Cannot set less Rx channels, than Traffic Classes you have (%u)\n",
+                          vsi->tc_cfg.numtc);
+               return -EINVAL;
+       }
+       if (new_tx < vsi->tc_cfg.numtc) {
+               netdev_err(dev, "Cannot set less Tx channels, than Traffic Classes you have (%u)\n",
+                          vsi->tc_cfg.numtc);
+               return -EINVAL;
+       }
        if (new_rx > ice_get_max_rxq(pf)) {
                netdev_err(dev, "Maximum allowed Rx channels is %d\n",
                           ice_get_max_rxq(pf));
index 665a344..3dc5662 100644 (file)
@@ -736,7 +736,87 @@ static int ice_finalize_update(struct pldmfw *context)
        return 0;
 }
 
-static const struct pldmfw_ops ice_fwu_ops = {
+struct ice_pldm_pci_record_id {
+       u32 vendor;
+       u32 device;
+       u32 subsystem_vendor;
+       u32 subsystem_device;
+};
+
+/**
+ * ice_op_pci_match_record - Check if a PCI device matches the record
+ * @context: PLDM fw update structure
+ * @record: list of records extracted from the PLDM image
+ *
+ * Determine if the PCI device associated with this device matches the record
+ * data provided.
+ *
+ * Searches the descriptor TLVs and extracts the relevant descriptor data into
+ * a pldm_pci_record_id. This is then compared against the PCI device ID
+ * information.
+ *
+ * Returns: true if the device matches the record, false otherwise.
+ */
+static bool
+ice_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+       struct pci_dev *pdev = to_pci_dev(context->dev);
+       struct ice_pldm_pci_record_id id = {
+               .vendor = PCI_ANY_ID,
+               .device = PCI_ANY_ID,
+               .subsystem_vendor = PCI_ANY_ID,
+               .subsystem_device = PCI_ANY_ID,
+       };
+       struct pldmfw_desc_tlv *desc;
+
+       list_for_each_entry(desc, &record->descs, entry) {
+               u16 value;
+               int *ptr;
+
+               switch (desc->type) {
+               case PLDM_DESC_ID_PCI_VENDOR_ID:
+                       ptr = &id.vendor;
+                       break;
+               case PLDM_DESC_ID_PCI_DEVICE_ID:
+                       ptr = &id.device;
+                       break;
+               case PLDM_DESC_ID_PCI_SUBVENDOR_ID:
+                       ptr = &id.subsystem_vendor;
+                       break;
+               case PLDM_DESC_ID_PCI_SUBDEV_ID:
+                       ptr = &id.subsystem_device;
+                       break;
+               default:
+                       /* Skip unrelated TLVs */
+                       continue;
+               }
+
+               value = get_unaligned_le16(desc->data);
+               /* A value of zero for one of the descriptors is sometimes
+                * used when the record should ignore this field when matching
+                * the device, for example if the record applies to any
+                * subsystem device or vendor.
+                */
+               if (value)
+                       *ptr = value;
+               else
+                       *ptr = PCI_ANY_ID;
+       }
+
+       /* the E822 device can have a generic device ID so check for that */
+       if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) &&
+           (id.device == PCI_ANY_ID || id.device == pdev->device ||
+           id.device == ICE_DEV_ID_E822_SI_DFLT) &&
+           (id.subsystem_vendor == PCI_ANY_ID ||
+           id.subsystem_vendor == pdev->subsystem_vendor) &&
+           (id.subsystem_device == PCI_ANY_ID ||
+           id.subsystem_device == pdev->subsystem_device))
+               return true;
+
+       return false;
+}
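
ice_op_pci_match_record() maps a zero descriptor to PCI_ANY_ID so it matches anything, and on top of that accepts a record naming the generic E822 default ID for any E822 part. The matching rule reduced to one expression (PCI_ANY_ID as in the kernel headers; the device IDs are illustrative):

    #include <stdio.h>
    #include <stdbool.h>

    #define PCI_ANY_ID (~0)
    #define E822_SI_DFLT 0x1888

    int main(void)
    {
            int dev = 0x124C;          /* actual device ID of this board */
            int want = 0x1888;         /* record names the E822 default ID */

            /* wildcard, exact match, or the generic E822 default ID */
            bool ok = want == PCI_ANY_ID || want == dev || want == E822_SI_DFLT;

            printf("%d\n", ok);        /* 1: the record is accepted */
            return 0;
    }
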
+
+static const struct pldmfw_ops ice_fwu_ops_e810 = {
        .match_record = &pldmfw_op_pci_match_record,
        .send_package_data = &ice_send_package_data,
        .send_component_table = &ice_send_component_table,
@@ -744,6 +824,14 @@ static const struct pldmfw_ops ice_fwu_ops = {
        .finalize_update = &ice_finalize_update,
 };
 
+static const struct pldmfw_ops ice_fwu_ops_e822 = {
+       .match_record = &ice_op_pci_match_record,
+       .send_package_data = &ice_send_package_data,
+       .send_component_table = &ice_send_component_table,
+       .flash_component = &ice_flash_component,
+       .finalize_update = &ice_finalize_update,
+};
+
 /**
  * ice_get_pending_updates - Check if the component has a pending update
  * @pf: the PF driver structure
@@ -921,7 +1009,11 @@ int ice_devlink_flash_update(struct devlink *devlink,
 
        memset(&priv, 0, sizeof(priv));
 
-       priv.context.ops = &ice_fwu_ops;
+       /* the E822 device needs a slightly different set of ops */
+       if (hw->mac_type == ICE_MAC_GENERIC)
+               priv.context.ops = &ice_fwu_ops_e822;
+       else
+               priv.context.ops = &ice_fwu_ops_e810;
        priv.context.dev = dev;
        priv.extack = extack;
        priv.pf = pf;
index 454e01a..f7f9c97 100644 (file)
@@ -909,7 +909,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
  * @vsi: the VSI being configured
  * @ctxt: VSI context structure
  */
-static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 {
        u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
        u16 num_txq_per_tc, num_rxq_per_tc;
@@ -982,7 +982,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
        else
                vsi->num_rxq = num_rxq_per_tc;
 
+       if (vsi->num_rxq > vsi->alloc_rxq) {
+               dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
+                       vsi->num_rxq, vsi->alloc_rxq);
+               return -EINVAL;
+       }
+
        vsi->num_txq = tx_count;
+       if (vsi->num_txq > vsi->alloc_txq) {
+               dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
+                       vsi->num_txq, vsi->alloc_txq);
+               return -EINVAL;
+       }
 
        if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
                dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
@@ -1000,6 +1011,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
         */
        ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
        ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
+
+       return 0;
 }
 
 /**
@@ -1187,7 +1200,10 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
        if (vsi->type == ICE_VSI_CHNL) {
                ice_chnl_vsi_setup_q_map(vsi, ctxt);
        } else {
-               ice_vsi_setup_q_map(vsi, ctxt);
+               ret = ice_vsi_setup_q_map(vsi, ctxt);
+               if (ret)
+                       goto out;
+
                if (!init_vsi) /* means VSI being updated */
                        /* must indicate which sections of the VSI context are
                         * being modified
@@ -3464,7 +3480,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
  *
  * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
  */
-static void
+static int
 ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
                           u8 ena_tc)
 {
@@ -3513,7 +3529,18 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
 
        /* Set actual Tx/Rx queue pairs */
        vsi->num_txq = offset + qcount_tx;
+       if (vsi->num_txq > vsi->alloc_txq) {
+               dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
+                       vsi->num_txq, vsi->alloc_txq);
+               return -EINVAL;
+       }
+
        vsi->num_rxq = offset + qcount_rx;
+       if (vsi->num_rxq > vsi->alloc_rxq) {
+               dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
+                       vsi->num_rxq, vsi->alloc_rxq);
+               return -EINVAL;
+       }
 
        /* Setup queue TC[0].qmap for given VSI context */
        ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
@@ -3531,6 +3558,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
        dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n",  vsi->num_rxq);
        dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
                vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
+
+       return 0;
 }
 
 /**
@@ -3580,9 +3609,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
 
        if (vsi->type == ICE_VSI_PF &&
            test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
-               ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
+               ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
        else
-               ice_vsi_setup_q_map(vsi, ctx);
+               ret = ice_vsi_setup_q_map(vsi, ctx);
+
+       if (ret)
+               goto out;
 
        /* must indicate which sections of the VSI context are being modified */
        ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
index e1cae25..9f02b60 100644 (file)
@@ -4656,6 +4656,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
                ice_set_safe_mode_caps(hw);
        }
 
+       hw->ucast_shared = true;
+
        err = ice_init_pf(pf);
        if (err) {
                dev_err(dev, "ice_init_pf failed: %d\n", err);
@@ -5413,6 +5415,7 @@ static const struct pci_device_id ice_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
        { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
+       { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
        /* required last entry */
        { 0, }
 };
@@ -5763,25 +5766,38 @@ static netdev_features_t
 ice_fix_features(struct net_device *netdev, netdev_features_t features)
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
-       netdev_features_t supported_vlan_filtering;
-       netdev_features_t requested_vlan_filtering;
-       struct ice_vsi *vsi = np->vsi;
-
-       requested_vlan_filtering = features & NETIF_VLAN_FILTERING_FEATURES;
-
-       /* make sure supported_vlan_filtering works for both SVM and DVM */
-       supported_vlan_filtering = NETIF_F_HW_VLAN_CTAG_FILTER;
-       if (ice_is_dvm_ena(&vsi->back->hw))
-               supported_vlan_filtering |= NETIF_F_HW_VLAN_STAG_FILTER;
-
-       if (requested_vlan_filtering &&
-           requested_vlan_filtering != supported_vlan_filtering) {
-               if (requested_vlan_filtering & NETIF_F_HW_VLAN_CTAG_FILTER) {
-                       netdev_warn(netdev, "cannot support requested VLAN filtering settings, enabling all supported VLAN filtering settings\n");
-                       features |= supported_vlan_filtering;
+       netdev_features_t req_vlan_fltr, cur_vlan_fltr;
+       bool cur_ctag, cur_stag, req_ctag, req_stag;
+
+       cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
+       cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
+       cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
+
+       req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
+       req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
+       req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
+
+       if (req_vlan_fltr != cur_vlan_fltr) {
+               if (ice_is_dvm_ena(&np->vsi->back->hw)) {
+                       if (req_ctag && req_stag) {
+                               features |= NETIF_VLAN_FILTERING_FEATURES;
+                       } else if (!req_ctag && !req_stag) {
+                               features &= ~NETIF_VLAN_FILTERING_FEATURES;
+                       } else if ((!cur_ctag && req_ctag && !cur_stag) ||
+                                  (!cur_stag && req_stag && !cur_ctag)) {
+                               features |= NETIF_VLAN_FILTERING_FEATURES;
+                               netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
+                       } else if ((cur_ctag && !req_ctag && cur_stag) ||
+                                  (cur_stag && !req_stag && cur_ctag)) {
+                               features &= ~NETIF_VLAN_FILTERING_FEATURES;
+                               netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
+                       }
                } else {
-                       netdev_warn(netdev, "cannot support requested VLAN filtering settings, clearing all supported VLAN filtering settings\n");
-                       features &= ~supported_vlan_filtering;
+                       if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
+                               netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
+
+                       if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
+                               features |= NETIF_F_HW_VLAN_CTAG_FILTER;
                }
        }
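
In DVM mode the hardware cannot filter 802.1Q and 802.1ad independently, so a mixed request is snapped to both-on or both-off depending on which way the user moved. A simplified decision helper; it collapses the four kernel conditions into two, so treat it as a sketch rather than the exact ice logic:

    #include <stdio.h>
    #include <stdbool.h>

    /* fix up a (ctag, stag) request for a device that can't split them */
    static void fix_vlan_fltr(bool cur_ctag, bool cur_stag,
                              bool *req_ctag, bool *req_stag)
    {
            if (*req_ctag == *req_stag)
                    return;                           /* already consistent */
            if ((!cur_ctag && *req_ctag) || (!cur_stag && *req_stag))
                    *req_ctag = *req_stag = true;     /* user enabled one: enable both */
            else
                    *req_ctag = *req_stag = false;    /* user disabled one: disable both */
    }

    int main(void)
    {
            bool ctag = true, stag = false;

            fix_vlan_fltr(false, false, &ctag, &stag);  /* both end up on */
            printf("ctag=%d stag=%d\n", ctag, stag);
            return 0;
    }
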
 
@@ -5997,10 +6013,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
        if (vsi->netdev) {
                ice_set_rx_mode(vsi->netdev);
 
-               err = ice_vsi_vlan_setup(vsi);
+               if (vsi->type != ICE_VSI_LB) {
+                       err = ice_vsi_vlan_setup(vsi);
 
-               if (err)
-                       return err;
+                       if (err)
+                               return err;
+               }
        }
        ice_vsi_cfg_dcb_rings(vsi);
 
index 662947c..ef9344e 100644 (file)
@@ -2271,7 +2271,7 @@ static int
 ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
 {
        tx->quad = port / ICE_PORTS_PER_QUAD;
-       tx->quad_offset = tx->quad * INDEX_PER_PORT;
+       tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
        tx->len = INDEX_PER_PORT;
 
        return ice_ptp_alloc_tx_tracker(tx);
index afd048d..10e396a 100644 (file)
@@ -49,6 +49,37 @@ struct ice_perout_channel {
  * To allow multiple ports to access the shared register block independently,
  * the blocks are split up so that indexes are assigned to each port based on
  * hardware logical port number.
+ *
+ * The timestamp blocks are handled differently for E810- and E822-based
+ * devices. In E810 devices, each port has its own block of timestamps, while in
+ * E822 there is a need to logically break the block of registers into smaller
+ * chunks based on the port number to avoid collisions.
+ *
+ * Example for port 5 in E810:
+ *  +--------+--------+--------+--------+--------+--------+--------+--------+
+ *  |register|register|register|register|register|register|register|register|
+ *  | block  | block  | block  | block  | block  | block  | block  | block  |
+ *  |  for   |  for   |  for   |  for   |  for   |  for   |  for   |  for   |
+ *  | port 0 | port 1 | port 2 | port 3 | port 4 | port 5 | port 6 | port 7 |
+ *  +--------+--------+--------+--------+--------+--------+--------+--------+
+ *                                               ^^
+ *                                               ||
+ *                                               |---  quad offset is always 0
+ *                                               ---- quad number
+ *
+ * Example for port 5 in E822:
+ * +-----------------------------+-----------------------------+
+ * |  register block for quad 0  |  register block for quad 1  |
+ * |+------+------+------+------+|+------+------+------+------+|
+ * ||port 0|port 1|port 2|port 3|||port 0|port 1|port 2|port 3||
+ * |+------+------+------+------+|+------+------+------+------+|
+ * +-----------------------------+-------^---------------------+
+ *                                ^      |
+ *                                |      --- quad offset*
+ *                                ---- quad number
+ *
+ *   * PHY port 5 is port 1 in quad 1
+ *
  */
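
The quad_offset fix a few hunks up falls straight out of this diagram: an E822 tracker index comes from the port's position within its quad, not from the quad number. Worked through for all eight ports (the INDEX_PER_PORT value here is an assumption, not the ice_ptp.h constant):

    #include <stdio.h>

    #define PORTS_PER_QUAD 4
    #define INDEX_PER_PORT 16   /* assumed tracker slots per port */

    int main(void)
    {
            for (int port = 0; port < 8; port++) {
                    int quad = port / PORTS_PER_QUAD;
                    int fixed = (port % PORTS_PER_QUAD) * INDEX_PER_PORT;
                    int buggy = quad * INDEX_PER_PORT;  /* old formula collided across ports */

                    printf("port %d: quad %d, offset %d (pre-fix: %d)\n",
                           port, quad, fixed, buggy);
            }
            return 0;
    }
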
 
 /**
index bb1721f..f4907a3 100644 (file)
@@ -1310,39 +1310,6 @@ out_put_vf:
 }
 
 /**
- * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
- * @pf: PF used to reference the switch's rules
- * @umac: unicast MAC to compare against existing switch rules
- *
- * Return true on the first/any match, else return false
- */
-static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
-{
-       struct ice_sw_recipe *mac_recipe_list =
-               &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
-       struct ice_fltr_mgmt_list_entry *list_itr;
-       struct list_head *rule_head;
-       struct mutex *rule_lock; /* protect MAC filter list access */
-
-       rule_head = &mac_recipe_list->filt_rules;
-       rule_lock = &mac_recipe_list->filt_rule_lock;
-
-       mutex_lock(rule_lock);
-       list_for_each_entry(list_itr, rule_head, list_entry) {
-               u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
-
-               if (ether_addr_equal(existing_mac, umac)) {
-                       mutex_unlock(rule_lock);
-                       return true;
-               }
-       }
-
-       mutex_unlock(rule_lock);
-
-       return false;
-}
-
-/**
  * ice_set_vf_mac
  * @netdev: network interface device structure
  * @vf_id: VF identifier
@@ -1376,13 +1343,6 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        if (ret)
                goto out_put_vf;
 
-       if (ice_unicast_mac_exists(pf, mac)) {
-               netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
-                          mac, vf_id, mac);
-               ret = -EINVAL;
-               goto out_put_vf;
-       }
-
        mutex_lock(&vf->cfg_lock);
 
        /* VF is notified of its new MAC via the PF's response to the
index 0a0c55f..b803f2a 100644 (file)
@@ -524,6 +524,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
         */
        fltr->rid = rule_added.rid;
        fltr->rule_id = rule_added.rule_id;
+       fltr->dest_id = rule_added.vsi_handle;
 
 exit:
        kfree(list);
@@ -993,7 +994,9 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
                n_proto_key = ntohs(match.key->n_proto);
                n_proto_mask = ntohs(match.mask->n_proto);
 
-               if (n_proto_key == ETH_P_ALL || n_proto_key == 0) {
+               if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
+                   fltr->tunnel_type == TNL_GTPU ||
+                   fltr->tunnel_type == TNL_GTPC) {
                        n_proto_key = 0;
                        n_proto_mask = 0;
                } else {
index 3f8b727..836dce8 100644 (file)
@@ -1751,11 +1751,13 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 
        protocol = vlan_get_protocol(skb);
 
-       if (eth_p_mpls(protocol))
+       if (eth_p_mpls(protocol)) {
                ip.hdr = skb_inner_network_header(skb);
-       else
+               l4.hdr = skb_checksum_start(skb);
+       } else {
                ip.hdr = skb_network_header(skb);
-       l4.hdr = skb_checksum_start(skb);
+               l4.hdr = skb_transport_header(skb);
+       }
 
        /* compute outer L2 header size */
        l2_len = ip.hdr - skb->data;
index cd8e6b5..7adf9dd 100644 (file)
@@ -504,6 +504,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
        }
 
        if (ice_is_vf_disabled(vf)) {
+               vsi = ice_get_vf_vsi(vf);
+               if (WARN_ON(!vsi))
+                       return -EINVAL;
+               ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
+               ice_vsi_stop_all_rx_rings(vsi);
                dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
                        vf->vf_id);
                return 0;
index 1d9b84c..24188ec 100644 (file)
@@ -1569,35 +1569,27 @@ error_param:
  */
 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 {
-       enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
        struct virtchnl_vsi_queue_config_info *qci =
            (struct virtchnl_vsi_queue_config_info *)msg;
        struct virtchnl_queue_pair_info *qpi;
        struct ice_pf *pf = vf->pf;
        struct ice_vsi *vsi;
-       int i, q_idx;
+       int i = -1, q_idx;
 
-       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+       if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
                goto error_param;
-       }
 
-       if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+       if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
                goto error_param;
-       }
 
        vsi = ice_get_vf_vsi(vf);
-       if (!vsi) {
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+       if (!vsi)
                goto error_param;
-       }
 
        if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
            qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
                dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
                        vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
-               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
                goto error_param;
        }
 
@@ -1610,7 +1602,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
                    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
                    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
                    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
                        goto error_param;
                }
 
@@ -1620,7 +1611,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
                 * for selected "vsi"
                 */
                if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
-                       v_ret = VIRTCHNL_STATUS_ERR_PARAM;
                        goto error_param;
                }
 
@@ -1630,14 +1620,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
                        vsi->tx_rings[i]->count = qpi->txq.ring_len;
 
                        /* Disable any existing queue first */
-                       if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                       if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
                                goto error_param;
-                       }
 
                        /* Configure a queue with the requested settings */
                        if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
+                                        vf->vf_id, i);
                                goto error_param;
                        }
                }
@@ -1651,17 +1640,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 
                        if (qpi->rxq.databuffer_size != 0 &&
                            (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
-                            qpi->rxq.databuffer_size < 1024)) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                            qpi->rxq.databuffer_size < 1024))
                                goto error_param;
-                       }
                        vsi->rx_buf_len = qpi->rxq.databuffer_size;
                        vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
                        if (qpi->rxq.max_pkt_size > max_frame_size ||
-                           qpi->rxq.max_pkt_size < 64) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                           qpi->rxq.max_pkt_size < 64)
                                goto error_param;
-                       }
 
                        vsi->max_frame = qpi->rxq.max_pkt_size;
                        /* add space for the port VLAN since the VF driver is
@@ -1672,16 +1657,30 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
                                vsi->max_frame += VLAN_HLEN;
 
                        if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
-                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
+                                        vf->vf_id, i);
                                goto error_param;
                        }
                }
        }
 
+       /* send the response to the VF */
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+                                    VIRTCHNL_STATUS_SUCCESS, NULL, 0);
 error_param:
+       /* disable whatever we can */
+       for (; i >= 0; i--) {
+               if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
+                       dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
+                               vf->vf_id, i);
+               if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
+                       dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
+                               vf->vf_id, i);
+       }
+
        /* send the response to the VF */
-       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
-                                    NULL, 0);
+       return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+                                    VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
 }
 
 /**
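The rewritten error handling above uses a standard rollback idiom: initialize the loop counter to -1 so the unwind loop is a no-op when a pre-loop validation check fails, and otherwise walk back over every queue touched so far, including the possibly half-configured one that failed. A minimal sketch, with hypothetical request_is_valid(), configure_queue() and disable_queue() helpers:

        int i = -1;     /* unwind loop is a no-op until the loop starts */

        if (!request_is_valid())
                goto err;

        for (i = 0; i < num_queues; i++)
                if (configure_queue(i))
                        goto err;       /* queue i may be half-configured */
        return 0;

err:
        for (; i >= 0; i--)     /* tear down queue i and everything before it */
                disable_queue(i);
        return -EINVAL;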
@@ -2949,7 +2948,8 @@ ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
                                     struct virtchnl_vlan_filtering_caps *vfc,
                                     struct virtchnl_vlan_filter_list_v2 *vfl)
 {
-       u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;
+       u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
+               vfl->num_elements;
 
        if (num_requested_filters > vfc->max_filters)
                return false;
index 68be297..c5f04c4 100644 (file)
@@ -4819,8 +4819,11 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
        while (i != tx_ring->next_to_use) {
                union e1000_adv_tx_desc *eop_desc, *tx_desc;
 
-               /* Free all the Tx ring sk_buffs */
-               dev_kfree_skb_any(tx_buffer->skb);
+               /* Free all the Tx ring sk_buffs or xdp frames */
+               if (tx_buffer->type == IGB_TYPE_SKB)
+                       dev_kfree_skb_any(tx_buffer->skb);
+               else
+                       xdp_return_frame(tx_buffer->xdpf);
 
                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
@@ -9898,11 +9901,10 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
        struct e1000_hw *hw = &adapter->hw;
        u32 dmac_thr;
        u16 hwm;
+       u32 reg;
 
        if (hw->mac.type > e1000_82580) {
                if (adapter->flags & IGB_FLAG_DMAC) {
-                       u32 reg;
-
                        /* force threshold to 0. */
                        wr32(E1000_DMCTXTH, 0);
 
@@ -9935,7 +9937,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
                        /* Disable BMC-to-OS Watchdog Enable */
                        if (hw->mac.type != e1000_i354)
                                reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
-
                        wr32(E1000_DMACR, reg);
 
                        /* no lower threshold to disable
@@ -9952,12 +9953,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
                         */
                        wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
                             (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
+               }
 
-                       /* make low power state decision controlled
-                        * by DMA coal
-                        */
+               if (hw->mac.type >= e1000_i210 ||
+                   (adapter->flags & IGB_FLAG_DMAC)) {
                        reg = rd32(E1000_PCIEMISC);
-                       reg &= ~E1000_PCIEMISC_LX_DECISION;
+                       reg |= E1000_PCIEMISC_LX_DECISION;
                        wr32(E1000_PCIEMISC, reg);
                } /* endif adapter->dmac is not disabled */
        } else if (hw->mac.type == e1000_82580) {
index ae17af4..a5ebee7 100644 (file)
@@ -6171,6 +6171,9 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
        u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
        u32 value = 0;
 
+       if (IGC_REMOVED(hw_addr))
+               return ~value;
+
        value = readl(&hw_addr[reg]);
 
        /* reads should not return all F's */
index e197a33..026c3b6 100644 (file)
@@ -306,7 +306,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg);
 #define wr32(reg, val) \
 do { \
        u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
-       writel((val), &hw_addr[(reg)]); \
+       if (!IGC_REMOVED(hw_addr)) \
+               writel((val), &hw_addr[(reg)]); \
 } while (0)
 
 #define rd32(reg) (igc_rd32(hw, reg))
@@ -318,4 +319,6 @@ do { \
 
 #define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2)))
 
+#define IGC_REMOVED(h) unlikely(!(h))
+
 #endif
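The IGC_REMOVED() checks follow the usual surprise-removal pattern for PCI MMIO: once hw_addr has been cleared (e.g. on hot-unplug), reads return all-ones, which is what a read from a physically absent PCI device yields anyway, and writes are silently dropped. Callers therefore need no new error paths; a sketch of the behavior this preserves (IGC_STATUS stands in for any register):

        u32 val = rd32(IGC_STATUS);

        if (val == ~0U) {
                /* Device gone, or the guard fired: the two cases are
                 * indistinguishable on purpose, so existing all-ones
                 * checks keep working.
                 */
                return -ENODEV;
        }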
index 921a4d9..8813b4d 100644 (file)
@@ -779,6 +779,7 @@ struct ixgbe_adapter {
 #ifdef CONFIG_IXGBE_IPSEC
        struct ixgbe_ipsec *ipsec;
 #endif /* CONFIG_IXGBE_IPSEC */
+       spinlock_t vfs_lock;
 };
 
 static inline int ixgbe_determine_xdp_q_idx(int cpu)
index 77c2e70..55f91c9 100644 (file)
@@ -6403,6 +6403,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
        /* n-tuple support exists, always init our spinlock */
        spin_lock_init(&adapter->fdir_perfect_lock);
 
+       /* init spinlock to avoid concurrency of VF resources */
+       spin_lock_init(&adapter->vfs_lock);
+
 #ifdef CONFIG_IXGBE_DCB
        ixgbe_init_dcb(adapter);
 #endif
index d4e63f0..a1e69c7 100644 (file)
@@ -205,10 +205,13 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
 int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 {
        unsigned int num_vfs = adapter->num_vfs, vf;
+       unsigned long flags;
        int rss;
 
+       spin_lock_irqsave(&adapter->vfs_lock, flags);
        /* set num VFs to 0 to prevent access to vfinfo */
        adapter->num_vfs = 0;
+       spin_unlock_irqrestore(&adapter->vfs_lock, flags);
 
        /* put the reference to all of the vf devices */
        for (vf = 0; vf < num_vfs; ++vf) {
@@ -1355,8 +1358,10 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 void ixgbe_msg_task(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       unsigned long flags;
        u32 vf;
 
+       spin_lock_irqsave(&adapter->vfs_lock, flags);
        for (vf = 0; vf < adapter->num_vfs; vf++) {
                /* process any reset requests */
                if (!ixgbe_check_for_rst(hw, vf))
@@ -1370,6 +1375,7 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter)
                if (!ixgbe_check_for_ack(hw, vf))
                        ixgbe_rcv_ack_from_vf(adapter, vf);
        }
+       spin_unlock_irqrestore(&adapter->vfs_lock, flags);
 }
 
 static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
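Together these hunks close a teardown race: ixgbe_disable_sriov() zeroes num_vfs and then frees per-VF state, while the interrupt-driven ixgbe_msg_task() iterates VFs up to num_vfs. With vfs_lock held on both sides, a message task already inside its loop finishes before the teardown path can proceed, and one that starts later sees num_vfs == 0 and does nothing. In outline, using the field names from the diff plus a hypothetical process_vf_mailbox():

        /* teardown side */
        spin_lock_irqsave(&adapter->vfs_lock, flags);
        adapter->num_vfs = 0;           /* later readers see an empty set */
        spin_unlock_irqrestore(&adapter->vfs_lock, flags);
        kfree(adapter->vfinfo);         /* no iterator can still be mid-loop */

        /* consumer side */
        spin_lock_irqsave(&adapter->vfs_lock, flags);
        for (vf = 0; vf < adapter->num_vfs; vf++)
                process_vf_mailbox(adapter, vf);
        spin_unlock_irqrestore(&adapter->vfs_lock, flags);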
index cc51149..3d5d39a 100644 (file)
@@ -52,7 +52,7 @@
 
 #define    CN93_SDP_EPF_RINFO_SRN(val)           ((val) & 0xFF)
 #define    CN93_SDP_EPF_RINFO_RPVF(val)          (((val) >> 32) & 0xF)
-#define    CN93_SDP_EPF_RINFO_NVFS(val)          (((val) >> 48) && 0xFF)
+#define    CN93_SDP_EPF_RINFO_NVFS(val)          (((val) >> 48) & 0xFF)
 
 /* SDP Function select */
 #define    CN93_SDP_FUNC_SEL_EPF_BIT_POS         8
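The one-character change above deserves spelling out, since '&&' versus '&' is easy to miss in review:

        /* (val >> 48) && 0xFF  -- logical AND: evaluates to 0 or 1,
         *                         never to the field contents
         * (val >> 48) &  0xFF  -- bitwise AND: masks off the low 8 bits,
         *                         the actual number of VFs
         */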
index bc614a4..3f60a80 100644 (file)
@@ -1390,7 +1390,8 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
 
 static const struct ethtool_ops otx2vf_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
-                                    ETHTOOL_COALESCE_MAX_FRAMES,
+                                    ETHTOOL_COALESCE_MAX_FRAMES |
+                                    ETHTOOL_COALESCE_USE_ADAPTIVE,
        .supported_ring_params  = ETHTOOL_RING_USE_RX_BUF_LEN |
                                  ETHTOOL_RING_USE_CQE_SIZE,
        .get_link               = otx2_get_link,
index 28b1994..e64318c 100644 (file)
@@ -28,6 +28,9 @@
 #define MAX_RATE_EXPONENT              0x0FULL
 #define MAX_RATE_MANTISSA              0xFFULL
 
+#define CN10K_MAX_BURST_MANTISSA       0x7FFFULL
+#define CN10K_MAX_BURST_SIZE           8453888ULL
+
 /* Bitfields in NIX_TLX_PIR register */
 #define TLX_RATE_MANTISSA              GENMASK_ULL(8, 1)
 #define TLX_RATE_EXPONENT              GENMASK_ULL(12, 9)
@@ -35,6 +38,9 @@
 #define TLX_BURST_MANTISSA             GENMASK_ULL(36, 29)
 #define TLX_BURST_EXPONENT             GENMASK_ULL(40, 37)
 
+#define CN10K_TLX_BURST_MANTISSA       GENMASK_ULL(43, 29)
+#define CN10K_TLX_BURST_EXPONENT       GENMASK_ULL(47, 44)
+
 struct otx2_tc_flow_stats {
        u64 bytes;
        u64 pkts;
@@ -77,33 +83,42 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
 }
 EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
 
-static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
-                                     u32 *burst_mantissa)
+static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
+                                     u32 *burst_exp, u32 *burst_mantissa)
 {
+       int max_burst, max_mantissa;
        unsigned int tmp;
 
+       if (is_dev_otx2(nic->pdev)) {
+               max_burst = MAX_BURST_SIZE;
+               max_mantissa = MAX_BURST_MANTISSA;
+       } else {
+               max_burst = CN10K_MAX_BURST_SIZE;
+               max_mantissa = CN10K_MAX_BURST_MANTISSA;
+       }
+
        /* Burst is calculated as
         * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
         * Max supported burst size is 130,816 bytes.
         */
-       burst = min_t(u32, burst, MAX_BURST_SIZE);
+       burst = min_t(u32, burst, max_burst);
        if (burst) {
                *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
                tmp = burst - rounddown_pow_of_two(burst);
-               if (burst < MAX_BURST_MANTISSA)
+               if (burst < max_mantissa)
                        *burst_mantissa = tmp * 2;
                else
                        *burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
        } else {
                *burst_exp = MAX_BURST_EXPONENT;
-               *burst_mantissa = MAX_BURST_MANTISSA;
+               *burst_mantissa = max_mantissa;
        }
 }
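The new CN10K limits are consistent with the burst formula quoted in the comment above; a quick worked check (the mantissa field widens from 8 to 15 bits, while the maximum exponent is assumed to stay 0xF):

        /* burst = ((256 + mantissa) << (1 + exponent)) / 256
         *
         * otx2:  ((256 + 0xFF)   << (1 + 0xF)) / 256 =   511 * 256 =   130,816
         * CN10K: ((256 + 0x7FFF) << (1 + 0xF)) / 256 = 33023 * 256 = 8,453,888
         *
         * matching the 130,816-byte note in the comment and
         * CN10K_MAX_BURST_SIZE above.
         */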
 
-static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
+static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
                                     u32 *mantissa, u32 *div_exp)
 {
-       unsigned int tmp;
+       u64 tmp;
 
        /* Rate calculation by hardware
         *
@@ -132,21 +147,44 @@ static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
        }
 }
 
-static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
+static u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
+                                      u64 maxrate, u32 burst)
 {
-       struct otx2_hw *hw = &nic->hw;
-       struct nix_txschq_config *req;
        u32 burst_exp, burst_mantissa;
        u32 exp, mantissa, div_exp;
+       u64 regval = 0;
+
+       /* Get exponent and mantissa values from the desired rate */
+       otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
+       otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
+
+       if (is_dev_otx2(nic->pdev)) {
+               regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
+                               FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+                               FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+                               FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+                               FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+       } else {
+               regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
+                               FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+                               FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+                               FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+                               FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+       }
+
+       return regval;
+}
+
+static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
+                                        u32 burst, u64 maxrate)
+{
+       struct otx2_hw *hw = &nic->hw;
+       struct nix_txschq_config *req;
        int txschq, err;
 
        /* All SQs share the same TL4, so pick the first scheduler */
        txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
 
-       /* Get exponent and mantissa values from the desired rate */
-       otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
-       otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
-
        mutex_lock(&nic->mbox.lock);
        req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
        if (!req) {
@@ -157,11 +195,7 @@ static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 ma
        req->lvl = NIX_TXSCH_LVL_TL4;
        req->num_regs = 1;
        req->reg[0] = NIX_AF_TL4X_PIR(txschq);
-       req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
-                        FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
-                        FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
-                        FIELD_PREP(TLX_RATE_EXPONENT, exp) |
-                        FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+       req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);
 
        err = otx2_sync_mbox_msg(&nic->mbox);
        mutex_unlock(&nic->mbox.lock);
@@ -230,7 +264,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
        struct netlink_ext_ack *extack = cls->common.extack;
        struct flow_action *actions = &cls->rule->action;
        struct flow_action_entry *entry;
-       u32 rate;
+       u64 rate;
        int err;
 
        err = otx2_tc_validate_flow(nic, actions, extack);
@@ -256,7 +290,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
                }
                /* Convert bytes per second to Mbps */
                rate = entry->police.rate_bytes_ps * 8;
-               rate = max_t(u32, rate / 1000000, 1);
+               rate = max_t(u64, rate / 1000000, 1);
                err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
                if (err)
                        return err;
@@ -614,21 +648,27 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
 
                flow_spec->dport = match.key->dst;
                flow_mask->dport = match.mask->dst;
-               if (ip_proto == IPPROTO_UDP)
-                       req->features |= BIT_ULL(NPC_DPORT_UDP);
-               else if (ip_proto == IPPROTO_TCP)
-                       req->features |= BIT_ULL(NPC_DPORT_TCP);
-               else if (ip_proto == IPPROTO_SCTP)
-                       req->features |= BIT_ULL(NPC_DPORT_SCTP);
+
+               if (flow_mask->dport) {
+                       if (ip_proto == IPPROTO_UDP)
+                               req->features |= BIT_ULL(NPC_DPORT_UDP);
+                       else if (ip_proto == IPPROTO_TCP)
+                               req->features |= BIT_ULL(NPC_DPORT_TCP);
+                       else if (ip_proto == IPPROTO_SCTP)
+                               req->features |= BIT_ULL(NPC_DPORT_SCTP);
+               }
 
                flow_spec->sport = match.key->src;
                flow_mask->sport = match.mask->src;
-               if (ip_proto == IPPROTO_UDP)
-                       req->features |= BIT_ULL(NPC_SPORT_UDP);
-               else if (ip_proto == IPPROTO_TCP)
-                       req->features |= BIT_ULL(NPC_SPORT_TCP);
-               else if (ip_proto == IPPROTO_SCTP)
-                       req->features |= BIT_ULL(NPC_SPORT_SCTP);
+
+               if (flow_mask->sport) {
+                       if (ip_proto == IPPROTO_UDP)
+                               req->features |= BIT_ULL(NPC_SPORT_UDP);
+                       else if (ip_proto == IPPROTO_TCP)
+                               req->features |= BIT_ULL(NPC_SPORT_TCP);
+                       else if (ip_proto == IPPROTO_SCTP)
+                               req->features |= BIT_ULL(NPC_SPORT_SCTP);
+               }
        }
 
        return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
index d43e503..4d93ad6 100644 (file)
@@ -167,12 +167,12 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
        }
        port = netdev_priv(ingress_dev);
 
-       mask = htons(0x1FFF);
-       key = htons(port->hw_id);
+       mask = htons(0x1FFF << 3);
+       key = htons(port->hw_id << 3);
        rule_match_set(r_match->key, SYS_PORT, key);
        rule_match_set(r_match->mask, SYS_PORT, mask);
 
-       mask = htons(0x1FF);
+       mask = htons(0x3FF);
        key = htons(port->dev_id);
        rule_match_set(r_match->key, SYS_DEV, key);
        rule_match_set(r_match->mask, SYS_DEV, mask);
index 3754d8a..3c8116f 100644 (file)
@@ -588,6 +588,7 @@ err_router_lib_init:
 
 void prestera_router_fini(struct prestera_switch *sw)
 {
+       unregister_fib_notifier(&init_net, &sw->router->fib_nb);
        unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
        unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
        rhashtable_destroy(&sw->router->kern_fib_cache_ht);
index 90e7dfd..5d457bc 100644 (file)
@@ -93,6 +93,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
        };
        struct net_device_path path = {};
 
+       if (!ctx.dev)
+               return -ENODEV;
+
        memcpy(ctx.daddr, addr, sizeof(ctx.daddr));
 
        if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
index 8f0cd31..29be2fc 100644 (file)
@@ -651,7 +651,7 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
         * WDMA RX.
         */
 
-       BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));
+       BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
 
        if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
                return -ENOMEM;
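A one-line reminder of why the comparison had to flip: for an array of N entries, index N is already out of bounds.

        /* dev->tx_ring has ARRAY_SIZE(dev->tx_ring) entries, so valid
         * indexes run 0 .. ARRAY_SIZE - 1:
         *
         *   BUG_ON(idx >  ARRAY_SIZE(dev->tx_ring)); // old: idx == size passes
         *   BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring)); // fixed: it now trips
         */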
index 25f51f8..ba171c7 100644 (file)
@@ -76,6 +76,7 @@ struct mlx5_tc_ct_priv {
        struct mlx5_ct_fs *fs;
        struct mlx5_ct_fs_ops *fs_ops;
        spinlock_t ht_lock; /* protects ft entries */
+       struct workqueue_struct *wq;
 
        struct mlx5_tc_ct_debugfs debugfs;
 };
@@ -941,14 +942,11 @@ static void mlx5_tc_ct_entry_del_work(struct work_struct *work)
 static void
 __mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
 {
-       struct mlx5e_priv *priv;
-
        if (!refcount_dec_and_test(&entry->refcnt))
                return;
 
-       priv = netdev_priv(entry->ct_priv->netdev);
        INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work);
-       queue_work(priv->wq, &entry->work);
+       queue_work(entry->ct_priv->wq, &entry->work);
 }
 
 static struct mlx5_ct_counter *
@@ -1759,19 +1757,16 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
 static void
 mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
 {
-       struct mlx5e_priv *priv;
-
        if (!refcount_dec_and_test(&ft->refcount))
                return;
 
+       flush_workqueue(ct_priv->wq);
        nf_flow_table_offload_del_cb(ft->nf_ft,
                                     mlx5_tc_ct_block_flow_offload, ft);
        rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
        rhashtable_free_and_destroy(&ft->ct_entries_ht,
                                    mlx5_tc_ct_flush_ft_entry,
                                    ct_priv);
-       priv = netdev_priv(ct_priv->netdev);
-       flush_workqueue(priv->wq);
        mlx5_tc_ct_free_pre_ct_tables(ft);
        mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
        kfree(ft);
@@ -2176,6 +2171,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
        if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
                goto err_ct_tuples_nat_ht;
 
+       ct_priv->wq = alloc_ordered_workqueue("mlx5e_ct_priv_wq", 0);
+       if (!ct_priv->wq) {
+               err = -ENOMEM;
+               goto err_wq;
+       }
+
        err = mlx5_tc_ct_fs_init(ct_priv);
        if (err)
                goto err_init_fs;
@@ -2184,6 +2185,8 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
        return ct_priv;
 
 err_init_fs:
+       destroy_workqueue(ct_priv->wq);
+err_wq:
        rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
 err_ct_tuples_nat_ht:
        rhashtable_destroy(&ct_priv->ct_tuples_ht);
@@ -2213,6 +2216,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
        if (!ct_priv)
                return;
 
+       destroy_workqueue(ct_priv->wq);
        mlx5_ct_tc_remove_dbgfs(ct_priv);
        chains = ct_priv->chains;
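The common thread in these mlx5 CT hunks is ownership: ct_priv gets its own ordered workqueue instead of borrowing priv->wq from the netdev, so the flush in mlx5_tc_ct_del_ft_cb() and the destroy above no longer reach into an object with a different lifetime. The deferred-put pattern being served, in generic form (ct_entry, entry_del_work and owner are stand-in names):

        static void entry_put(struct ct_entry *entry)
        {
                if (!refcount_dec_and_test(&entry->refcnt))
                        return;

                /* Last reference: free from the owner's own ordered
                 * queue, so teardown can flush_workqueue() and know
                 * every pending free has run.
                 */
                INIT_WORK(&entry->work, entry_del_work);
                queue_work(entry->owner->wq, &entry->work);
        }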
 
index 0bb0633..27483aa 100644 (file)
@@ -231,8 +231,7 @@ mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
        struct mlx5e_ktls_offload_context_rx **ctx =
                __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);
 
-       BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) >
-                    TLS_OFFLOAD_CONTEXT_SIZE_RX);
+       BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX);
 
        *ctx = priv_rx;
 }
index 4b6f0d1..f239fb2 100644 (file)
@@ -68,8 +68,7 @@ mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
        struct mlx5e_ktls_offload_context_tx **ctx =
                __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
 
-       BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) >
-                    TLS_OFFLOAD_CONTEXT_SIZE_TX);
+       BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);
 
        *ctx = priv_tx;
 }
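Both kTLS hunks tighten the same compile-time assert in two ways: sizeof(priv_rx)/sizeof(priv_tx) follows the variable's actual type instead of a hand-spelled type name, and the bound is the driver-state area the pointer is actually stored in. A generic sketch of the idiom (the 16-byte slot is made up):

        struct ctx { char driver_state[16]; }; /* hypothetical slot */

        static void store_ptr(struct ctx *c, void *p)
        {
                /* False condition -> compile error, zero runtime cost */
                BUILD_BUG_ON(sizeof(p) > sizeof(c->driver_state));
                memcpy(c->driver_state, &p, sizeof(p));
        }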
index 57fa048..1e87bb2 100644 (file)
@@ -688,7 +688,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
        u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
        struct mlx5_core_dev *mdev = priv->mdev;
 
-       if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
+       if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
                return;
 
        MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
index 34bf11c..9ca2c87 100644 (file)
@@ -3793,7 +3793,7 @@ static bool is_lag_dev(struct mlx5e_priv *priv,
 
 static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
 {
-       if (mlx5e_eswitch_uplink_rep(out_dev) &&
+       if (same_hw_reps(priv, out_dev) &&
            MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
            MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
                return true;
@@ -4529,13 +4529,6 @@ static int mlx5e_policer_validate(const struct flow_action *action,
                return -EOPNOTSUPP;
        }
 
-       if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
-           act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
-               NL_SET_ERR_MSG_MOD(extack,
-                                  "Offload not supported when conform action is not pipe or ok");
-               return -EOPNOTSUPP;
-       }
-
        if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
            !flow_action_is_last_entry(action, act)) {
                NL_SET_ERR_MSG_MOD(extack,
@@ -4586,6 +4579,12 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
        flow_action_for_each(i, act, flow_action) {
                switch (act->id) {
                case FLOW_ACTION_POLICE:
+                       if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Offload not supported when conform action is not continue");
+                               return -EOPNOTSUPP;
+                       }
+
                        err = mlx5e_policer_validate(flow_action, act, extack);
                        if (err)
                                return err;
index 50d14ce..9a7250b 100644 (file)
@@ -341,6 +341,26 @@ static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
        }
 }
 
+static void mlx5e_tx_flush(struct mlx5e_txqsq *sq)
+{
+       struct mlx5e_tx_wqe_info *wi;
+       struct mlx5e_tx_wqe *wqe;
+       u16 pi;
+
+       /* Must not be called when an MPWQE session is active but empty. */
+       mlx5e_tx_mpwqe_ensure_complete(sq);
+
+       pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+       wi = &sq->db.wqe_info[pi];
+
+       *wi = (struct mlx5e_tx_wqe_info) {
+               .num_wqebbs = 1,
+       };
+
+       wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
+       mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
+}
+
 static inline void
 mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                     const struct mlx5e_tx_attr *attr,
@@ -459,6 +479,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 err_drop:
        stats->dropped++;
        dev_kfree_skb_any(skb);
+       mlx5e_tx_flush(sq);
 }
 
 static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
@@ -560,6 +581,13 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5e_xmit_data txd;
 
+       txd.data = skb->data;
+       txd.len = skb->len;
+
+       txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
+               goto err_unmap;
+
        if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
                mlx5e_tx_mpwqe_session_start(sq, eseg);
        } else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
@@ -569,18 +597,9 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 
        sq->stats->xmit_more += xmit_more;
 
-       txd.data = skb->data;
-       txd.len = skb->len;
-
-       txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
-               goto err_unmap;
        mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
-
        mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
-
        mlx5e_tx_mpwqe_add_dseg(sq, &txd);
-
        mlx5e_tx_skb_update_hwts_flags(skb);
 
        if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
@@ -602,6 +621,7 @@ err_unmap:
        mlx5e_dma_unmap_wqe_err(sq, 1);
        sq->stats->dropped++;
        dev_kfree_skb_any(skb);
+       mlx5e_tx_flush(sq);
 }
 
 void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
@@ -1006,5 +1026,6 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 err_drop:
        stats->dropped++;
        dev_kfree_skb_any(skb);
+       mlx5e_tx_flush(sq);
 }
 #endif
index 9d17206..fabe49a 100644 (file)
@@ -11,6 +11,7 @@
 #include "mlx5_core.h"
 #include "eswitch.h"
 #include "fs_core.h"
+#include "fs_ft_pool.h"
 #include "esw/qos.h"
 
 enum {
@@ -95,8 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
        if (!flow_group_in)
                return -ENOMEM;
 
-       table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-       ft_attr.max_fte = table_size;
+       ft_attr.max_fte = POOL_NEXT_SIZE;
        ft_attr.prio = LEGACY_FDB_PRIO;
        fdb = mlx5_create_flow_table(root_ns, &ft_attr);
        if (IS_ERR(fdb)) {
@@ -105,6 +105,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
                goto out;
        }
        esw->fdb_table.legacy.fdb = fdb;
+       table_size = fdb->max_fte;
 
        /* Addresses group : Full match unicast/multicast addresses */
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
index 15e41dc..b8feaf0 100644 (file)
@@ -44,7 +44,7 @@ static int port_sel_mode_show(struct seq_file *file, void *priv)
        ldev = dev->priv.lag;
        mutex_lock(&ldev->lock);
        if (__mlx5_lag_is_active(ldev))
-               mode = mlx5_get_str_port_sel_mode(ldev);
+               mode = mlx5_get_str_port_sel_mode(ldev->mode, ldev->mode_flags);
        else
                ret = -EINVAL;
        mutex_unlock(&ldev->lock);
@@ -72,6 +72,7 @@ static int state_show(struct seq_file *file, void *priv)
 static int flags_show(struct seq_file *file, void *priv)
 {
        struct mlx5_core_dev *dev = file->private;
+       bool fdb_sel_mode_native;
        struct mlx5_lag *ldev;
        bool shared_fdb;
        bool lag_active;
@@ -79,14 +80,21 @@ static int flags_show(struct seq_file *file, void *priv)
        ldev = dev->priv.lag;
        mutex_lock(&ldev->lock);
        lag_active = __mlx5_lag_is_active(ldev);
-       if (lag_active)
-               shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
+       if (!lag_active)
+               goto unlock;
+
+       shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
+       fdb_sel_mode_native = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
+                                      &ldev->mode_flags);
 
+unlock:
        mutex_unlock(&ldev->lock);
        if (!lag_active)
                return -EINVAL;
 
        seq_printf(file, "%s:%s\n", "shared_fdb", shared_fdb ? "on" : "off");
+       seq_printf(file, "%s:%s\n", "fdb_selection_mode",
+                  fdb_sel_mode_native ? "native" : "affinity");
        return 0;
 }
 
index 2a8fc54..5d41e19 100644 (file)
@@ -68,14 +68,15 @@ static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
 static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
                               unsigned long flags)
 {
-       bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
+       bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
+                                    &flags);
        int port_sel_mode = get_port_sel_mode(mode, flags);
        u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
        void *lag_ctx;
 
        lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
        MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
-       MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
+       MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);
        if (port_sel_mode == MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY) {
                MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
                MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
@@ -471,8 +472,13 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
        bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
 
        *flags = 0;
-       if (shared_fdb)
+       if (shared_fdb) {
                set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
+               set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
+       }
+
+       if (mode == MLX5_LAG_MODE_MPESW)
+               set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
 
        if (roce_lag)
                return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
@@ -481,9 +487,9 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
        return 0;
 }
 
-char *mlx5_get_str_port_sel_mode(struct mlx5_lag *ldev)
+char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
 {
-       int port_sel_mode = get_port_sel_mode(ldev->mode, ldev->mode_flags);
+       int port_sel_mode = get_port_sel_mode(mode, flags);
 
        switch (port_sel_mode) {
        case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY: return "queue_affinity";
@@ -507,7 +513,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
        if (tracker)
                mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
        mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
-                      shared_fdb, mlx5_get_str_port_sel_mode(ldev));
+                      shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));
 
        err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
        if (err) {
index c81b173..ce2ce8c 100644 (file)
@@ -24,6 +24,7 @@ enum {
 enum {
        MLX5_LAG_MODE_FLAG_HASH_BASED,
        MLX5_LAG_MODE_FLAG_SHARED_FDB,
+       MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
 };
 
 enum mlx5_lag_mode {
@@ -114,7 +115,7 @@ bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev);
 void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev);
 int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev);
 
-char *mlx5_get_str_port_sel_mode(struct mlx5_lag *ldev);
+char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
 void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
                           u8 *ports, int *num_enabled);
 
index ee4b25a..f643202 100644 (file)
@@ -41,7 +41,6 @@ void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
 int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
 {
        struct mlx5_lag *ldev = dev->priv.lag;
-       bool shared_fdb;
        int err = 0;
 
        if (!ldev)
@@ -55,8 +54,8 @@ int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
                err = -EINVAL;
                goto out;
        }
-       shared_fdb = mlx5_shared_fdb_supported(ldev);
-       err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, shared_fdb);
+
+       err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
        if (err)
                mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
 
index a68d931..15c8d4d 100644 (file)
@@ -8,8 +8,8 @@
 #include "spectrum.h"
 
 enum mlxsw_sp_counter_sub_pool_id {
-       MLXSW_SP_COUNTER_SUB_POOL_FLOW,
        MLXSW_SP_COUNTER_SUB_POOL_RIF,
+       MLXSW_SP_COUNTER_SUB_POOL_FLOW,
 };
 
 int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
index 9dbb573..ce33dbd 100644 (file)
@@ -4415,6 +4415,8 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
        return 0;
 
 err_nexthop_neigh_init:
+       list_del(&nh->router_list_node);
+       mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
        mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
        return err;
 }
@@ -5382,7 +5384,7 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
 {
        const struct fib_nh *nh = fib_info_nh(fi, 0);
 
-       return nh->fib_nh_scope == RT_SCOPE_LINK ||
+       return nh->fib_nh_gw_family ||
               mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
 }
 
@@ -6740,6 +6742,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
                                  const struct fib6_info *rt)
 {
        struct net_device *dev = rt->fib6_nh->fib_nh_dev;
+       int err;
 
        nh->nhgi = nh_grp->nhgi;
        nh->nh_weight = rt->fib6_nh->fib_nh_weight;
@@ -6755,7 +6758,16 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
                return 0;
        nh->ifindex = dev->ifindex;
 
-       return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+       err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+       if (err)
+               goto err_nexthop_type_init;
+
+       return 0;
+
+err_nexthop_type_init:
+       list_del(&nh->router_list_node);
+       mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+       return err;
 }
 
 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
@@ -10312,7 +10324,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
        unsigned long *fields = config->fields;
        u32 hash_fields;
 
-       switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
+       switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
        case 0:
                mlxsw_sp_mp4_hash_outer_addr(config);
                break;
@@ -10330,7 +10342,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
                mlxsw_sp_mp_hash_inner_l3(config);
                break;
        case 3:
-               hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+               hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
                /* Outer */
                MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
                MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
@@ -10511,13 +10523,14 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
 {
        struct net *net = mlxsw_sp_net(mlxsw_sp);
-       bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
        char rgcr_pl[MLXSW_REG_RGCR_LEN];
        u64 max_rifs;
+       bool usp;
 
        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
                return -EIO;
        max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+       usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
 
        mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
        mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
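All three hunks in this file apply one rule: ipv4 sysctls can be rewritten locklessly from proc at any time, so data-path reads need READ_ONCE(), both for single-copy atomicity and to mark the data race as intentional for KCSAN. The matching pair, sketched:

        /* writer (sysctl handler) */
        WRITE_ONCE(net->ipv4.sysctl_ip_fwd_update_priority, val);

        /* reader (forwarding path / driver), as in the hunks above */
        usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);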
index 005e56e..5893770 100644 (file)
@@ -75,6 +75,9 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
                               unsigned int vid,
                               enum macaccess_entry_type type)
 {
+       int ret;
+
+       spin_lock(&lan966x->mac_lock);
        lan966x_mac_select(lan966x, mac, vid);
 
        /* Issue a write command */
@@ -86,7 +89,10 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
               ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
               lan966x, ANA_MACACCESS);
 
-       return lan966x_mac_wait_for_completion(lan966x);
+       ret = lan966x_mac_wait_for_completion(lan966x);
+       spin_unlock(&lan966x->mac_lock);
+
+       return ret;
 }
 
 /* The mask of the front ports is encoded inside the mac parameter via a call
@@ -113,11 +119,13 @@ int lan966x_mac_learn(struct lan966x *lan966x, int port,
        return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
 }
 
-int lan966x_mac_forget(struct lan966x *lan966x,
-                      const unsigned char mac[ETH_ALEN],
-                      unsigned int vid,
-                      enum macaccess_entry_type type)
+static int lan966x_mac_forget_locked(struct lan966x *lan966x,
+                                    const unsigned char mac[ETH_ALEN],
+                                    unsigned int vid,
+                                    enum macaccess_entry_type type)
 {
+       lockdep_assert_held(&lan966x->mac_lock);
+
        lan966x_mac_select(lan966x, mac, vid);
 
        /* Issue a forget command */
@@ -128,6 +136,20 @@ int lan966x_mac_forget(struct lan966x *lan966x,
        return lan966x_mac_wait_for_completion(lan966x);
 }
 
+int lan966x_mac_forget(struct lan966x *lan966x,
+                      const unsigned char mac[ETH_ALEN],
+                      unsigned int vid,
+                      enum macaccess_entry_type type)
+{
+       int ret;
+
+       spin_lock(&lan966x->mac_lock);
+       ret = lan966x_mac_forget_locked(lan966x, mac, vid, type);
+       spin_unlock(&lan966x->mac_lock);
+
+       return ret;
+}
+
 int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid)
 {
        return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED);
@@ -161,7 +183,7 @@ static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *ma
 {
        struct lan966x_mac_entry *mac_entry;
 
-       mac_entry = kzalloc(sizeof(*mac_entry), GFP_KERNEL);
+       mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC);
        if (!mac_entry)
                return NULL;
 
@@ -179,7 +201,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
        struct lan966x_mac_entry *res = NULL;
        struct lan966x_mac_entry *mac_entry;
 
-       spin_lock(&lan966x->mac_lock);
        list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
                if (mac_entry->vid == vid &&
                    ether_addr_equal(mac, mac_entry->mac) &&
@@ -188,7 +209,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
                        break;
                }
        }
-       spin_unlock(&lan966x->mac_lock);
 
        return res;
 }
@@ -231,8 +251,11 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
 {
        struct lan966x_mac_entry *mac_entry;
 
-       if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL))
+       spin_lock(&lan966x->mac_lock);
+       if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) {
+               spin_unlock(&lan966x->mac_lock);
                return 0;
+       }
 
        /* In case the entry already exists, don't add it again to SW,
         * just update HW, but we need to look in the actual HW because
@@ -241,21 +264,25 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
         * add the entry but without the extern_learn flag.
         */
        mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port);
-       if (mac_entry)
-               return lan966x_mac_learn(lan966x, port->chip_port,
-                                        addr, vid, ENTRYTYPE_LOCKED);
+       if (mac_entry) {
+               spin_unlock(&lan966x->mac_lock);
+               goto mac_learn;
+       }
 
        mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port);
-       if (!mac_entry)
+       if (!mac_entry) {
+               spin_unlock(&lan966x->mac_lock);
                return -ENOMEM;
+       }
 
-       spin_lock(&lan966x->mac_lock);
        list_add_tail(&mac_entry->list, &lan966x->mac_entries);
        spin_unlock(&lan966x->mac_lock);
 
-       lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
        lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);
 
+mac_learn:
+       lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
+
        return 0;
 }
 
@@ -269,8 +296,9 @@ int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
                                 list) {
                if (mac_entry->vid == vid &&
                    ether_addr_equal(addr, mac_entry->mac)) {
-                       lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
-                                          ENTRYTYPE_LOCKED);
+                       lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+                                                 mac_entry->vid,
+                                                 ENTRYTYPE_LOCKED);
 
                        list_del(&mac_entry->list);
                        kfree(mac_entry);
@@ -288,8 +316,8 @@ void lan966x_mac_purge_entries(struct lan966x *lan966x)
        spin_lock(&lan966x->mac_lock);
        list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
                                 list) {
-               lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
-                                  ENTRYTYPE_LOCKED);
+               lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+                                         mac_entry->vid, ENTRYTYPE_LOCKED);
 
                list_del(&mac_entry->list);
                kfree(mac_entry);
@@ -325,10 +353,13 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
 {
        struct lan966x_mac_entry *mac_entry, *tmp;
        unsigned char mac[ETH_ALEN] __aligned(2);
+       struct list_head mac_deleted_entries;
        u32 dest_idx;
        u32 column;
        u16 vid;
 
+       INIT_LIST_HEAD(&mac_deleted_entries);
+
        spin_lock(&lan966x->mac_lock);
        list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) {
                bool found = false;
@@ -362,20 +393,26 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
                }
 
                if (!found) {
-                       /* Notify the bridge that the entry doesn't exist
-                        * anymore in the HW and remove the entry from the SW
-                        * list
-                        */
-                       lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
-                                             mac_entry->mac, mac_entry->vid,
-                                             lan966x->ports[mac_entry->port_index]->dev);
-
                        list_del(&mac_entry->list);
-                       kfree(mac_entry);
+                       /* Move the entry from the SW list to a tmp list so
+                        * that it can be deleted later
+                        */
+                       list_add_tail(&mac_entry->list, &mac_deleted_entries);
                }
        }
        spin_unlock(&lan966x->mac_lock);
 
+       list_for_each_entry_safe(mac_entry, tmp, &mac_deleted_entries, list) {
+               /* Notify the bridge that the entry doesn't exist
+                * anymore in the HW
+                */
+               lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+                                     mac_entry->mac, mac_entry->vid,
+                                     lan966x->ports[mac_entry->port_index]->dev);
+               list_del(&mac_entry->list);
+               kfree(mac_entry);
+       }
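This reshuffle is the standard two-phase shape for calling notifier chains that may sleep while the list itself is protected by a spinlock: unlink onto a private list under the lock, then notify and free after dropping it. In minimal form (gone() and notify_bridge() are stand-ins):

        LIST_HEAD(deleted);

        spin_lock(&lan966x->mac_lock);
        list_for_each_entry_safe(entry, tmp, &lan966x->mac_entries, list)
                if (gone(entry))        /* aged out of the HW table */
                        list_move_tail(&entry->list, &deleted);
        spin_unlock(&lan966x->mac_lock);

        /* lock dropped: notifiers may sleep or take other locks safely */
        list_for_each_entry_safe(entry, tmp, &deleted, list) {
                notify_bridge(entry);
                list_del(&entry->list);
                kfree(entry);
        }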
+
        /* Now go to the list of columns and see if any entry was not in the SW
         * list, then that means that the entry is new so it needs to notify the
         * bridge.
@@ -396,13 +433,20 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
                if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
                        continue;
 
+               spin_lock(&lan966x->mac_lock);
+               mac_entry = lan966x_mac_find_entry(lan966x, mac, vid, dest_idx);
+               if (mac_entry) {
+                       spin_unlock(&lan966x->mac_lock);
+                       continue;
+               }
+
                mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
-               if (!mac_entry)
+               if (!mac_entry) {
+                       spin_unlock(&lan966x->mac_lock);
                        return;
+               }
 
                mac_entry->row = row;
-
-               spin_lock(&lan966x->mac_lock);
                list_add_tail(&mac_entry->list, &lan966x->mac_entries);
                spin_unlock(&lan966x->mac_lock);
 
@@ -424,6 +468,7 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
               lan966x, ANA_MACTINDX);
 
        while (1) {
+               spin_lock(&lan966x->mac_lock);
                lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT),
                        ANA_MACACCESS_MAC_TABLE_CMD,
                        lan966x, ANA_MACACCESS);
@@ -447,12 +492,15 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
                        stop = false;
 
                if (column == LAN966X_MAC_COLUMNS - 1 &&
-                   index == 0 && stop)
+                   index == 0 && stop) {
+                       spin_unlock(&lan966x->mac_lock);
                        break;
+               }
 
                entry[column].mach = lan_rd(lan966x, ANA_MACHDATA);
                entry[column].macl = lan_rd(lan966x, ANA_MACLDATA);
                entry[column].maca = lan_rd(lan966x, ANA_MACACCESS);
+               spin_unlock(&lan966x->mac_lock);
 
                /* Once all the columns are read process them */
                if (column == LAN966X_MAC_COLUMNS - 1) {
index 5784c41..1d6e3b6 100644 (file)
@@ -994,7 +994,7 @@ static int lan966x_probe(struct platform_device *pdev)
        struct fwnode_handle *ports, *portnp;
        struct lan966x *lan966x;
        u8 mac_addr[ETH_ALEN];
-       int err, i;
+       int err;
 
        lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL);
        if (!lan966x)
@@ -1025,11 +1025,7 @@ static int lan966x_probe(struct platform_device *pdev)
        if (err)
                return dev_err_probe(&pdev->dev, err, "Reset failed");
 
-       i = 0;
-       fwnode_for_each_available_child_node(ports, portnp)
-               ++i;
-
-       lan966x->num_phys_ports = i;
+       lan966x->num_phys_ports = NUM_PHYS_PORTS;
        lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports,
                                      sizeof(struct lan966x_port *),
                                      GFP_KERNEL);
index 3b86ddd..2787055 100644 (file)
@@ -34,6 +34,7 @@
 /* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */
 #define QSYS_Q_RSRV                    95
 
+#define NUM_PHYS_PORTS                 8
 #define CPU_PORT                       8
 
 /* Reserved PGIDs */
index 3429660..5edc8b7 100644 (file)
@@ -396,6 +396,9 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
        u32 mact_entry;
        int res, err;
 
+       if (!sparx5_netdevice_check(dev))
+               return -EOPNOTSUPP;
+
        if (netif_is_bridge_master(v->obj.orig_dev)) {
                sparx5_mact_learn(spx5, PGID_CPU, v->addr, v->vid);
                return 0;
@@ -466,6 +469,9 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev,
        u32 mact_entry, res, pgid_entry[3];
        int err;
 
+       if (!sparx5_netdevice_check(dev))
+               return -EOPNOTSUPP;
+
        if (netif_is_bridge_master(v->obj.orig_dev)) {
                sparx5_mact_forget(spx5, v->addr, v->vid);
                return 0;
index 083fddd..8e3894c 100644 (file)
@@ -94,19 +94,18 @@ static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma,
        ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan));
 }
 
+static u32 ocelot_fdma_read_ch_safe(struct ocelot *ocelot)
+{
+       return ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
+}
+
 static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan)
 {
-       unsigned long timeout;
        u32 safe;
 
-       timeout = jiffies + usecs_to_jiffies(OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
-       do {
-               safe = ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
-               if (safe & BIT(chan))
-                       return 0;
-       } while (time_after(jiffies, timeout));
-
-       return -ETIMEDOUT;
+       return readx_poll_timeout_atomic(ocelot_fdma_read_ch_safe, ocelot, safe,
+                                        safe & BIT(chan), 0,
+                                        OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
 }
 
 static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb,
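[Editor's note: readx_poll_timeout_atomic() from <linux/iopoll.h> replaces the open-coded loop above; note that the removed loop's while (time_after(jiffies, timeout)) condition was inverted, so it effectively read the register only once before giving up. The macro takes a read function plus its argument and polls until the condition holds or the timeout expires. A hedged sketch with a hypothetical device and register:]

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

struct demo_dev {
	void __iomem *regs;		/* hypothetical MMIO region */
};

static u32 demo_read_status(struct demo_dev *dd)
{
	return readl(dd->regs);		/* status register at offset 0 */
}

static int demo_wait_ready(struct demo_dev *dd, int chan)
{
	u32 status;

	/* Poll demo_read_status(dd) with no delay between reads until
	 * BIT(chan) is set; gives up after 500 us with -ETIMEDOUT.
	 */
	return readx_poll_timeout_atomic(demo_read_status, dd, status,
					 status & BIT(chan), 0, 500);
}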
index e31f8fb..df2ab5c 100644 (file)
@@ -4233,7 +4233,7 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
                        }
 
                        /* If the chain is ended by an load/store pair then this
-                        * could serve as the new head of the the next chain.
+                        * could serve as the new head of the next chain.
                         */
                        if (curr_pair_is_memcpy(meta1, meta2)) {
                                head_ld_meta = meta1;
index 0147de4..ffb6f6d 100644 (file)
@@ -474,7 +474,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
                        set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
                        ip_rt_put(rt);
                } else {
-                       set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+                       set_tun->ttl = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
                }
        }
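[Editor's note: sysctl_ip_default_ttl can be changed at any time by the sysctl writer with no lock held, so lockless readers wrap the access in READ_ONCE() to get one untorn load and to annotate the intentional data race. A minimal sketch of the paired accessors, using a hypothetical tunable:]

#include <linux/compiler.h>

static int demo_default_ttl = 64;	/* hypothetical tunable */

/* Writer side (e.g. a sysctl handler). */
static void demo_set_ttl(int ttl)
{
	WRITE_ONCE(demo_default_ttl, ttl);
}

/* Reader side: READ_ONCE() forces exactly one load and keeps the
 * compiler from caching or re-reading the value within the statement.
 */
static int demo_get_ttl(void)
{
	return READ_ONCE(demo_default_ttl);
}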
 
index 6bf3ec4..97dcf8d 100644 (file)
@@ -447,7 +447,8 @@ void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app,
 
 static void
 nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
-                   void *flow, struct neighbour *neigh, bool is_ipv6)
+                   void *flow, struct neighbour *neigh, bool is_ipv6,
+                   bool override)
 {
        bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead;
        size_t neigh_size = is_ipv6 ? sizeof(struct nfp_tun_neigh_v6) :
@@ -546,6 +547,13 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
                if (nn_entry->flow)
                        list_del(&nn_entry->list_head);
                kfree(nn_entry);
+       } else if (nn_entry && !neigh_invalid && override) {
+               mtype = is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
+                               NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
+               nfp_tun_link_predt_entries(app, nn_entry);
+               nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
+                                        nn_entry->payload,
+                                        GFP_ATOMIC);
        }
 
        spin_unlock_bh(&priv->predt_lock);
@@ -610,7 +618,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
 
                        dst_release(dst);
                }
-               nfp_tun_write_neigh(n->dev, app, &flow6, n, true);
+               nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false);
 #else
                return NOTIFY_DONE;
 #endif /* CONFIG_IPV6 */
@@ -633,7 +641,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
 
                        ip_rt_put(rt);
                }
-               nfp_tun_write_neigh(n->dev, app, &flow4, n, false);
+               nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false);
        }
 #else
        return NOTIFY_DONE;
@@ -676,7 +684,7 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
        ip_rt_put(rt);
        if (!n)
                goto fail_rcu_unlock;
-       nfp_tun_write_neigh(n->dev, app, &flow, n, false);
+       nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);
        neigh_release(n);
        rcu_read_unlock();
        return;
@@ -718,7 +726,7 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
        if (!n)
                goto fail_rcu_unlock;
 
-       nfp_tun_write_neigh(n->dev, app, &flow, n, true);
+       nfp_tun_write_neigh(n->dev, app, &flow, n, true, true);
        neigh_release(n);
        rcu_read_unlock();
        return;
index e509d6d..805071d 100644 (file)
@@ -125,17 +125,18 @@ nfp_nfdk_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
 
 static int
 nfp_nfdk_tx_maybe_close_block(struct nfp_net_tx_ring *tx_ring,
-                             unsigned int nr_frags, struct sk_buff *skb)
+                             struct sk_buff *skb)
 {
        unsigned int n_descs, wr_p, nop_slots;
        const skb_frag_t *frag, *fend;
        struct nfp_nfdk_tx_desc *txd;
+       unsigned int nr_frags;
        unsigned int wr_idx;
        int err;
 
 recount_descs:
        n_descs = nfp_nfdk_headlen_to_segs(skb_headlen(skb));
-
+       nr_frags = skb_shinfo(skb)->nr_frags;
        frag = skb_shinfo(skb)->frags;
        fend = frag + nr_frags;
        for (; frag < fend; frag++)
@@ -281,10 +282,13 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
        if (unlikely((int)metadata < 0))
                goto err_flush;
 
-       nr_frags = skb_shinfo(skb)->nr_frags;
-       if (nfp_nfdk_tx_maybe_close_block(tx_ring, nr_frags, skb))
+       if (nfp_nfdk_tx_maybe_close_block(tx_ring, skb))
                goto err_flush;
 
+       /* nr_frags will change after skb_linearize, so only read it after
+        * nfp_nfdk_tx_maybe_close_block() has run
+        */
+       nr_frags = skb_shinfo(skb)->nr_frags;
        /* DMA map all */
        wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
        txd = &tx_ring->ktxds[wr_idx];
@@ -310,7 +314,16 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
 
        /* FIELD_PREP() implicitly truncates to chunk */
        dma_len -= 1;
-       dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+
+       /* We will do our best to pass as much data as we can in the
+        * descriptor, and we need to make sure the first descriptor
+        * includes the whole head, since the firmware requires it.
+        * Otherwise the value of dma_len bitwise ANDed with
+        * NFDK_DESC_TX_DMA_LEN_HEAD can be less than headlen.
+        */
+       dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
+                              dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
+                              NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
                    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
 
        txd->dma_len_type = cpu_to_le16(dlen_type);
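[Editor's note: FIELD_PREP() masks a runtime value into the field, so an oversized dma_len would be silently truncated; the hunks in this file clamp it to the field's capacity first so the head descriptor never advertises less than the real head length. A standalone sketch of the clamp, using a made-up 12-bit length field:]

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/minmax.h>
#include <linux/types.h>

#define DEMO_DESC_LEN	GENMASK(11, 0)	/* hypothetical length field */
#define DEMO_DESC_TYPE	GENMASK(15, 12)	/* hypothetical type field */

static u16 demo_pack_desc(unsigned int len, unsigned int type)
{
	/* For a runtime value FIELD_PREP() just masks: len = 0x1001
	 * would silently encode as 0x001. Clamp explicitly instead.
	 */
	len = min_t(unsigned int, len, FIELD_MAX(DEMO_DESC_LEN));

	return FIELD_PREP(DEMO_DESC_LEN, len) |
	       FIELD_PREP(DEMO_DESC_TYPE, type);
}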
@@ -925,7 +938,9 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
 
        /* FIELD_PREP() implicitly truncates to chunk */
        dma_len -= 1;
-       dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+       dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
+                              dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
+                              NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
                    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
 
        txd->dma_len_type = cpu_to_le16(dlen_type);
@@ -1303,7 +1318,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
                                   skb_push(skb, 4));
        }
 
-       if (nfp_nfdk_tx_maybe_close_block(tx_ring, 0, skb))
+       if (nfp_nfdk_tx_maybe_close_block(tx_ring, skb))
                goto err_free;
 
        /* DMA map all */
@@ -1328,7 +1343,9 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
        txbuf++;
 
        dma_len -= 1;
-       dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+       dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
+                              dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
+                              NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
                    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
 
        txd->dma_len_type = cpu_to_le16(dlen_type);
index 3098d66..1b7fdb4 100644 (file)
@@ -4190,7 +4190,6 @@ static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
 static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
                                struct sk_buff *skb, u32 *opts)
 {
-       u32 transport_offset = (u32)skb_transport_offset(skb);
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        u32 mss = shinfo->gso_size;
 
@@ -4207,7 +4206,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
                        WARN_ON_ONCE(1);
                }
 
-               opts[0] |= transport_offset << GTTCPHO_SHIFT;
+               opts[0] |= skb_transport_offset(skb) << GTTCPHO_SHIFT;
                opts[1] |= mss << TD1_MSS_SHIFT;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                u8 ip_protocol;
@@ -4235,7 +4234,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
                else
                        WARN_ON_ONCE(1);
 
-               opts[1] |= transport_offset << TCPHO_SHIFT;
+               opts[1] |= skb_transport_offset(skb) << TCPHO_SHIFT;
        } else {
                unsigned int padto = rtl_quirk_packet_padto(tp, skb);
 
@@ -4402,14 +4401,13 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
                                                struct net_device *dev,
                                                netdev_features_t features)
 {
-       int transport_offset = skb_transport_offset(skb);
        struct rtl8169_private *tp = netdev_priv(dev);
 
        if (skb_is_gso(skb)) {
                if (tp->mac_version == RTL_GIGA_MAC_VER_34)
                        features = rtl8168evl_fix_tso(skb, features);
 
-               if (transport_offset > GTTCPHO_MAX &&
+               if (skb_transport_offset(skb) > GTTCPHO_MAX &&
                    rtl_chip_supports_csum_v2(tp))
                        features &= ~NETIF_F_ALL_TSO;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -4420,7 +4418,7 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
                if (rtl_quirk_packet_padto(tp, skb))
                        features &= ~NETIF_F_CSUM_MASK;
 
-               if (transport_offset > TCPHO_MAX &&
+               if (skb_transport_offset(skb) > TCPHO_MAX &&
                    rtl_chip_supports_csum_v2(tp))
                        features &= ~NETIF_F_CSUM_MASK;
        }
index 186cb28..8b62ce2 100644 (file)
@@ -1932,7 +1932,10 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
 
        efx_update_sw_stats(efx, stats);
 out:
+       /* releasing a DMA coherent buffer with BH disabled can panic */
+       spin_unlock_bh(&efx->stats_lock);
        efx_nic_free_buffer(efx, &stats_buf);
+       spin_lock_bh(&efx->stats_lock);
        return rc;
 }
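[Editor's note: as the new comment says, freeing a DMA-coherent buffer can take paths that must not run with bottom halves disabled, so the function briefly drops the BH-disabling stats lock around the free. A sketch of the pattern, assuming a caller that holds a spin_lock_bh()-style lock and has finished with the buffer:]

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

static void demo_release_stats_buf(spinlock_t *stats_lock, struct device *dev,
				   size_t len, void *cpu_addr,
				   dma_addr_t dma_addr)
{
	/* dma_free_coherent() may reach non-atomic code on some
	 * platforms; drop back to BH-enabled context around it.
	 */
	spin_unlock_bh(stats_lock);
	dma_free_coherent(dev, len, cpu_addr, dma_addr);
	spin_lock_bh(stats_lock);
}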
 
index 7f5aa4a..92550c7 100644 (file)
@@ -408,8 +408,9 @@ fail1:
 static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
 {
        struct pci_dev *dev = efx->pci_dev;
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
        unsigned int vfs_assigned = pci_vfs_assigned(dev);
-       int rc = 0;
+       int i, rc = 0;
 
        if (vfs_assigned && !force) {
                netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
@@ -417,10 +418,13 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
                return -EBUSY;
        }
 
-       if (!vfs_assigned)
+       if (!vfs_assigned) {
+               for (i = 0; i < efx->vf_count; i++)
+                       nic_data->vf[i].pci_dev = NULL;
                pci_disable_sriov(dev);
-       else
+       } else {
                rc = -EBUSY;
+       }
 
        efx_ef10_sriov_free_vf_vswitching(efx);
        efx->vf_count = 0;
index 4625f85..10ad0b9 100644 (file)
@@ -1100,7 +1100,29 @@ static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
 
        tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
        if (tx_queue && tx_queue->timestamping) {
+               /* This code invokes normal driver TX code which is always
+                * protected from softirqs when called from generic TX code,
+                * which in turn disables preemption. Look at __dev_queue_xmit
+                * which uses rcu_read_lock_bh disabling preemption for RCU
+                * plus disabling softirqs. We do not need RCU reader
+                * protection here.
+                *
+                * Although it is theoretically safe for the current PTP TX/RX
+                * code to run without disabling softirqs, there are three good
+                * reasons for doing so:
+                *
+                *      1) The code invoked is mainly implemented for non-PTP
+                *         packets and it is always executed with softirqs
+                *         disabled.
+                *      2) This being a single PTP packet, better to not
+                *         interrupt its processing by softirqs which can lead
+                *         to high latencies.
+                *      3) netdev_xmit_more checks preemption is disabled and
+                *         triggers a BUG_ON if not.
+                */
+               local_bh_disable();
                efx_enqueue_skb(tx_queue, skb);
+               local_bh_enable();
        } else {
                WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
                dev_kfree_skb_any(skb);
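[Editor's note: the comment above documents why a direct call into the driver TX path must recreate the softirq-disabled context that __dev_queue_xmit() normally provides. A minimal sketch of the calling convention, with hypothetical demo_* names standing in for the driver's queue type and enqueue routine:]

#include <linux/bottom_half.h>
#include <linux/skbuff.h>

struct demo_tx_queue {
	int id;				/* hypothetical queue state */
};

static void demo_enqueue_skb(struct demo_tx_queue *txq, struct sk_buff *skb)
{
	/* assumed driver enqueue routine; expects BHs to be disabled */
}

static void demo_xmit_direct(struct demo_tx_queue *txq, struct sk_buff *skb)
{
	/* netdev_xmit_more() BUG()s when preemption is enabled, and the
	 * TX code is normally entered with softirqs off; match that.
	 */
	local_bh_disable();
	demo_enqueue_skb(txq, skb);
	local_bh_enable();
}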
index a0654e8..0329caf 100644 (file)
@@ -1515,14 +1515,14 @@ static void epic_remove_one(struct pci_dev *pdev)
        struct net_device *dev = pci_get_drvdata(pdev);
        struct epic_private *ep = netdev_priv(dev);
 
+       unregister_netdev(dev);
        dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
                          ep->tx_ring_dma);
        dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
                          ep->rx_ring_dma);
-       unregister_netdev(dev);
        pci_iounmap(pdev, ep->ioaddr);
-       pci_release_regions(pdev);
        free_netdev(dev);
+       pci_release_regions(pdev);
        pci_disable_device(pdev);
        /* pci_power_off(pdev, -1); */
 }
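[Editor's note: the reordering above follows the usual rule that remove() mirrors probe() in reverse: unregister the netdev before freeing DMA rings it may still be using, and free the netdev before releasing the regions its private data maps. A skeleton of the assumed ordering:]

#include <linux/netdevice.h>
#include <linux/pci.h>

static void demo_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	unregister_netdev(dev);		/* 1: stop all TX/RX first */
	/* 2: free DMA rings and other resources the device used */
	/* 3: unmap BARs referenced through the private data */
	free_netdev(dev);		/* 4: drop netdev and private data */
	pci_release_regions(pdev);	/* 5: release what probe requested */
	pci_disable_device(pdev);
}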
index bc91fd8..358fc26 100644 (file)
@@ -361,6 +361,7 @@ bypass_clk_reset_gpio:
        data->fix_mac_speed = tegra_eqos_fix_speed;
        data->init = tegra_eqos_init;
        data->bsp_priv = eqos;
+       data->sph_disable = 1;
 
        err = tegra_eqos_init(pdev, eqos);
        if (err < 0)
index 9a6d819..378b4dd 100644 (file)
@@ -273,7 +273,8 @@ static int ingenic_mac_probe(struct platform_device *pdev)
                        mac->tx_delay = tx_delay_ps * 1000;
                } else {
                        dev_err(&pdev->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto err_remove_config_dt;
                }
        }
 
@@ -283,7 +284,8 @@ static int ingenic_mac_probe(struct platform_device *pdev)
                        mac->rx_delay = rx_delay_ps * 1000;
                } else {
                        dev_err(&pdev->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto err_remove_config_dt;
                }
        }
 
index 38fe77d..3fe720c 100644 (file)
@@ -298,6 +298,11 @@ static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
        *art_time = ns;
 }
 
+static int stmmac_cross_ts_isr(struct stmmac_priv *priv)
+{
+       return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE);
+}
+
 static int intel_crosststamp(ktime_t *device,
                             struct system_counterval_t *system,
                             void *ctx)
@@ -313,8 +318,6 @@ static int intel_crosststamp(ktime_t *device,
        u32 num_snapshot;
        u32 gpio_value;
        u32 acr_value;
-       int ret;
-       u32 v;
        int i;
 
        if (!boot_cpu_has(X86_FEATURE_ART))
@@ -328,6 +331,8 @@ static int intel_crosststamp(ktime_t *device,
        if (priv->plat->ext_snapshot_en)
                return -EBUSY;
 
+       priv->plat->int_snapshot_en = 1;
+
        mutex_lock(&priv->aux_ts_lock);
        /* Enable Internal snapshot trigger */
        acr_value = readl(ptpaddr + PTP_ACR);
@@ -347,6 +352,7 @@ static int intel_crosststamp(ktime_t *device,
                break;
        default:
                mutex_unlock(&priv->aux_ts_lock);
+               priv->plat->int_snapshot_en = 0;
                return -EINVAL;
        }
        writel(acr_value, ptpaddr + PTP_ACR);
@@ -368,13 +374,12 @@ static int intel_crosststamp(ktime_t *device,
        gpio_value |= GMAC_GPO1;
        writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
 
-       /* Poll for time sync operation done */
-       ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v,
-                                (v & GMAC_INT_TSIE), 100, 10000);
-
-       if (ret == -ETIMEDOUT) {
-               pr_err("%s: Wait for time sync operation timeout\n", __func__);
-               return ret;
+       /* Time sync done Indication - Interrupt method */
+       if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
+                                             stmmac_cross_ts_isr(priv),
+                                             HZ / 100)) {
+               priv->plat->int_snapshot_en = 0;
+               return -ETIMEDOUT;
        }
 
        num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
@@ -392,6 +397,7 @@ static int intel_crosststamp(ktime_t *device,
        }
 
        system->cycles *= intel_priv->crossts_adj;
+       priv->plat->int_snapshot_en = 0;
 
        return 0;
 }
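[Editor's note: the busy-poll on GMAC_INT_STATUS is replaced by an interrupt-driven handshake: the timestamp ISR calls wake_up() on tstamp_busy_wait and the requester sleeps in wait_event_interruptible_timeout() until the done condition is observed. A generic sketch of that pattern, with hypothetical demo_* names:]

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_done_wq);
static bool demo_done;			/* set from the interrupt handler */

/* Interrupt side: record completion, then wake any waiter. */
static void demo_isr_done(void)
{
	demo_done = true;
	wake_up(&demo_done_wq);
}

/* Requester side: sleep for up to 10 ms waiting for the ISR. */
static int demo_wait_done(void)
{
	long ret = wait_event_interruptible_timeout(demo_done_wq, demo_done,
						    msecs_to_jiffies(10));
	if (ret == 0)
		return -ETIMEDOUT;
	if (ret < 0)
		return ret;	/* interrupted by a signal */
	return 0;
}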
@@ -576,6 +582,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
 
        plat->has_crossts = true;
        plat->crosststamp = intel_crosststamp;
+       plat->int_snapshot_en = 0;
 
        /* Setup MSI vector offset specific to Intel mGbE controller */
        plat->msi_mac_vec = 29;
index 6ff88df..d42e1af 100644 (file)
@@ -576,32 +576,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
                }
        }
 
-       ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
-       if (ret) {
-               dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
-               return ret;
-       }
-
-       ret = clk_prepare_enable(plat->rmii_internal_clk);
-       if (ret) {
-               dev_err(plat->dev, "failed to enable rmii internal clk, err = %d\n", ret);
-               goto err_clk;
-       }
-
        return 0;
-
-err_clk:
-       clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
-       return ret;
-}
-
-static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv)
-{
-       struct mediatek_dwmac_plat_data *plat = priv;
-       const struct mediatek_dwmac_variant *variant = plat->variant;
-
-       clk_disable_unprepare(plat->rmii_internal_clk);
-       clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
 }
 
 static int mediatek_dwmac_clks_config(void *priv, bool enabled)
@@ -643,7 +618,6 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
        plat->addr64 = priv_plat->variant->dma_bit_mask;
        plat->bsp_priv = priv_plat;
        plat->init = mediatek_dwmac_init;
-       plat->exit = mediatek_dwmac_exit;
        plat->clks_config = mediatek_dwmac_clks_config;
        if (priv_plat->variant->dwmac_fix_mac_speed)
                plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed;
@@ -712,13 +686,33 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
        mediatek_dwmac_common_data(pdev, plat_dat, priv_plat);
        mediatek_dwmac_init(pdev, priv_plat);
 
+       ret = mediatek_dwmac_clks_config(priv_plat, true);
+       if (ret)
+               goto err_remove_config_dt;
+
        ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
-       if (ret) {
-               stmmac_remove_config_dt(pdev, plat_dat);
-               return ret;
-       }
+       if (ret)
+               goto err_drv_probe;
 
        return 0;
+
+err_drv_probe:
+       mediatek_dwmac_clks_config(priv_plat, false);
+err_remove_config_dt:
+       stmmac_remove_config_dt(pdev, plat_dat);
+
+       return ret;
+}
+
+static int mediatek_dwmac_remove(struct platform_device *pdev)
+{
+       struct mediatek_dwmac_plat_data *priv_plat = get_stmmac_bsp_priv(&pdev->dev);
+       int ret;
+
+       ret = stmmac_pltfr_remove(pdev);
+       mediatek_dwmac_clks_config(priv_plat, false);
+
+       return ret;
 }
 
 static const struct of_device_id mediatek_dwmac_match[] = {
@@ -733,7 +727,7 @@ MODULE_DEVICE_TABLE(of, mediatek_dwmac_match);
 
 static struct platform_driver mediatek_dwmac_driver = {
        .probe  = mediatek_dwmac_probe,
-       .remove = stmmac_pltfr_remove,
+       .remove = mediatek_dwmac_remove,
        .driver = {
                .name           = "dwmac-mediatek",
                .pm             = &stmmac_pltfr_pm_ops,
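[Editor's note: the probe rework adopts the standard goto-based error unwinding: each acquired resource gets a label, and a failure jumps to the label that releases everything obtained so far, in reverse order. A compact sketch of the idiom with hypothetical step functions:]

static int demo_step_a(void) { return 0; }	/* e.g. parse config */
static int demo_step_b(void) { return 0; }	/* e.g. enable clocks */
static int demo_step_c(void) { return 0; }	/* e.g. register device */
static void demo_undo_b(void) { }
static void demo_undo_a(void) { }

static int demo_probe(void)
{
	int ret;

	ret = demo_step_a();
	if (ret)
		return ret;

	ret = demo_step_b();
	if (ret)
		goto err_undo_a;

	ret = demo_step_c();
	if (ret)
		goto err_undo_b;

	return 0;

err_undo_b:
	demo_undo_b();
err_undo_a:
	demo_undo_a();
	return ret;
}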
index 462ca7e..71dad40 100644 (file)
 #define        GMAC_PCS_IRQ_DEFAULT    (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
                                 GMAC_INT_PCS_ANE)
 
-#define        GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN)
+#define        GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN | \
+                                GMAC_INT_TSIE)
 
 enum dwmac4_irq_status {
        time_stamp_irq = 0x00001000,
index fd41db6..d8f1fbc 100644 (file)
@@ -23,6 +23,7 @@
 static void dwmac4_core_init(struct mac_device_info *hw,
                             struct net_device *dev)
 {
+       struct stmmac_priv *priv = netdev_priv(dev);
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_CONFIG);
 
@@ -58,6 +59,9 @@ static void dwmac4_core_init(struct mac_device_info *hw,
                value |= GMAC_INT_FPE_EN;
 
        writel(value, ioaddr + GMAC_INT_EN);
+
+       if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
+               init_waitqueue_head(&priv->tstamp_busy_wait);
 }
 
 static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
@@ -219,6 +223,9 @@ static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
        if (queue == 0 || queue == 4) {
                value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
                value |= MTL_RXQ_DMA_Q04MDMACH(chan);
+       } else if (queue > 4) {
+               value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
+               value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
        } else {
                value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
                value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
index 57970ae..f9e8396 100644 (file)
@@ -266,6 +266,7 @@ struct stmmac_priv {
        rwlock_t ptp_lock;
        /* Protects auxiliary snapshot registers from concurrent access. */
        struct mutex aux_ts_lock;
+       wait_queue_head_t tstamp_busy_wait;
 
        void __iomem *mmcaddr;
        void __iomem *ptpaddr;
index abfb3cd..9c3055e 100644 (file)
@@ -803,14 +803,6 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
                netdev_warn(priv->dev,
                            "Setting EEE tx-lpi is not supported\n");
 
-       if (priv->hw->xpcs) {
-               ret = xpcs_config_eee(priv->hw->xpcs,
-                                     priv->plat->mult_fact_100ns,
-                                     edata->eee_enabled);
-               if (ret)
-                       return ret;
-       }
-
        if (!edata->eee_enabled)
                stmmac_disable_eee_mode(priv);
 
index 92d3294..764832f 100644 (file)
@@ -179,6 +179,11 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
        u64 ptp_time;
        int i;
 
+       if (priv->plat->int_snapshot_en) {
+               wake_up(&priv->tstamp_busy_wait);
+               return;
+       }
+
        tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE;
 
        if (!tsync_int)
index d1a7cf4..c5f3363 100644 (file)
@@ -834,19 +834,10 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
        struct timespec64 now;
        u32 sec_inc = 0;
        u64 temp = 0;
-       int ret;
 
        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;
 
-       ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
-       if (ret < 0) {
-               netdev_warn(priv->dev,
-                           "failed to enable PTP reference clock: %pe\n",
-                           ERR_PTR(ret));
-               return ret;
-       }
-
        stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
        priv->systime_flags = systime_flags;
 
@@ -3270,6 +3261,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
 
        stmmac_mmc_setup(priv);
 
+       if (ptp_register) {
+               ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+               if (ret < 0)
+                       netdev_warn(priv->dev,
+                                   "failed to enable PTP reference clock: %pe\n",
+                                   ERR_PTR(ret));
+       }
+
        ret = stmmac_init_ptp(priv);
        if (ret == -EOPNOTSUPP)
                netdev_info(priv->dev, "PTP not supported by HW\n");
@@ -7213,8 +7212,6 @@ int stmmac_dvr_remove(struct device *dev)
        netdev_info(priv->dev, "%s: removing driver", __func__);
 
        pm_runtime_get_sync(dev);
-       pm_runtime_disable(dev);
-       pm_runtime_put_noidle(dev);
 
        stmmac_stop_all_dma(priv);
        stmmac_mac_set(priv, priv->ioaddr, false);
@@ -7241,6 +7238,9 @@ int stmmac_dvr_remove(struct device *dev)
        mutex_destroy(&priv->lock);
        bitmap_free(priv->af_xdp_zc_qps);
 
+       pm_runtime_disable(dev);
+       pm_runtime_put_noidle(dev);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
index 11e1055..9f5cac4 100644 (file)
@@ -815,7 +815,13 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
                if (ret)
                        return ret;
 
-               stmmac_init_tstamp_counter(priv, priv->systime_flags);
+               ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+               if (ret < 0) {
+                       netdev_warn(priv->dev,
+                                   "failed to enable PTP reference clock: %pe\n",
+                                   ERR_PTR(ret));
+                       return ret;
+               }
        }
 
        return 0;
index e45fb19..4d11980 100644 (file)
@@ -175,11 +175,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
        struct stmmac_priv *priv =
            container_of(ptp, struct stmmac_priv, ptp_clock_ops);
        void __iomem *ptpaddr = priv->ptpaddr;
-       void __iomem *ioaddr = priv->hw->pcsr;
        struct stmmac_pps_cfg *cfg;
-       u32 intr_value, acr_value;
        int ret = -EOPNOTSUPP;
        unsigned long flags;
+       u32 acr_value;
 
        switch (rq->type) {
        case PTP_CLK_REQ_PEROUT:
@@ -213,19 +212,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
                        netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n",
                                   priv->plat->ext_snapshot_num >>
                                   PTP_ACR_ATSEN_SHIFT);
-                       /* Enable Timestamp Interrupt */
-                       intr_value = readl(ioaddr + GMAC_INT_EN);
-                       intr_value |= GMAC_INT_TSIE;
-                       writel(intr_value, ioaddr + GMAC_INT_EN);
-
                } else {
                        netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n",
                                   priv->plat->ext_snapshot_num >>
                                   PTP_ACR_ATSEN_SHIFT);
-                       /* Disable Timestamp Interrupt */
-                       intr_value = readl(ioaddr + GMAC_INT_EN);
-                       intr_value &= ~GMAC_INT_TSIE;
-                       writel(intr_value, ioaddr + GMAC_INT_EN);
                }
                writel(acr_value, ptpaddr + PTP_ACR);
                mutex_unlock(&priv->aux_ts_lock);
index 77e5dff..8594ee8 100644 (file)
@@ -545,43 +545,24 @@ static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
 
 static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
 {
-       printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
-       if (hp->tcvr_type == external)
-               printk("external ");
-       else
-               printk("internal ");
-       printk("transceiver at ");
        hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
-       if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
-               if (hp->sw_lpa & LPA_100FULL)
-                       printk("100Mb/s, Full Duplex.\n");
-               else
-                       printk("100Mb/s, Half Duplex.\n");
-       } else {
-               if (hp->sw_lpa & LPA_10FULL)
-                       printk("10Mb/s, Full Duplex.\n");
-               else
-                       printk("10Mb/s, Half Duplex.\n");
-       }
+
+       netdev_info(hp->dev,
+                   "Link is up using %s transceiver at %dMb/s, %s Duplex.\n",
+                   hp->tcvr_type == external ? "external" : "internal",
+                   hp->sw_lpa & (LPA_100HALF | LPA_100FULL) ? 100 : 10,
+                   hp->sw_lpa & (LPA_100FULL | LPA_10FULL) ? "Full" : "Half");
 }
 
 static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
 {
-       printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
-       if (hp->tcvr_type == external)
-               printk("external ");
-       else
-               printk("internal ");
-       printk("transceiver at ");
        hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
-       if (hp->sw_bmcr & BMCR_SPEED100)
-               printk("100Mb/s, ");
-       else
-               printk("10Mb/s, ");
-       if (hp->sw_bmcr & BMCR_FULLDPLX)
-               printk("Full Duplex.\n");
-       else
-               printk("Half Duplex.\n");
+
+       netdev_info(hp->dev,
+                   "Link has been forced up using %s transceiver at %dMb/s, %s Duplex.\n",
+                   hp->tcvr_type == external ? "external" : "internal",
+                   hp->sw_bmcr & BMCR_SPEED100 ? 100 : 10,
+                   hp->sw_bmcr & BMCR_FULLDPLX ? "Full" : "Half");
 }
 
 static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
index fb92d4c..f4a6b59 100644 (file)
@@ -2467,7 +2467,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
                                port->port_id, ret);
                        goto dl_port_unreg;
                }
-               devlink_port_type_eth_set(dl_port, port->ndev);
        }
        devlink_register(common->devlink);
        return ret;
@@ -2511,6 +2510,7 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
 static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
 {
        struct device *dev = common->dev;
+       struct devlink_port *dl_port;
        struct am65_cpsw_port *port;
        int ret = 0, i;
 
@@ -2527,6 +2527,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
                return ret;
        }
 
+       ret = am65_cpsw_nuss_register_devlink(common);
+       if (ret)
+               return ret;
+
        for (i = 0; i < common->port_num; i++) {
                port = &common->ports[i];
 
@@ -2539,25 +2543,24 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
                                i, ret);
                        goto err_cleanup_ndev;
                }
+
+               dl_port = &port->devlink_port;
+               devlink_port_type_eth_set(dl_port, port->ndev);
        }
 
        ret = am65_cpsw_register_notifiers(common);
        if (ret)
                goto err_cleanup_ndev;
 
-       ret = am65_cpsw_nuss_register_devlink(common);
-       if (ret)
-               goto clean_unregister_notifiers;
-
        /* can't auto unregister ndev using devm_add_action() due to
         * devres release sequence in DD core for DMA
         */
 
        return 0;
-clean_unregister_notifiers:
-       am65_cpsw_unregister_notifiers(common);
+
 err_cleanup_ndev:
        am65_cpsw_nuss_cleanup_ndev(common);
+       am65_cpsw_unregister_devlink(common);
 
        return ret;
 }
index 4225efb..f2e2261 100644 (file)
@@ -547,6 +547,57 @@ static inline void axienet_iow(struct axienet_local *lp, off_t offset,
        iowrite32(value, lp->regs + offset);
 }
 
+/**
+ * axienet_dma_out32 - Memory mapped Axi DMA register write.
+ * @lp:                Pointer to axienet local structure
+ * @reg:       Address offset from the base address of the Axi DMA core
+ * @value:     Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+
+static inline void axienet_dma_out32(struct axienet_local *lp,
+                                    off_t reg, u32 value)
+{
+       iowrite32(value, lp->dma_regs + reg);
+}
+
+#if defined(CONFIG_64BIT) && defined(iowrite64)
+/**
+ * axienet_dma_out64 - Memory mapped Axi DMA register write.
+ * @lp:                Pointer to axienet local structure
+ * @reg:       Address offset from the base address of the Axi DMA core
+ * @value:     Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+static inline void axienet_dma_out64(struct axienet_local *lp,
+                                    off_t reg, u64 value)
+{
+       iowrite64(value, lp->dma_regs + reg);
+}
+
+static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+                                       dma_addr_t addr)
+{
+       if (lp->features & XAE_FEATURE_DMA_64BIT)
+               axienet_dma_out64(lp, reg, addr);
+       else
+               axienet_dma_out32(lp, reg, lower_32_bits(addr));
+}
+
+#else /* CONFIG_64BIT */
+
+static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+                                dma_addr_t addr)
+{
+       axienet_dma_out32(lp, reg, lower_32_bits(addr));
+}
+
+#endif /* CONFIG_64BIT */
+
 /* Function prototypes visible in xilinx_axienet_mdio.c for other files */
 int axienet_mdio_enable(struct axienet_local *lp);
 void axienet_mdio_disable(struct axienet_local *lp);
index 93c9f30..1760930 100644 (file)
@@ -133,30 +133,6 @@ static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
        return ioread32(lp->dma_regs + reg);
 }
 
-/**
- * axienet_dma_out32 - Memory mapped Axi DMA register write.
- * @lp:                Pointer to axienet local structure
- * @reg:       Address offset from the base address of the Axi DMA core
- * @value:     Value to be written into the Axi DMA register
- *
- * This function writes the desired value into the corresponding Axi DMA
- * register.
- */
-static inline void axienet_dma_out32(struct axienet_local *lp,
-                                    off_t reg, u32 value)
-{
-       iowrite32(value, lp->dma_regs + reg);
-}
-
-static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
-                                dma_addr_t addr)
-{
-       axienet_dma_out32(lp, reg, lower_32_bits(addr));
-
-       if (lp->features & XAE_FEATURE_DMA_64BIT)
-               axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
-}
-
 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
                               struct axidma_bd *desc)
 {
@@ -2061,6 +2037,11 @@ static int axienet_probe(struct platform_device *pdev)
                        iowrite32(0x0, desc);
                }
        }
+       if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
+               dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
+               ret = -EINVAL;
+               goto cleanup_clk;
+       }
 
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
        if (ret) {
index 45c3c4a..9fb5675 100644 (file)
@@ -99,6 +99,7 @@ struct sixpack {
 
        unsigned int            rx_count;
        unsigned int            rx_count_cooked;
+       spinlock_t              rxlock;
 
        int                     mtu;            /* Our mtu (to spot changes!) */
        int                     buffsize;       /* Max buffers sizes */
@@ -565,6 +566,7 @@ static int sixpack_open(struct tty_struct *tty)
        sp->dev = dev;
 
        spin_lock_init(&sp->lock);
+       spin_lock_init(&sp->rxlock);
        refcount_set(&sp->refcnt, 1);
        init_completion(&sp->dead);
 
@@ -913,6 +915,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
                        sp->led_state = 0x60;
                        /* fill trailing bytes with zeroes */
                        sp->tty->ops->write(sp->tty, &sp->led_state, 1);
+                       spin_lock_bh(&sp->rxlock);
                        rest = sp->rx_count;
                        if (rest != 0)
                                 for (i = rest; i <= 3; i++)
@@ -930,6 +933,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
                                sp_bump(sp, 0);
                        }
                        sp->rx_count_cooked = 0;
+                       spin_unlock_bh(&sp->rxlock);
                }
                break;
        case SIXP_TX_URUN: printk(KERN_DEBUG "6pack: TX underrun\n");
@@ -959,8 +963,11 @@ sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count)
                        decode_prio_command(sp, inbyte);
                else if ((inbyte & SIXP_STD_CMD_MASK) != 0)
                        decode_std_command(sp, inbyte);
-               else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK)
+               else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) {
+                       spin_lock_bh(&sp->rxlock);
                        decode_data(sp, inbyte);
+                       spin_unlock_bh(&sp->rxlock);
+               }
        }
 }
 
index 3233d14..495e85a 100644 (file)
@@ -214,7 +214,7 @@ struct ipa_init_modem_driver_req {
 
 /* The response to a IPA_QMI_INIT_DRIVER request begins with a standard
  * QMI response, but contains other information as well.  Currently we
- * simply wait for the the INIT_DRIVER transaction to complete and
+ * simply wait for the INIT_DRIVER transaction to complete and
  * ignore any other data that might be returned.
  */
 struct ipa_init_modem_driver_rsp {
index 817577e..f354fad 100644 (file)
@@ -243,6 +243,7 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
 #define DEFAULT_SEND_SCI true
 #define DEFAULT_ENCRYPT false
 #define DEFAULT_ENCODING_SA 0
+#define MACSEC_XPN_MAX_REPLAY_WINDOW ((1 << 30) - 1)
 
 static bool send_sci(const struct macsec_secy *secy)
 {
@@ -1697,7 +1698,7 @@ static bool validate_add_rxsa(struct nlattr **attrs)
                return false;
 
        if (attrs[MACSEC_SA_ATTR_PN] &&
-           *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
+           nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
                return false;
 
        if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -1753,7 +1754,8 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
        }
 
        pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
-       if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+       if (tb_sa[MACSEC_SA_ATTR_PN] &&
+           nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
                pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
                          nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
                rtnl_unlock();
@@ -1769,7 +1771,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
                if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
                        pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
                                  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
-                                 MACSEC_SA_ATTR_SALT);
+                                 MACSEC_SALT_LEN);
                        rtnl_unlock();
                        return -EINVAL;
                }
@@ -1842,7 +1844,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
        return 0;
 
 cleanup:
-       kfree(rx_sa);
+       macsec_rxsa_put(rx_sa);
        rtnl_unlock();
        return err;
 }
@@ -1939,7 +1941,7 @@ static bool validate_add_txsa(struct nlattr **attrs)
        if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
                return false;
 
-       if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+       if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
                return false;
 
        if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -2011,7 +2013,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
                if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
                        pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
                                  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
-                                 MACSEC_SA_ATTR_SALT);
+                                 MACSEC_SALT_LEN);
                        rtnl_unlock();
                        return -EINVAL;
                }
@@ -2085,7 +2087,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
 
 cleanup:
        secy->operational = was_operational;
-       kfree(tx_sa);
+       macsec_txsa_put(tx_sa);
        rtnl_unlock();
        return err;
 }
@@ -2293,7 +2295,7 @@ static bool validate_upd_sa(struct nlattr **attrs)
        if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
                return false;
 
-       if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+       if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
                return false;
 
        if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -3745,9 +3747,6 @@ static int macsec_changelink_common(struct net_device *dev,
                secy->operational = tx_sa && tx_sa->active;
        }
 
-       if (data[IFLA_MACSEC_WINDOW])
-               secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
-
        if (data[IFLA_MACSEC_ENCRYPT])
                tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
 
@@ -3793,6 +3792,16 @@ static int macsec_changelink_common(struct net_device *dev,
                }
        }
 
+       if (data[IFLA_MACSEC_WINDOW]) {
+               secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
+
+               /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
+                * for XPN cipher suites */
+               if (secy->xpn &&
+                   secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
+                       return -EINVAL;
+       }
+
        return 0;
 }
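[Editor's note: for XPN cipher suites, IEEE 802.1AEbw-2013 clause 10.7.8 bounds the replay window; MACSEC_XPN_MAX_REPLAY_WINDOW is 2^30 - 1 = 1073741823, and larger values are now rejected at changelink time instead of silently accepted. A sketch of the check in isolation:]

#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_XPN_MAX_REPLAY_WINDOW ((1 << 30) - 1)	/* 1073741823 */

/* Hypothetical validation mirroring the hunk above: XPN SecYs carry a
 * 64-bit packet number but only a bounded 30-bit replay window.
 */
static int demo_check_replay_window(bool xpn, u32 window)
{
	if (xpn && window > DEMO_XPN_MAX_REPLAY_WINDOW)
		return -EINVAL;
	return 0;
}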
 
@@ -3822,7 +3831,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
 
        ret = macsec_changelink_common(dev, data);
        if (ret)
-               return ret;
+               goto cleanup;
 
        /* If h/w offloading is available, propagate to the device */
        if (macsec_is_offloaded(macsec)) {
index 4cfd05c..d25fbb9 100644 (file)
@@ -896,7 +896,7 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
         */
        ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS);
        if (ret < 0)
-               return false;
+               return ret;
 
        if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) {
                int speed_value;
index a8db1a1..c7047f5 100644 (file)
@@ -34,6 +34,8 @@
 #define MDIO_AN_VEND_PROV                      0xc400
 #define MDIO_AN_VEND_PROV_1000BASET_FULL       BIT(15)
 #define MDIO_AN_VEND_PROV_1000BASET_HALF       BIT(14)
+#define MDIO_AN_VEND_PROV_5000BASET_FULL       BIT(11)
+#define MDIO_AN_VEND_PROV_2500BASET_FULL       BIT(10)
 #define MDIO_AN_VEND_PROV_DOWNSHIFT_EN         BIT(4)
 #define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK       GENMASK(3, 0)
 #define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT       4
@@ -231,9 +233,20 @@ static int aqr_config_aneg(struct phy_device *phydev)
                              phydev->advertising))
                reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
 
+       /* Handle the case when the 2.5G and 5G speeds are not advertised */
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+                             phydev->advertising))
+               reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
+
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+                             phydev->advertising))
+               reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
+
        ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
                                     MDIO_AN_VEND_PROV_1000BASET_HALF |
-                                    MDIO_AN_VEND_PROV_1000BASET_FULL, reg);
+                                    MDIO_AN_VEND_PROV_1000BASET_FULL |
+                                    MDIO_AN_VEND_PROV_2500BASET_FULL |
+                                    MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
        if (ret < 0)
                return ret;
        if (ret > 0)
index 6a467e7..59fe356 100644 (file)
@@ -2072,6 +2072,8 @@ static struct phy_driver at803x_driver[] = {
        /* ATHEROS AR9331 */
        PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
        .name                   = "Qualcomm Atheros AR9331 built-in PHY",
+       .probe                  = at803x_probe,
+       .remove                 = at803x_remove,
        .suspend                = at803x_suspend,
        .resume                 = at803x_resume,
        .flags                  = PHY_POLL_CABLE_TEST,
@@ -2087,6 +2089,8 @@ static struct phy_driver at803x_driver[] = {
        /* Qualcomm Atheros QCA9561 */
        PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
        .name                   = "Qualcomm Atheros QCA9561 built-in PHY",
+       .probe                  = at803x_probe,
+       .remove                 = at803x_remove,
        .suspend                = at803x_suspend,
        .resume                 = at803x_resume,
        .flags                  = PHY_POLL_CABLE_TEST,
@@ -2151,6 +2155,8 @@ static struct phy_driver at803x_driver[] = {
        PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
        .name                   = "Qualcomm QCA8081",
        .flags                  = PHY_POLL_CABLE_TEST,
+       .probe                  = at803x_probe,
+       .remove                 = at803x_remove,
        .config_intr            = at803x_config_intr,
        .handle_interrupt       = at803x_handle_interrupt,
        .get_tunable            = at803x_get_tunable,
index 4578963..0f1e617 100644 (file)
@@ -88,8 +88,10 @@ static void asix_ax88772a_link_change_notify(struct phy_device *phydev)
        /* Reset PHY, otherwise MII_LPA will provide outdated information.
         * This issue is reproducible only with some link partner PHYs
         */
-       if (phydev->state == PHY_NOLINK && phydev->drv->soft_reset)
-               phydev->drv->soft_reset(phydev);
+       if (phydev->state == PHY_NOLINK) {
+               phy_init_hw(phydev);
+               phy_start_aneg(phydev);
+       }
 }
 
 static struct phy_driver asix_driver[] = {
index e6ad3a4..8549e0e 100644 (file)
@@ -229,9 +229,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
                if (misr_status < 0)
                        return misr_status;
 
-               misr_status |= (DP83822_RX_ERR_HF_INT_EN |
-                               DP83822_FALSE_CARRIER_HF_INT_EN |
-                               DP83822_LINK_STAT_INT_EN |
+               misr_status |= (DP83822_LINK_STAT_INT_EN |
                                DP83822_ENERGY_DET_INT_EN |
                                DP83822_LINK_QUAL_INT_EN);
 
index ef62f35..8d3ee3a 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/io.h>
 #include <linux/uaccess.h>
 #include <linux/atomic.h>
+#include <linux/suspend.h>
 #include <net/netlink.h>
 #include <net/genetlink.h>
 #include <net/sock.h>
@@ -976,6 +977,28 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
        struct phy_driver *drv = phydev->drv;
        irqreturn_t ret;
 
+       /* Wakeup interrupts may occur during a system sleep transition.
+        * Postpone handling until the PHY has resumed.
+        */
+       if (IS_ENABLED(CONFIG_PM_SLEEP) && phydev->irq_suspended) {
+               struct net_device *netdev = phydev->attached_dev;
+
+               if (netdev) {
+                       struct device *parent = netdev->dev.parent;
+
+                       if (netdev->wol_enabled)
+                               pm_system_wakeup();
+                       else if (device_may_wakeup(&netdev->dev))
+                               pm_wakeup_dev_event(&netdev->dev, 0, true);
+                       else if (parent && device_may_wakeup(parent))
+                               pm_wakeup_dev_event(parent, 0, true);
+               }
+
+               phydev->irq_rerun = 1;
+               disable_irq_nosync(irq);
+               return IRQ_HANDLED;
+       }
+
        mutex_lock(&phydev->lock);
        ret = drv->handle_interrupt(phydev);
        mutex_unlock(&phydev->lock);
index 431a871..46acddd 100644 (file)
@@ -278,6 +278,15 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
        if (phydev->mac_managed_pm)
                return 0;
 
+       /* Wakeup interrupts may occur during the system sleep transition when
+        * the PHY is inaccessible. Set flag to postpone handling until the PHY
+        * has resumed. Wait for concurrent interrupt handler to complete.
+        */
+       if (phy_interrupt_is_valid(phydev)) {
+               phydev->irq_suspended = 1;
+               synchronize_irq(phydev->irq);
+       }
+
        /* We must stop the state machine manually, otherwise it stops out of
         * control, possibly with the phydev->lock held. Upon resume, netdev
         * may call phy routines that try to grab the same lock, and that may
@@ -315,6 +324,20 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
        if (ret < 0)
                return ret;
 no_resume:
+       if (phy_interrupt_is_valid(phydev)) {
+               phydev->irq_suspended = 0;
+               synchronize_irq(phydev->irq);
+
+               /* Rerun interrupts which were postponed by phy_interrupt()
+                * because they occurred during the system sleep transition.
+                */
+               if (phydev->irq_rerun) {
+                       phydev->irq_rerun = 0;
+                       enable_irq(phydev->irq);
+                       irq_wake_thread(phydev->irq, phydev);
+               }
+       }
+
        if (phydev->attached_dev && phydev->adjust_link)
                phy_start_machine(phydev);
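[Editor's note: suspend and resume now bracket the PHY interrupt with a small flag protocol: suspend sets irq_suspended and uses synchronize_irq() to wait out a concurrent handler; a wakeup IRQ that fires while suspended sets irq_rerun and masks the line; resume clears the flag, unmasks the line and replays the lost event with irq_wake_thread(). A condensed sketch of the three participants, with a hypothetical demo_phy standing in for phy_device:]

#include <linux/interrupt.h>

struct demo_phy {
	int irq;
	unsigned int irq_suspended:1;
	unsigned int irq_rerun:1;
};

static irqreturn_t demo_irq_thread(int irq, void *data)
{
	struct demo_phy *p = data;

	if (p->irq_suspended) {
		p->irq_rerun = 1;
		disable_irq_nosync(irq);	/* quiet the line for now */
		return IRQ_HANDLED;		/* handled again after resume */
	}
	/* ... normal interrupt handling ... */
	return IRQ_HANDLED;
}

static void demo_suspend(struct demo_phy *p)
{
	p->irq_suspended = 1;
	synchronize_irq(p->irq);	/* wait out an in-flight handler */
}

static void demo_resume(struct demo_phy *p)
{
	p->irq_suspended = 0;
	synchronize_irq(p->irq);
	if (p->irq_rerun) {
		p->irq_rerun = 0;
		enable_irq(p->irq);
		irq_wake_thread(p->irq, p);	/* replay the missed event */
	}
}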
 
index 9a5d5a1..e7b0e12 100644 (file)
@@ -2516,7 +2516,7 @@ static int sfp_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, sfp);
 
-       err = devm_add_action(sfp->dev, sfp_cleanup, sfp);
+       err = devm_add_action_or_reset(sfp->dev, sfp_cleanup, sfp);
        if (err < 0)
                return err;
 
index 1b54684..96d3c40 100644 (file)
@@ -110,7 +110,7 @@ static int smsc_phy_config_init(struct phy_device *phydev)
        struct smsc_phy_priv *priv = phydev->priv;
        int rc;
 
-       if (!priv->energy_enable)
+       if (!priv->energy_enable || phydev->irq != PHY_POLL)
                return 0;
 
        rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -210,6 +210,8 @@ static int lan95xx_config_aneg_ext(struct phy_device *phydev)
  * response on link pulses to detect presence of plugged Ethernet cable.
  * The Energy Detect Power-Down mode is enabled again in the end of procedure to
  * save approximately 220 mW of power if cable is unplugged.
+ * The workaround is only applicable to poll mode. Energy Detect Power-Down
+ * may not be used in interrupt mode, lest link change detection become
+ * unreliable.
  */
 static int lan87xx_read_status(struct phy_device *phydev)
 {
@@ -217,7 +219,7 @@ static int lan87xx_read_status(struct phy_device *phydev)
 
        int err = genphy_read_status(phydev);
 
-       if (!phydev->link && priv->energy_enable) {
+       if (!phydev->link && priv->energy_enable && phydev->irq == PHY_POLL) {
                /* Disable EDPD to wake up PHY */
                int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
                if (rc < 0)
index ff22b6b..36803d9 100644 (file)
@@ -450,6 +450,7 @@ static int bcm5421_init(struct mii_phy* phy)
                int can_low_power = 1;
                if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
                        can_low_power = 0;
+               of_node_put(np);
                if (can_low_power) {
                        /* Enable automatic low-power */
                        sungem_phy_write(phy, 0x1c, 0x9002);
index 87a635a..259b2b8 100644 (file)
@@ -273,6 +273,12 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
        }
 }
 
+static void tun_napi_enable(struct tun_file *tfile)
+{
+       if (tfile->napi_enabled)
+               napi_enable(&tfile->napi);
+}
+
 static void tun_napi_disable(struct tun_file *tfile)
 {
        if (tfile->napi_enabled)
@@ -634,7 +640,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
        tun = rtnl_dereference(tfile->tun);
 
        if (tun && clean) {
-               tun_napi_disable(tfile);
+               if (!tfile->detached)
+                       tun_napi_disable(tfile);
                tun_napi_del(tfile);
        }
 
@@ -653,8 +660,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
                if (clean) {
                        RCU_INIT_POINTER(tfile->tun, NULL);
                        sock_put(&tfile->sk);
-               } else
+               } else {
                        tun_disable_queue(tun, tfile);
+                       tun_napi_disable(tfile);
+               }
 
                synchronize_net();
                tun_flow_delete_by_queue(tun, tun->numqueues + 1);
@@ -727,6 +736,7 @@ static void tun_detach_all(struct net_device *dev)
                sock_put(&tfile->sk);
        }
        list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
+               tun_napi_del(tfile);
                tun_enable_queue(tfile);
                tun_queue_purge(tfile);
                xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -807,6 +817,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
 
        if (tfile->detached) {
                tun_enable_queue(tfile);
+               tun_napi_enable(tfile);
        } else {
                sock_hold(&tfile->sk);
                tun_napi_init(tun, tfile, napi, napi_frags);
index 2c81236..45d3cc5 100644 (file)
         AX_MEDIUM_RE)
 
 #define AX88772_MEDIUM_DEFAULT \
-       (AX_MEDIUM_FD | AX_MEDIUM_RFC | \
-        AX_MEDIUM_TFC | AX_MEDIUM_PS | \
+       (AX_MEDIUM_FD | AX_MEDIUM_PS | \
         AX_MEDIUM_AC | AX_MEDIUM_RE)
 
 /* AX88772 & AX88178 RX_CTL values */
index 632fa6c..b4a1b7a 100644 (file)
@@ -431,6 +431,7 @@ void asix_adjust_link(struct net_device *netdev)
 
        asix_write_medium_mode(dev, mode, 0);
        phy_print_status(phydev);
+       usbnet_link_change(dev, phydev->link, 0);
 }
 
 int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm)
index 7a8c11a..ac2d400 100644 (file)
@@ -1472,6 +1472,42 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
         * are bundled into this buffer and where we can find an array of
         * per-packet metadata (which contains elements encoded into u16).
         */
+
+       /* SKB contents for current firmware:
+        *   <packet 1> <padding>
+        *   ...
+        *   <packet N> <padding>
+        *   <per-packet metadata entry 1> <dummy header>
+        *   ...
+        *   <per-packet metadata entry N> <dummy header>
+        *   <padding2> <rx_hdr>
+        *
+        * where:
+        *   <packet N> contains pkt_len bytes:
+        *              2 bytes of IP alignment pseudo header
+        *              packet received
+        *   <per-packet metadata entry N> contains 4 bytes:
+        *              pkt_len and fields AX_RXHDR_*
+        *   <padding>  0-7 bytes to terminate at an
+        *              8-byte boundary (64-bit)
+        *   <padding2> 4 bytes to make rx_hdr terminate at an
+        *              8-byte boundary (64-bit)
+        *   <dummy header> contains 4 bytes:
+        *              pkt_len=0 and AX_RXHDR_DROP_ERR
+        *   <rx_hdr>   contains 4 bytes:
+        *              pkt_cnt and hdr_off (offset of
+        *                <per-packet metadata entry 1>)
+        *
+        * pkt_cnt is the number of entries in the per-packet metadata.
+        * In current firmware there are 2 entries per packet: the first
+        * points to the packet and the second is a dummy header.
+        * This was probably done to align fields to 64 bits and to
+        * maintain compatibility with old firmware.
+        * This code assumes that <dummy header> and <padding2> are
+        * optional.
+        */
+
        if (skb->len < 4)
                return 0;
        skb_trim(skb, skb->len - 4);
@@ -1485,51 +1521,66 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        /* Make sure that the bounds of the metadata array are inside the SKB
         * (and in front of the counter at the end).
         */
-       if (pkt_cnt * 2 + hdr_off > skb->len)
+       if (pkt_cnt * 4 + hdr_off > skb->len)
                return 0;
        pkt_hdr = (u32 *)(skb->data + hdr_off);
 
        /* Packets must not overlap the metadata array */
        skb_trim(skb, hdr_off);
 
-       for (; ; pkt_cnt--, pkt_hdr++) {
+       for (; pkt_cnt > 0; pkt_cnt--, pkt_hdr++) {
+               u16 pkt_len_plus_padd;
                u16 pkt_len;
 
                le32_to_cpus(pkt_hdr);
                pkt_len = (*pkt_hdr >> 16) & 0x1fff;
+               pkt_len_plus_padd = (pkt_len + 7) & 0xfff8;
 
-               if (pkt_len > skb->len)
+               /* Skip dummy header used for alignment */
+               if (pkt_len == 0)
+                       continue;
+
+               if (pkt_len_plus_padd > skb->len)
                        return 0;
 
                /* Check CRC or runt packet */
-               if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) &&
-                   pkt_len >= 2 + ETH_HLEN) {
-                       bool last = (pkt_cnt == 0);
-
-                       if (last) {
-                               ax_skb = skb;
-                       } else {
-                               ax_skb = skb_clone(skb, GFP_ATOMIC);
-                               if (!ax_skb)
-                                       return 0;
-                       }
-                       ax_skb->len = pkt_len;
-                       /* Skip IP alignment pseudo header */
-                       skb_pull(ax_skb, 2);
-                       skb_set_tail_pointer(ax_skb, ax_skb->len);
-                       ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
-                       ax88179_rx_checksum(ax_skb, pkt_hdr);
+               if ((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) ||
+                   pkt_len < 2 + ETH_HLEN) {
+                       dev->net->stats.rx_errors++;
+                       skb_pull(skb, pkt_len_plus_padd);
+                       continue;
+               }
 
-                       if (last)
-                               return 1;
+               /* last packet */
+               if (pkt_len_plus_padd == skb->len) {
+                       skb_trim(skb, pkt_len);
 
-                       usbnet_skb_return(dev, ax_skb);
+                       /* Skip IP alignment pseudo header */
+                       skb_pull(skb, 2);
+
+                       skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
+                       ax88179_rx_checksum(skb, pkt_hdr);
+                       return 1;
                }
 
-               /* Trim this packet away from the SKB */
-               if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8))
+               ax_skb = skb_clone(skb, GFP_ATOMIC);
+               if (!ax_skb)
                        return 0;
+               skb_trim(ax_skb, pkt_len);
+
+               /* Skip IP alignment pseudo header */
+               skb_pull(ax_skb, 2);
+
+               skb->truesize = pkt_len_plus_padd +
+                               SKB_DATA_ALIGN(sizeof(struct sk_buff));
+               ax88179_rx_checksum(ax_skb, pkt_hdr);
+               usbnet_skb_return(dev, ax_skb);
+
+               skb_pull(skb, pkt_len_plus_padd);
        }
+
+       return 0;
 }
 
 static struct sk_buff *
@@ -1750,7 +1801,7 @@ static const struct driver_info ax88179_info = {
        .link_reset = ax88179_link_reset,
        .reset = ax88179_reset,
        .stop = ax88179_stop,
-       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
        .rx_fixup = ax88179_rx_fixup,
        .tx_fixup = ax88179_tx_fixup,
 };
@@ -1763,7 +1814,7 @@ static const struct driver_info ax88178a_info = {
        .link_reset = ax88179_link_reset,
        .reset = ax88179_reset,
        .stop = ax88179_stop,
-       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
        .rx_fixup = ax88179_rx_fixup,
        .tx_fixup = ax88179_tx_fixup,
 };
@@ -1776,7 +1827,7 @@ static const struct driver_info cypress_GX3_info = {
        .link_reset = ax88179_link_reset,
        .reset = ax88179_reset,
        .stop = ax88179_stop,
-       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
        .rx_fixup = ax88179_rx_fixup,
        .tx_fixup = ax88179_tx_fixup,
 };
@@ -1789,7 +1840,7 @@ static const struct driver_info dlink_dub1312_info = {
        .link_reset = ax88179_link_reset,
        .reset = ax88179_reset,
        .stop = ax88179_stop,
-       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
        .rx_fixup = ax88179_rx_fixup,
        .tx_fixup = ax88179_tx_fixup,
 };
@@ -1802,7 +1853,7 @@ static const struct driver_info sitecom_info = {
        .link_reset = ax88179_link_reset,
        .reset = ax88179_reset,
        .stop = ax88179_stop,
-       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
        .rx_fixup = ax88179_rx_fixup,
        .tx_fixup = ax88179_tx_fixup,
 };
@@ -1815,7 +1866,7 @@ static const struct driver_info samsung_info = {
        .link_reset = ax88179_link_reset,
        .reset = ax88179_reset,
        .stop = ax88179_stop,
-       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
        .rx_fixup = ax88179_rx_fixup,
        .tx_fixup = ax88179_tx_fixup,
 };
@@ -1828,7 +1879,7 @@ static const struct driver_info lenovo_info = {
        .link_reset = ax88179_link_reset,
        .reset = ax88179_reset,
        .stop = ax88179_stop,
-       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
        .rx_fixup = ax88179_rx_fixup,
        .tx_fixup = ax88179_tx_fixup,
 };
@@ -1841,7 +1892,7 @@ static const struct driver_info belkin_info = {
        .link_reset = ax88179_link_reset,
        .reset  = ax88179_reset,
        .stop   = ax88179_stop,
-       .flags  = FLAG_ETHER | FLAG_FRAMING_AX,
+       .flags  = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
        .rx_fixup = ax88179_rx_fixup,
        .tx_fixup = ax88179_tx_fixup,
 };
@@ -1854,7 +1905,7 @@ static const struct driver_info toshiba_info = {
        .link_reset = ax88179_link_reset,
        .reset  = ax88179_reset,
        .stop = ax88179_stop,
-       .flags  = FLAG_ETHER | FLAG_FRAMING_AX,
+       .flags  = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
        .rx_fixup = ax88179_rx_fixup,
        .tx_fixup = ax88179_tx_fixup,
 };
@@ -1867,7 +1918,7 @@ static const struct driver_info mct_info = {
        .link_reset = ax88179_link_reset,
        .reset  = ax88179_reset,
        .stop   = ax88179_stop,
-       .flags  = FLAG_ETHER | FLAG_FRAMING_AX,
+       .flags  = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
        .rx_fixup = ax88179_rx_fixup,
        .tx_fixup = ax88179_tx_fixup,
 };
@@ -1880,7 +1931,7 @@ static const struct driver_info at_umc2000_info = {
        .link_reset = ax88179_link_reset,
        .reset  = ax88179_reset,
        .stop   = ax88179_stop,
-       .flags  = FLAG_ETHER | FLAG_FRAMING_AX,
+       .flags  = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
        .rx_fixup = ax88179_rx_fixup,
        .tx_fixup = ax88179_tx_fixup,
 };
@@ -1893,7 +1944,7 @@ static const struct driver_info at_umc200_info = {
        .link_reset = ax88179_link_reset,
        .reset  = ax88179_reset,
        .stop   = ax88179_stop,
-       .flags  = FLAG_ETHER | FLAG_FRAMING_AX,
+       .flags  = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
        .rx_fixup = ax88179_rx_fixup,
        .tx_fixup = ax88179_tx_fixup,
 };
@@ -1906,7 +1957,7 @@ static const struct driver_info at_umc2000sp_info = {
        .link_reset = ax88179_link_reset,
        .reset  = ax88179_reset,
        .stop   = ax88179_stop,
-       .flags  = FLAG_ETHER | FLAG_FRAMING_AX,
+       .flags  = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
        .rx_fixup = ax88179_rx_fixup,
        .tx_fixup = ax88179_tx_fixup,
 };
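
For reference, a userspace sketch of the receive-buffer walk described in the
layout comment in ax88179_rx_fixup() above: read the trailing rx_hdr,
bounds-check the metadata array, then consume packets front to back, skipping
zero-length dummy entries. Field extraction, constants, and the little-endian
assumption are simplifications for illustration, not the driver's exact
framing:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PKT_LEN(meta)   (((meta) >> 16) & 0x1fff)
#define PADDED(len)     (((len) + 7) & ~7u)

static int walk(const uint8_t *buf, size_t len)
{
	uint32_t rx_hdr, pkt_cnt, hdr_off;
	size_t off = 0;

	if (len < 4)
		return 0;
	memcpy(&rx_hdr, buf + len - 4, 4);      /* trailing rx_hdr (LE host) */
	pkt_cnt = rx_hdr & 0xffff;
	hdr_off = rx_hdr >> 16;
	if ((size_t)pkt_cnt * 4 + hdr_off > len - 4)
		return 0;                       /* metadata out of bounds */

	for (uint32_t i = 0; i < pkt_cnt; i++) {
		uint32_t meta, plen;

		memcpy(&meta, buf + hdr_off + 4 * i, 4);
		plen = PKT_LEN(meta);
		if (plen == 0)
			continue;               /* dummy alignment entry */
		if (off + PADDED(plen) > hdr_off)
			return 0;               /* packet would overlap metadata */
		printf("packet %u: %u bytes at offset %zu\n", i, plen, off);
		off += PADDED(plen);
	}
	return 1;
}

int main(void)
{
	/* one 60-byte packet padded to 64, one real + one dummy entry */
	uint8_t buf[64 + 8 + 4] = { 0 };
	uint32_t meta = 60u << 16, dummy = 0, rx = (64u << 16) | 2;

	memcpy(buf + 64, &meta, 4);
	memcpy(buf + 68, &dummy, 4);
	memcpy(buf + 72, &rx, 4);
	return !walk(buf, sizeof(buf));
}

Note how counting down to pkt_cnt == 0, as the fixed loop does, replaces the
old open-ended loop that could walk past the metadata on malformed input.
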
index e7fe9c0..1a376ed 100644 (file)
@@ -781,7 +781,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                        intf->altsetting->desc.bInterfaceNumber, 1)) {
                dev_err(dev, "Can't set altsetting 1.\n");
                ret = -EIO;
-               goto fail_mem;;
+               goto fail_mem;
        }
 
        netdev = alloc_etherdev(sizeof(struct catc));
index 7389d6e..0f6efaa 100644 (file)
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION                "12"
 
 /* Information for net */
-#define NET_VERSION            "12"
+#define NET_VERSION            "13"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -2156,7 +2156,7 @@ static inline void rtl_rx_vlan_tag(struct rx_desc *desc, struct sk_buff *skb)
 }
 
 static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
-                        struct sk_buff *skb, u32 len, u32 transport_offset)
+                        struct sk_buff *skb, u32 len)
 {
        u32 mss = skb_shinfo(skb)->gso_size;
        u32 opts1, opts2 = 0;
@@ -2167,6 +2167,8 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
        opts1 = len | TX_FS | TX_LS;
 
        if (mss) {
+               u32 transport_offset = (u32)skb_transport_offset(skb);
+
                if (transport_offset > GTTCPHO_MAX) {
                        netif_warn(tp, tx_err, tp->netdev,
                                   "Invalid transport offset 0x%x for TSO\n",
@@ -2197,6 +2199,7 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
                opts1 |= transport_offset << GTTCPHO_SHIFT;
                opts2 |= min(mss, MSS_MAX) << MSS_SHIFT;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               u32 transport_offset = (u32)skb_transport_offset(skb);
                u8 ip_protocol;
 
                if (transport_offset > TCPHO_MAX) {
@@ -2260,7 +2263,6 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
                struct tx_desc *tx_desc;
                struct sk_buff *skb;
                unsigned int len;
-               u32 offset;
 
                skb = __skb_dequeue(&skb_head);
                if (!skb)
@@ -2276,9 +2278,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
                tx_data = tx_agg_align(tx_data);
                tx_desc = (struct tx_desc *)tx_data;
 
-               offset = (u32)skb_transport_offset(skb);
-
-               if (r8152_tx_csum(tp, tx_desc, skb, skb->len, offset)) {
+               if (r8152_tx_csum(tp, tx_desc, skb, skb->len)) {
                        r8152_csum_workaround(tp, skb, &skb_head);
                        continue;
                }
@@ -2759,9 +2759,9 @@ rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
 {
        u32 mss = skb_shinfo(skb)->gso_size;
        int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX;
-       int offset = skb_transport_offset(skb);
 
-       if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset)
+       if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) &&
+           skb_transport_offset(skb) > max_offset)
                features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
        else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
                features &= ~NETIF_F_GSO_MASK;
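
The features_check change above asks the stack to fall back to software
checksumming and GSO when the transport header sits beyond what the hardware
descriptor can encode. A hedged standalone model of that clamp (the flag
values and offset limits are made up; the two limits stand in for
GTTCPHO_MAX and TCPHO_MAX):

#include <stdio.h>

#define F_CSUM (1u << 0)
#define F_GSO  (1u << 1)

static unsigned int features_check_model(unsigned int features,
					 int gso, int csum_partial,
					 int transport_offset)
{
	int max_offset = gso ? 254 : 127;   /* per-mode descriptor limit */

	if ((gso || csum_partial) && transport_offset > max_offset)
		features &= ~(F_CSUM | F_GSO);  /* fall back to software */
	return features;
}

int main(void)
{
	printf("%#x\n", features_check_model(F_CSUM | F_GSO, 1, 0, 300)); /* 0 */
	printf("%#x\n", features_check_model(F_CSUM | F_GSO, 1, 0, 60));  /* 0x3 */
	return 0;
}
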
@@ -5917,7 +5917,8 @@ static void r8153_enter_oob(struct r8152 *tp)
 
        wait_oob_link_list_ready(tp);
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu));
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522);
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT);
 
        switch (tp->version) {
        case RTL_VER_03:
@@ -5953,6 +5954,10 @@ static void r8153_enter_oob(struct r8152 *tp)
        ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
 
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+       ocp_data |= MCU_BORW_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
        rxdy_gated_en(tp, false);
 
        ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -6555,6 +6560,9 @@ static void rtl8156_down(struct r8152 *tp)
        rtl_disable(tp);
        rtl_reset_bmu(tp);
 
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522);
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT);
+
        /* Clear teredo wake event. bit[15:8] is the teredo wakeup
         * type. Set it to zero. bits[7:0] are the W1C bits about
         * the events. Set them to all 1 to clear them.
@@ -6565,6 +6573,10 @@ static void rtl8156_down(struct r8152 *tp)
        ocp_data |= NOW_IS_OOB;
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
 
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+       ocp_data |= MCU_BORW_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
        rtl_rx_vlan_en(tp, true);
        rxdy_gated_en(tp, false);
 
index 1cb6dab..78a9275 100644 (file)
@@ -2004,7 +2004,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
                   cmd, reqtype, value, index, size);
 
        if (size) {
-               buf = kmalloc(size, GFP_KERNEL);
+               buf = kmalloc(size, GFP_NOIO);
                if (!buf)
                        goto out;
        }
@@ -2036,7 +2036,7 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
                   cmd, reqtype, value, index, size);
 
        if (data) {
-               buf = kmemdup(data, size, GFP_KERNEL);
+               buf = kmemdup(data, size, GFP_NOIO);
                if (!buf)
                        goto out;
        } else {
@@ -2137,7 +2137,7 @@ static void usbnet_async_cmd_cb(struct urb *urb)
 int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
                           u16 value, u16 index, const void *data, u16 size)
 {
-       struct usb_ctrlrequest *req = NULL;
+       struct usb_ctrlrequest *req;
        struct urb *urb;
        int err = -ENOMEM;
        void *buf = NULL;
@@ -2155,7 +2155,7 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
                if (!buf) {
                        netdev_err(dev->net, "Error allocating buffer"
                                   " in %s!\n", __func__);
-                       goto fail_free;
+                       goto fail_free_urb;
                }
        }
 
@@ -2179,14 +2179,21 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
        if (err < 0) {
                netdev_err(dev->net, "Error submitting the control"
                           " message: status=%d\n", err);
-               goto fail_free;
+               goto fail_free_all;
        }
        return 0;
 
+fail_free_all:
+       kfree(req);
 fail_free_buf:
        kfree(buf);
-fail_free:
-       kfree(req);
+       /* Reset transfer_flags to avoid a double free: the
+        * URB_FREE_BUFFER flag can be set only after the URB
+        * has been filled.
+        */
+       urb->transfer_flags = 0;
+fail_free_urb:
        usb_free_urb(urb);
 fail:
        return err;
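
What the reordered usbnet error labels buy: once the URB owns the transfer
buffer, freeing both by hand is a double free, so the submit-failure path must
drop ownership before freeing the URB. A malloc/free model of the unwind
(struct urb_model and owns_buffer are illustrative, not USB-core types):

#include <stdlib.h>

struct urb_model {
	void *transfer_buffer;
	int owns_buffer;               /* stands in for URB_FREE_BUFFER */
};

static void free_urb_model(struct urb_model *u)
{
	if (u->owns_buffer)
		free(u->transfer_buffer); /* the core would free it too */
	free(u);
}

static int submit(struct urb_model *u)
{
	(void)u;
	return -1;                     /* force the error path */
}

static int write_cmd_async_model(size_t size)
{
	struct urb_model *u = calloc(1, sizeof(*u));
	void *buf, *req;
	int err = -1;

	if (!u)
		return err;
	buf = malloc(size);
	if (!buf)
		goto fail_free_urb;
	req = malloc(16);
	if (!req)
		goto fail_free_buf;

	u->transfer_buffer = buf;
	u->owns_buffer = 1;            /* set only after the URB is filled */

	err = submit(u);
	if (err < 0)
		goto fail_free_all;
	return 0;

fail_free_all:
	free(req);
fail_free_buf:
	free(buf);
	u->owns_buffer = 0;            /* avoid the double free below */
fail_free_urb:
	free_urb_model(u);
	return err;
}

int main(void)
{
	write_cmd_async_model(32);
	return 0;
}
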
index 466da01..2cb833b 100644 (file)
@@ -312,6 +312,7 @@ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
 static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+       struct netdev_queue *queue = NULL;
        struct veth_rq *rq = NULL;
        struct net_device *rcv;
        int length = skb->len;
@@ -329,6 +330,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
        rxq = skb_get_queue_mapping(skb);
        if (rxq < rcv->real_num_rx_queues) {
                rq = &rcv_priv->rq[rxq];
+               queue = netdev_get_tx_queue(dev, rxq);
 
                /* The napi pointer is available when an XDP program is
                 * attached or when GRO is enabled
@@ -340,6 +342,8 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 
        skb_tx_timestamp(skb);
        if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
+               if (queue)
+                       txq_trans_cond_update(queue);
                if (!use_napi)
                        dev_lstats_add(dev, length);
        } else {
index db05b5e..ec8e1b3 100644 (file)
@@ -242,9 +242,15 @@ struct virtnet_info {
        /* Packet virtio header size */
        u8 hdr_len;
 
-       /* Work struct for refilling if we run low on memory. */
+       /* Work struct for delayed refilling if we run low on memory. */
        struct delayed_work refill;
 
+       /* Is delayed refill enabled? */
+       bool refill_enabled;
+
+       /* Lock to synchronize access to refill_enabled */
+       spinlock_t refill_lock;
+
        /* Work struct for config space updates */
        struct work_struct config_work;
 
@@ -348,6 +354,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
        return p;
 }
 
+static void enable_delayed_refill(struct virtnet_info *vi)
+{
+       spin_lock_bh(&vi->refill_lock);
+       vi->refill_enabled = true;
+       spin_unlock_bh(&vi->refill_lock);
+}
+
+static void disable_delayed_refill(struct virtnet_info *vi)
+{
+       spin_lock_bh(&vi->refill_lock);
+       vi->refill_enabled = false;
+       spin_unlock_bh(&vi->refill_lock);
+}
+
 static void virtqueue_napi_schedule(struct napi_struct *napi,
                                    struct virtqueue *vq)
 {
@@ -1527,8 +1547,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
        }
 
        if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
-               if (!try_fill_recv(vi, rq, GFP_ATOMIC))
-                       schedule_delayed_work(&vi->refill, 0);
+               if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
+                       spin_lock(&vi->refill_lock);
+                       if (vi->refill_enabled)
+                               schedule_delayed_work(&vi->refill, 0);
+                       spin_unlock(&vi->refill_lock);
+               }
        }
 
        u64_stats_update_begin(&rq->stats.syncp);
@@ -1651,6 +1675,8 @@ static int virtnet_open(struct net_device *dev)
        struct virtnet_info *vi = netdev_priv(dev);
        int i, err;
 
+       enable_delayed_refill(vi);
+
        for (i = 0; i < vi->max_queue_pairs; i++) {
                if (i < vi->curr_queue_pairs)
                        /* Make sure we have some buffers: if oom use wq. */
@@ -2033,6 +2059,8 @@ static int virtnet_close(struct net_device *dev)
        struct virtnet_info *vi = netdev_priv(dev);
        int i;
 
+       /* Make sure NAPI doesn't schedule refill work */
+       disable_delayed_refill(vi);
        /* Make sure refill_work doesn't re-enable napi! */
        cancel_delayed_work_sync(&vi->refill);
 
@@ -2768,7 +2796,6 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
 static void virtnet_freeze_down(struct virtio_device *vdev)
 {
        struct virtnet_info *vi = vdev->priv;
-       int i;
 
        /* Make sure no work handler is accessing the device */
        flush_work(&vi->config_work);
@@ -2776,14 +2803,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
        netif_tx_lock_bh(vi->dev);
        netif_device_detach(vi->dev);
        netif_tx_unlock_bh(vi->dev);
-       cancel_delayed_work_sync(&vi->refill);
-
-       if (netif_running(vi->dev)) {
-               for (i = 0; i < vi->max_queue_pairs; i++) {
-                       napi_disable(&vi->rq[i].napi);
-                       virtnet_napi_tx_disable(&vi->sq[i].napi);
-               }
-       }
+       if (netif_running(vi->dev))
+               virtnet_close(vi->dev);
 }
 
 static int init_vqs(struct virtnet_info *vi);
@@ -2791,7 +2812,7 @@ static int init_vqs(struct virtnet_info *vi);
 static int virtnet_restore_up(struct virtio_device *vdev)
 {
        struct virtnet_info *vi = vdev->priv;
-       int err, i;
+       int err;
 
        err = init_vqs(vi);
        if (err)
@@ -2799,16 +2820,12 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 
        virtio_device_ready(vdev);
 
-       if (netif_running(vi->dev)) {
-               for (i = 0; i < vi->curr_queue_pairs; i++)
-                       if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
-                               schedule_delayed_work(&vi->refill, 0);
+       enable_delayed_refill(vi);
 
-               for (i = 0; i < vi->max_queue_pairs; i++) {
-                       virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
-                       virtnet_napi_tx_enable(vi, vi->sq[i].vq,
-                                              &vi->sq[i].napi);
-               }
+       if (netif_running(vi->dev)) {
+               err = virtnet_open(vi->dev);
+               if (err)
+                       return err;
        }
 
        netif_tx_lock_bh(vi->dev);
@@ -3548,6 +3565,7 @@ static int virtnet_probe(struct virtio_device *vdev)
        vdev->priv = vi;
 
        INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+       spin_lock_init(&vi->refill_lock);
 
        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
@@ -3655,14 +3673,20 @@ static int virtnet_probe(struct virtio_device *vdev)
        if (vi->has_rss || vi->has_rss_hash_report)
                virtnet_init_default_rss(vi);
 
-       err = register_netdev(dev);
+       /* serialize netdev register + virtio_device_ready() with ndo_open() */
+       rtnl_lock();
+
+       err = register_netdevice(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
+               rtnl_unlock();
                goto free_failover;
        }
 
        virtio_device_ready(vdev);
 
+       rtnl_unlock();
+
        err = virtnet_cpu_notif_add(vi);
        if (err) {
                pr_debug("virtio_net: registering cpu notifier failed\n");
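
The refill_enabled gate added above closes a race in which the NAPI receive
path could schedule refill work after virtnet_close() had already cancelled
it. A minimal userspace sketch of the gate, with a pthread mutex standing in
for the BH spinlock (names mirror the driver's but this is not driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t refill_lock = PTHREAD_MUTEX_INITIALIZER;
static bool refill_enabled;
static int scheduled;

static void try_schedule_refill(void)    /* called from the receive path */
{
	pthread_mutex_lock(&refill_lock);
	if (refill_enabled)
		scheduled++;             /* schedule_delayed_work() analogue */
	pthread_mutex_unlock(&refill_lock);
}

static void set_refill(bool on)          /* enable_/disable_delayed_refill() */
{
	pthread_mutex_lock(&refill_lock);
	refill_enabled = on;
	pthread_mutex_unlock(&refill_lock);
}

int main(void)
{
	set_refill(true);                /* virtnet_open() */
	try_schedule_refill();           /* rx path low on buffers: queued */
	set_refill(false);               /* virtnet_close(): gate shut ... */
	try_schedule_refill();           /* ... so this one is dropped */
	printf("refills scheduled: %d\n", scheduled);  /* 1 */
	return 0;
}

Closing the gate before cancel_delayed_work_sync() is what guarantees no new
work can be queued once the cancellation has completed.
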
index 84d1c70..7b1dc19 100644 (file)
@@ -3822,7 +3822,8 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
 
        switch (ev->evt_type) {
        case WMI_BSS_COLOR_COLLISION_DETECTION:
-               ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap);
+               ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
+                                                      GFP_KERNEL);
                ath11k_dbg(ab, ATH11K_DBG_WMI,
                           "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
                           ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
index 2f746eb..6f83af8 100644 (file)
@@ -4912,6 +4912,8 @@ static int hwsim_virtio_probe(struct virtio_device *vdev)
        if (err)
                return err;
 
+       virtio_device_ready(vdev);
+
        err = fill_vq(hwsim_vqs[HWSIM_VQ_RX]);
        if (err)
                goto out_remove;
index c02be4a..7db627f 100644 (file)
@@ -1233,9 +1233,6 @@ struct rtw_chip_info {
        const struct wiphy_wowlan_support *wowlan_stub;
        const u8 max_sched_scan_ssids;
 
-       /* for 8821c set channel */
-       u32 ch_param[3];
-
        /* coex paras */
        u32 coex_para_ver;
        u8 bt_desired_ver;
@@ -1937,6 +1934,9 @@ struct rtw_hal {
 
        enum rtw_sar_bands sar_band;
        struct rtw_sar sar;
+
+       /* for 8821c set channel */
+       u32 ch_param[3];
 };
 
 struct rtw_path_div {
index ffee39e..488a7dd 100644 (file)
@@ -125,6 +125,7 @@ static void rtw8821c_phy_bf_init(struct rtw_dev *rtwdev)
 
 static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev)
 {
+       struct rtw_hal *hal = &rtwdev->hal;
        u8 crystal_cap, val;
 
        /* power on BB/RF domain */
@@ -159,9 +160,9 @@ static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev)
 
        /* post init after header files config */
        rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST);
-       rtwdev->chip->ch_param[0] = rtw_read32_mask(rtwdev, REG_TXSF2, MASKDWORD);
-       rtwdev->chip->ch_param[1] = rtw_read32_mask(rtwdev, REG_TXSF6, MASKDWORD);
-       rtwdev->chip->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD);
+       hal->ch_param[0] = rtw_read32_mask(rtwdev, REG_TXSF2, MASKDWORD);
+       hal->ch_param[1] = rtw_read32_mask(rtwdev, REG_TXSF6, MASKDWORD);
+       hal->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD);
 
        rtw_phy_init(rtwdev);
        rtwdev->dm_info.cck_pd_default = rtw_read8(rtwdev, REG_CSRATIO) & 0x1f;
@@ -351,6 +352,7 @@ static void rtw8821c_set_channel_rxdfir(struct rtw_dev *rtwdev, u8 bw)
 static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
                                    u8 primary_ch_idx)
 {
+       struct rtw_hal *hal = &rtwdev->hal;
        u32 val32;
 
        if (channel <= 14) {
@@ -367,11 +369,11 @@ static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
                        rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, 0x00003667);
                } else {
                        rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD,
-                                        rtwdev->chip->ch_param[0]);
+                                        hal->ch_param[0]);
                        rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD,
-                                        rtwdev->chip->ch_param[1] & MASKLWORD);
+                                        hal->ch_param[1] & MASKLWORD);
                        rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD,
-                                        rtwdev->chip->ch_param[2]);
+                                        hal->ch_param[2]);
                }
        } else if (channel > 35) {
                rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x1);
index dbac4c0..a033540 100644 (file)
@@ -495,6 +495,7 @@ void xenvif_rx_action(struct xenvif_queue *queue)
        queue->rx_copy.completed = &completed_skbs;
 
        while (xenvif_rx_ring_slots_available(queue) &&
+              !skb_queue_empty(&queue->rx_queue) &&
               work_done < RX_BATCH_SIZE) {
                xenvif_rx_skb(queue);
                work_done++;
index 8c0b954..2409007 100644 (file)
@@ -66,6 +66,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
 MODULE_PARM_DESC(max_queues,
                 "Maximum number of queues per virtual interface");
 
+static bool __read_mostly xennet_trusted = true;
+module_param_named(trusted, xennet_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
 #define XENNET_TIMEOUT  (5 * HZ)
 
 static const struct ethtool_ops xennet_ethtool_ops;
@@ -173,6 +177,9 @@ struct netfront_info {
        /* Is device behaving sane? */
        bool broken;
 
+       /* Should skbs be bounced into a zeroed buffer? */
+       bool bounce;
+
        atomic_t rx_gso_checksum_fixup;
 };
 
@@ -271,7 +278,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
        if (unlikely(!skb))
                return NULL;
 
-       page = page_pool_dev_alloc_pages(queue->page_pool);
+       page = page_pool_alloc_pages(queue->page_pool,
+                                    GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
        if (unlikely(!page)) {
                kfree_skb(skb);
                return NULL;
@@ -665,6 +673,33 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
        return nxmit;
 }
 
+struct sk_buff *bounce_skb(const struct sk_buff *skb)
+{
+       unsigned int headerlen = skb_headroom(skb);
+       /* Align size to allocate full pages and avoid contiguous data leaks */
+       unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
+                                 XEN_PAGE_SIZE);
+       struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
+
+       if (!n)
+               return NULL;
+
+       if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
+               WARN_ONCE(1, "misaligned skb allocated\n");
+               kfree_skb(n);
+               return NULL;
+       }
+
+       /* Set the data pointer */
+       skb_reserve(n, headerlen);
+       /* Set the tail pointer and length */
+       skb_put(n, skb->len);
+
+       BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
+
+       skb_copy_header(n, skb);
+       return n;
+}
 
 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 
@@ -718,9 +753,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 
        /* The first req should be at least ETH_HLEN size or the packet will be
         * dropped by netback.
+        *
+        * If the backend is not trusted bounce all data to zeroed pages to
+        * avoid exposing contiguous data on the granted page not belonging to
+        * the skb.
         */
-       if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
-               nskb = skb_copy(skb, GFP_ATOMIC);
+       if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+               nskb = bounce_skb(skb);
                if (!nskb)
                        goto drop;
                dev_consume_skb_any(skb);
@@ -1053,8 +1092,10 @@ static int xennet_get_responses(struct netfront_queue *queue,
                        }
                }
                rcu_read_unlock();
-next:
+
                __skb_queue_tail(list, skb);
+
+next:
                if (!(rx->flags & XEN_NETRXF_more_data))
                        break;
 
@@ -2214,6 +2255,10 @@ static int talk_to_netback(struct xenbus_device *dev,
 
        info->netdev->irq = 0;
 
+       /* Check if backend is trusted. */
+       info->bounce = !xennet_trusted ||
+                      !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
        /* Check if backend supports multiple queues */
        max_queues = xenbus_read_unsigned(info->xbdev->otherend,
                                          "multi-queue-max-queues", 1);
@@ -2381,6 +2426,9 @@ static int xennet_connect(struct net_device *dev)
                return err;
        if (np->netback_has_xdp_headroom)
                pr_info("backend supports XDP headroom\n");
+       if (np->bounce)
+               dev_info(&np->xbdev->dev,
+                        "bouncing transmitted data to zeroed pages\n");
 
        /* talk_to_netback() sets the correct number of queues */
        num_queues = dev->real_num_tx_queues;
index ceef81d..01329b9 100644 (file)
@@ -167,9 +167,9 @@ static int nfcmrvl_i2c_parse_dt(struct device_node *node,
                pdata->irq_polarity = IRQF_TRIGGER_RISING;
 
        ret = irq_of_parse_and_map(node, 0);
-       if (ret < 0) {
-               pr_err("Unable to get irq, error: %d\n", ret);
-               return ret;
+       if (!ret) {
+               pr_err("Unable to get irq\n");
+               return -EINVAL;
        }
        pdata->irq = ret;
 
index a38e2fc..ad3359a 100644 (file)
@@ -115,9 +115,9 @@ static int nfcmrvl_spi_parse_dt(struct device_node *node,
        }
 
        ret = irq_of_parse_and_map(node, 0);
-       if (ret < 0) {
-               pr_err("Unable to get irq, error: %d\n", ret);
-               return ret;
+       if (!ret) {
+               pr_err("Unable to get irq\n");
+               return -EINVAL;
        }
        pdata->irq = ret;
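
Both NFC fixes above correct the same misuse: irq_of_parse_and_map() returns
an unsigned virq where 0 means failure, so a negative-value check is dead
code. A standalone sketch of the corrected pattern (parse_and_map_model() is
a hypothetical stand-in for the kernel helper):

#include <stdio.h>

static unsigned int parse_and_map_model(int have_irq)
{
	return have_irq ? 42u : 0u;      /* 0 signals failure, never negative */
}

static int get_irq(int have_irq, unsigned int *irq)
{
	unsigned int ret = parse_and_map_model(have_irq);

	if (!ret) {                      /* correct: test for 0 */
		fprintf(stderr, "Unable to get irq\n");
		return -22;              /* -EINVAL */
	}
	*irq = ret;
	return 0;
}

int main(void)
{
	unsigned int irq;

	return get_irq(1, &irq) || get_irq(0, &irq) != -22;
}
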
 
index 7e451c1..ae2ba08 100644 (file)
@@ -122,7 +122,9 @@ static int nxp_nci_i2c_fw_read(struct nxp_nci_i2c_phy *phy,
        skb_put_data(*skb, &header, NXP_NCI_FW_HDR_LEN);
 
        r = i2c_master_recv(client, skb_put(*skb, frame_len), frame_len);
-       if (r != frame_len) {
+       if (r < 0) {
+               goto fw_read_exit_free_skb;
+       } else if (r != frame_len) {
                nfc_err(&client->dev,
                        "Invalid frame length: %u (expected %zu)\n",
                        r, frame_len);
@@ -162,8 +164,13 @@ static int nxp_nci_i2c_nci_read(struct nxp_nci_i2c_phy *phy,
 
        skb_put_data(*skb, (void *)&header, NCI_CTRL_HDR_SIZE);
 
+       if (!header.plen)
+               return 0;
+
        r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen);
-       if (r != header.plen) {
+       if (r < 0) {
+               goto nci_read_exit_free_skb;
+       } else if (r != header.plen) {
                nfc_err(&client->dev,
                        "Invalid frame payload length: %u (expected %u)\n",
                        r, header.plen);
index a4fc17d..b38d035 100644 (file)
@@ -176,8 +176,8 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
        ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
 
        /* make sure we are in the region */
-       if (ctx->phys < nd_region->ndr_start
-                       || (ctx->phys + ctx->cleared) > ndr_end)
+       if (ctx->phys < nd_region->ndr_start ||
+           (ctx->phys + ctx->cleared - 1) > ndr_end)
                return 0;
 
        sector = (ctx->phys - nd_region->ndr_start) / 512;
index 24165da..6a12a90 100644 (file)
@@ -2546,6 +2546,20 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
                .vid = 0x1e0f,
                .mn = "KCD6XVUL6T40",
                .quirks = NVME_QUIRK_NO_APST,
+       },
+       {
+               /*
+                * The external Samsung X5 SSD fails initialization without a
+                * delay before checking if it is ready and has a whole set of
+                * other problems.  To make this even more interesting, it
+                * shares the PCI ID with internal Samsung 970 Evo Plus that
+                * does not need or want these quirks.
+                */
+               .vid = 0x144d,
+               .mn = "Samsung Portable SSD X5",
+               .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+                         NVME_QUIRK_NO_DEEPEST_PS |
+                         NVME_QUIRK_IGNORE_DEV_SUBNQN,
        }
 };
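
The X5 entry above works because core quirks match on more than the PCI ID:
the vendor ID plus the model string from Identify Controller disambiguate two
devices that share PCI ID 0x144d:0xa808. A hedged sketch of that lookup
(struct quirk_entry and the prefix-matching rule are simplifications, not the
kernel's exact string comparison):

#include <stdio.h>
#include <string.h>

struct quirk_entry {
	unsigned short vid;
	const char *mn;
	unsigned long quirks;
};

static const struct quirk_entry quirks[] = {
	{ 0x144d, "Samsung Portable SSD X5", 0x7 },
};

static unsigned long lookup_quirks(unsigned short vid, const char *mn)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].vid == vid &&
		    !strncmp(quirks[i].mn, mn, strlen(quirks[i].mn)))
			return quirks[i].quirks;
	return 0;
}

int main(void)
{
	/* same VID, different model strings -> different quirk sets */
	printf("X5:  %#lx\n", lookup_quirks(0x144d, "Samsung Portable SSD X5 "));
	printf("970: %#lx\n", lookup_quirks(0x144d, "Samsung SSD 970 EVO Plus"));
	return 0;
}
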
 
@@ -3285,8 +3299,8 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
         * we have no UUID set
         */
        if (uuid_is_null(&ids->uuid)) {
-               printk_ratelimited(KERN_WARNING
-                                  "No UUID available providing old NGUID\n");
+               dev_warn_ratelimited(dev,
+                       "No UUID available providing old NGUID\n");
                return sysfs_emit(buf, "%pU\n", ids->nguid);
        }
        return sysfs_emit(buf, "%pU\n", &ids->uuid);
@@ -3772,7 +3786,7 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns)
 }
 
 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
-               unsigned nsid, struct nvme_ns_ids *ids)
+               unsigned nsid, struct nvme_ns_ids *ids, bool is_shared)
 {
        struct nvme_ns_head *head;
        size_t size = sizeof(*head);
@@ -3796,6 +3810,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
        head->subsys = ctrl->subsys;
        head->ns_id = nsid;
        head->ids = *ids;
+       head->shared = is_shared;
        kref_init(&head->ref);
 
        if (head->ids.csi) {
@@ -3863,6 +3878,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
        if (ret) {
                dev_err(ctrl->device,
                        "globally duplicate IDs for nsid %d\n", nsid);
+               nvme_print_device_info(ctrl);
                return ret;
        }
 
@@ -3876,12 +3892,11 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
                                nsid);
                        goto out_unlock;
                }
-               head = nvme_alloc_ns_head(ctrl, nsid, ids);
+               head = nvme_alloc_ns_head(ctrl, nsid, ids, is_shared);
                if (IS_ERR(head)) {
                        ret = PTR_ERR(head);
                        goto out_unlock;
                }
-               head->shared = is_shared;
        } else {
                ret = -EINVAL;
                if (!is_shared || !head->shared) {
@@ -4580,6 +4595,8 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
        nvme_stop_failfast_work(ctrl);
        flush_work(&ctrl->async_event_work);
        cancel_work_sync(&ctrl->fw_act_work);
+       if (ctrl->ops->stop_ctrl)
+               ctrl->ops->stop_ctrl(ctrl);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
 
index 9b72b6e..5558f88 100644 (file)
@@ -502,7 +502,9 @@ struct nvme_ctrl_ops {
        void (*free_ctrl)(struct nvme_ctrl *ctrl);
        void (*submit_async_event)(struct nvme_ctrl *ctrl);
        void (*delete_ctrl)(struct nvme_ctrl *ctrl);
+       void (*stop_ctrl)(struct nvme_ctrl *ctrl);
        int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
+       void (*print_device_info)(struct nvme_ctrl *ctrl);
 };
 
 /*
@@ -548,6 +550,33 @@ static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
        return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
 }
 
+/*
+ * Return the length of the string without the space padding
+ */
+static inline int nvme_strlen(char *s, int len)
+{
+       while (s[len - 1] == ' ')
+               len--;
+       return len;
+}
+
+static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
+{
+       struct nvme_subsystem *subsys = ctrl->subsys;
+
+       if (ctrl->ops->print_device_info) {
+               ctrl->ops->print_device_info(ctrl);
+               return;
+       }
+
+       dev_err(ctrl->device,
+               "VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
+               nvme_strlen(subsys->model, sizeof(subsys->model)),
+               subsys->model, nvme_strlen(subsys->firmware_rev,
+                                          sizeof(subsys->firmware_rev)),
+               subsys->firmware_rev);
+}
+
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
 void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
                            const char *dev_name);
index 48f4f6e..73d9fcb 100644 (file)
@@ -1334,6 +1334,14 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
                dev_warn(dev->ctrl.device,
                         "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
                         csts, result);
+
+       if (csts != ~0)
+               return;
+
+       dev_warn(dev->ctrl.device,
+                "Does your device have a faulty power saving mode enabled?\n");
+       dev_warn(dev->ctrl.device,
+                "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
 }
 
 static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
@@ -2682,8 +2690,13 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
        mutex_lock(&dev->shutdown_lock);
-       if (pci_device_is_present(pdev) && pci_is_enabled(pdev)) {
-               u32 csts = readl(dev->bar + NVME_REG_CSTS);
+       if (pci_is_enabled(pdev)) {
+               u32 csts;
+
+               if (pci_device_is_present(pdev))
+                       csts = readl(dev->bar + NVME_REG_CSTS);
+               else
+                       csts = ~0;
 
                if (dev->ctrl.state == NVME_CTRL_LIVE ||
                    dev->ctrl.state == NVME_CTRL_RESETTING) {
@@ -2976,6 +2989,21 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
        return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
 }
 
+
+static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
+{
+       struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
+       struct nvme_subsystem *subsys = ctrl->subsys;
+
+       dev_err(ctrl->device,
+               "VID:DID %04x:%04x model:%.*s firmware:%.*s\n",
+               pdev->vendor, pdev->device,
+               nvme_strlen(subsys->model, sizeof(subsys->model)),
+               subsys->model, nvme_strlen(subsys->firmware_rev,
+                                          sizeof(subsys->firmware_rev)),
+               subsys->firmware_rev);
+}
+
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
        .name                   = "pcie",
        .module                 = THIS_MODULE,
@@ -2987,6 +3015,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
        .free_ctrl              = nvme_pci_free_ctrl,
        .submit_async_event     = nvme_pci_submit_async_event,
        .get_address            = nvme_pci_get_address,
+       .print_device_info      = nvme_pci_print_device_info,
 };
 
 static int nvme_dev_map(struct nvme_dev *dev)
@@ -3421,7 +3450,8 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_VDEVICE(REDHAT, 0x0010),  /* Qemu emulated controller */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x126f, 0x2263),   /* Silicon Motion unidentified */
-               .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
+               .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
+                               NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1bb1, 0x0100),   /* Seagate Nytro Flash Storage */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
                                NVME_QUIRK_NO_NS_DESC_LIST, },
@@ -3437,22 +3467,40 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
                                NVME_QUIRK_DISABLE_WRITE_ZEROES|
                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+       { PCI_DEVICE(0x1987, 0x5012),   /* Phison E12 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1987, 0x5016),   /* Phison E16 */
-               .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+               .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+                               NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1b4b, 0x1092),   /* Lexar 256 GB SSD */
                .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+       { PCI_DEVICE(0x1cc1, 0x33f8),   /* ADATA IM2P33F8ABR1 1 TB */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x10ec, 0x5762),   /* ADATA SX6000LNP */
-               .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+               .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+                               NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1cc1, 0x8201),   /* ADATA SX8200PNP 512GB */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+       { PCI_DEVICE(0x1344, 0x5407),   /* Micron Technology Inc NVMe SSD */
+               .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x1c5c, 0x1504),   /* SK Hynix PC400 */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x1c5c, 0x174a),   /* SK Hynix P31 SSD */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x15b7, 0x2001),   /*  Sandisk Skyhawk */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x1d97, 0x2263),   /* SPCC */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x144d, 0xa80b),   /* Samsung PM9B1 256G and 512G */
+               .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x144d, 0xa809),   /* Samsung MZALQ256HBJD 256G */
+               .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x1cc4, 0x6303),   /* UMIS RPJTJ512MGE1QDY 512G */
+               .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x1cc4, 0x6302),   /* UMIS RPJTJ256MGE1QDY 256G */
+               .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x2646, 0x2262),   /* KINGSTON SKC2000 NVMe SSD */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD  */
@@ -3463,6 +3511,12 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e4B, 0x1202),   /* MAXIO MAP1202 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x1cc1, 0x5350),   /* ADATA XPG GAMMIX S50 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x1e49, 0x0041),   /* ZHITAI TiPro7000 NVMe SSD */
+               .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+       { PCI_DEVICE(0xc0a9, 0x540a),   /* Crucial P2 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
@@ -3483,10 +3537,6 @@ static const struct pci_device_id nvme_id_table[] = {
                                NVME_QUIRK_128_BYTES_SQES |
                                NVME_QUIRK_SHARED_TAGS |
                                NVME_QUIRK_SKIP_CID_GEN },
-       { PCI_DEVICE(0x144d, 0xa808),   /* Samsung X5 */
-               .driver_data =  NVME_QUIRK_DELAY_BEFORE_CHK_RDY|
-                               NVME_QUIRK_NO_DEEPEST_PS |
-                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { 0, }
 };
index f2a5e1e..46c2dcf 100644 (file)
@@ -1048,6 +1048,14 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
        }
 }
 
+static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
+{
+       struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+       cancel_work_sync(&ctrl->err_work);
+       cancel_delayed_work_sync(&ctrl->reconnect_work);
+}
+
 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
 {
        struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
@@ -2252,9 +2260,6 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
 
 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
-       cancel_work_sync(&ctrl->err_work);
-       cancel_delayed_work_sync(&ctrl->reconnect_work);
-
        nvme_rdma_teardown_io_queues(ctrl, shutdown);
        nvme_stop_admin_queue(&ctrl->ctrl);
        if (shutdown)
@@ -2304,6 +2309,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
        .submit_async_event     = nvme_rdma_submit_async_event,
        .delete_ctrl            = nvme_rdma_delete_ctrl,
        .get_address            = nvmf_get_address,
+       .stop_ctrl              = nvme_rdma_stop_ctrl,
 };
 
 /*
index bb67538..7a9e6ff 100644 (file)
@@ -1180,8 +1180,7 @@ done:
        } else if (ret < 0) {
                dev_err(queue->ctrl->ctrl.device,
                        "failed to send request %d\n", ret);
-               if (ret != -EPIPE && ret != -ECONNRESET)
-                       nvme_tcp_fail_request(queue->request);
+               nvme_tcp_fail_request(queue->request);
                nvme_tcp_done_send_req(queue);
        }
        return ret;
@@ -2194,9 +2193,6 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
 
 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
 {
-       cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
-       cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
-
        nvme_tcp_teardown_io_queues(ctrl, shutdown);
        nvme_stop_admin_queue(ctrl);
        if (shutdown)
@@ -2236,6 +2232,12 @@ out_fail:
        nvme_tcp_reconnect_or_remove(ctrl);
 }
 
+static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
+{
+       cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
+       cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
+}
+
 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
 {
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
@@ -2557,6 +2559,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
        .submit_async_event     = nvme_tcp_submit_async_event,
        .delete_ctrl            = nvme_tcp_delete_ctrl,
        .get_address            = nvmf_get_address,
+       .stop_ctrl              = nvme_tcp_stop_ctrl,
 };
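
The rdma and tcp hunks above move the err/reconnect work cancellation out of
the shutdown path (which those works can themselves invoke, risking a
deadlock) into a new ops->stop_ctrl hook called from nvme_stop_ctrl(). A toy
model of the dispatch, with the work handling stubbed out:

#include <stdio.h>

struct ctrl;

struct ctrl_ops {
	void (*stop_ctrl)(struct ctrl *c);   /* optional, per transport */
};

struct ctrl {
	const struct ctrl_ops *ops;
	int err_work_live;
};

static void tcp_stop_ctrl(struct ctrl *c)
{
	c->err_work_live = 0;                /* cancel_work_sync() analogue */
}

static const struct ctrl_ops tcp_ops = { .stop_ctrl = tcp_stop_ctrl };

static void nvme_stop_ctrl_model(struct ctrl *c)
{
	/* ... stop keep-alive, flush async events ... */
	if (c->ops->stop_ctrl)
		c->ops->stop_ctrl(c);        /* transport-specific teardown */
}

int main(void)
{
	struct ctrl c = { .ops = &tcp_ops, .err_work_live = 1 };

	nvme_stop_ctrl_model(&c);
	printf("err work live: %d\n", c.err_work_live);  /* 0 */
	return 0;
}
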
 
 static bool
index b5f8525..37c7f4c 100644 (file)
@@ -69,7 +69,7 @@ TRACE_EVENT(nvme_setup_cmd,
                __entry->metadata = !!blk_integrity_rq(req);
                __entry->fctype = cmd->fabrics.fctype;
                __assign_disk_name(__entry->disk, req->q->disk);
-               memcpy(__entry->cdw10, &cmd->common.cdw10,
+               memcpy(__entry->cdw10, &cmd->common.cdws,
                        sizeof(__entry->cdw10));
            ),
            TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%x, cmd=(%s %s)",
index e44b298..ff77c3d 100644 (file)
@@ -773,11 +773,31 @@ static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
 }
 CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
 
+static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
+               char *page)
+{
+       return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
+}
+
+static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
+               const char *page, size_t count)
+{
+       struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+       unsigned int clear_ids;
+
+       if (kstrtouint(page, 0, &clear_ids))
+               return -EINVAL;
+       subsys->clear_ids = clear_ids;
+       return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
+
 static struct configfs_attribute *nvmet_passthru_attrs[] = {
        &nvmet_passthru_attr_device_path,
        &nvmet_passthru_attr_enable,
        &nvmet_passthru_attr_admin_timeout,
        &nvmet_passthru_attr_io_timeout,
+       &nvmet_passthru_attr_clear_ids,
        NULL,
 };
 
index 90e7532..c27660a 100644 (file)
@@ -1374,6 +1374,12 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
        ctrl->port = req->port;
        ctrl->ops = req->ops;
 
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+       /* Set loop targets to clear IDs by default */
+       if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
+               subsys->clear_ids = 1;
+#endif
+
        INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
        INIT_LIST_HEAD(&ctrl->async_events);
        INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
index 6981875..2b3e571 100644 (file)
@@ -249,6 +249,7 @@ struct nvmet_subsys {
        struct config_group     passthru_group;
        unsigned int            admin_timeout;
        unsigned int            io_timeout;
+       unsigned int            clear_ids;
 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
 
 #ifdef CONFIG_BLK_DEV_ZONED
index b1f7efa..6f39a29 100644 (file)
@@ -30,6 +30,53 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
                ctrl->cap &= ~(1ULL << 43);
 }
 
+static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
+{
+       struct nvmet_ctrl *ctrl = req->sq->ctrl;
+       u16 status = NVME_SC_SUCCESS;
+       int pos, len;
+       bool csi_seen = false;
+       void *data;
+       u8 csi;
+
+       if (!ctrl->subsys->clear_ids)
+               return status;
+
+       data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+       if (!data)
+               return NVME_SC_INTERNAL;
+
+       status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+       if (status)
+               goto out_free;
+
+       for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+               struct nvme_ns_id_desc *cur = data + pos;
+
+               if (cur->nidl == 0)
+                       break;
+               if (cur->nidt == NVME_NIDT_CSI) {
+                       memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
+                       csi_seen = true;
+                       break;
+               }
+               len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
+       }
+
+       memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
+       if (csi_seen) {
+               struct nvme_ns_id_desc *cur = data;
+
+               cur->nidt = NVME_NIDT_CSI;
+               cur->nidl = NVME_NIDT_CSI_LEN;
+               memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
+       }
+       status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+out_free:
+       kfree(data);
+       return status;
+}
+
 static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 {
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -152,6 +199,11 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
         */
        id->mc = 0;
 
+       if (req->sq->ctrl->subsys->clear_ids) {
+               memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
+               memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
+       }
+
        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 
 out_free:
@@ -176,6 +228,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
                case NVME_ID_CNS_NS:
                        nvmet_passthru_override_id_ns(req);
                        break;
+               case NVME_ID_CNS_NS_DESC_LIST:
+                       nvmet_passthru_override_id_descs(req);
+                       break;
                }
        } else if (status < 0)
                status = NVME_SC_INTERNAL;
index 2793554..0a95425 100644 (file)
@@ -405,7 +405,7 @@ err:
        return NVME_SC_INTERNAL;
 }
 
-static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
+static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
                struct nvmet_tcp_cmd *cmd)
 {
        ahash_request_set_crypt(hash, cmd->req.sg,
@@ -413,23 +413,6 @@ static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
        crypto_ahash_digest(hash);
 }
 
-static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
-               struct nvmet_tcp_cmd *cmd)
-{
-       struct scatterlist sg;
-       struct kvec *iov;
-       int i;
-
-       crypto_ahash_init(hash);
-       for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
-               sg_init_one(&sg, iov->iov_base, iov->iov_len);
-               ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
-               crypto_ahash_update(hash);
-       }
-       ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
-       crypto_ahash_final(hash);
-}
-
 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
 {
        struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
@@ -454,7 +437,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
 
        if (queue->data_digest) {
                pdu->hdr.flags |= NVME_TCP_F_DDGST;
-               nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
+               nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
        }
 
        if (cmd->queue->hdr_digest) {
@@ -1137,7 +1120,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
 {
        struct nvmet_tcp_queue *queue = cmd->queue;
 
-       nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
+       nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
        queue->offset = 0;
        queue->left = NVME_TCP_DIGEST_LENGTH;
        queue->rcv_state = NVMET_TCP_RECV_DDGST;
index 8d374cc..f2e58dd 100644 (file)
@@ -9,6 +9,7 @@
  *  Copyright (C) 2016  IBM Corporation
  */
 
+#include <linux/ima.h>
 #include <linux/kernel.h>
 #include <linux/kexec.h>
 #include <linux/memblock.h>
@@ -115,6 +116,7 @@ static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr,
        return 0;
 }
 
+#ifdef CONFIG_HAVE_IMA_KEXEC
 /**
  * ima_get_kexec_buffer - get IMA buffer from the previous kernel
  * @addr:      On successful return, set to point to the buffer contents.
@@ -122,16 +124,13 @@ static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr,
  *
  * Return: 0 on success, negative errno on error.
  */
-int ima_get_kexec_buffer(void **addr, size_t *size)
+int __init ima_get_kexec_buffer(void **addr, size_t *size)
 {
        int ret, len;
        unsigned long tmp_addr;
        size_t tmp_size;
        const void *prop;
 
-       if (!IS_ENABLED(CONFIG_HAVE_IMA_KEXEC))
-               return -ENOTSUPP;
-
        prop = of_get_property(of_chosen, "linux,ima-kexec-buffer", &len);
        if (!prop)
                return -ENOENT;
@@ -149,16 +148,13 @@ int ima_get_kexec_buffer(void **addr, size_t *size)
 /**
  * ima_free_kexec_buffer - free memory used by the IMA buffer
  */
-int ima_free_kexec_buffer(void)
+int __init ima_free_kexec_buffer(void)
 {
        int ret;
        unsigned long addr;
        size_t size;
        struct property *prop;
 
-       if (!IS_ENABLED(CONFIG_HAVE_IMA_KEXEC))
-               return -ENOTSUPP;
-
        prop = of_find_property(of_chosen, "linux,ima-kexec-buffer", NULL);
        if (!prop)
                return -ENOENT;
@@ -173,6 +169,7 @@ int ima_free_kexec_buffer(void)
 
        return memblock_phys_free(addr, size);
 }
+#endif
 
 /**
  * remove_ima_buffer - remove the IMA buffer property and reservation from @fdt
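With the runtime IS_ENABLED() checks dropped and both functions now __init,
configurations without CONFIG_HAVE_IMA_KEXEC need compile-time stubs instead of
the old -ENOTSUPP returns. The matching <linux/ima.h> change is outside this
hunk, so the following is only a sketch of the shape such declarations take:

	#ifdef CONFIG_HAVE_IMA_KEXEC
	int __init ima_get_kexec_buffer(void **addr, size_t *size);
	int __init ima_free_kexec_buffer(void);
	#else
	static inline int ima_get_kexec_buffer(void **addr, size_t *size)
	{
		return -ENOTSUPP;
	}

	static inline int ima_free_kexec_buffer(void)
	{
		return -ENOTSUPP;
	}
	#endif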
index 96e09fa..03b1309 100644 (file)
@@ -1139,7 +1139,7 @@ static void cci_pmu_start(struct perf_event *event, int pmu_flags)
 
        /*
         * To handle interrupt latency, we always reprogram the period
-        * regardlesss of PERF_EF_RELOAD.
+        * regardless of PERF_EF_RELOAD.
         */
        if (pmu_flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
@@ -1261,7 +1261,7 @@ static int validate_group(struct perf_event *event)
                 */
                .used_mask = mask,
        };
-       memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long));
+       bitmap_zero(mask, cci_pmu->num_cntrs);
 
        if (!validate_event(event->pmu, &fake_pmu, leader))
                return -EINVAL;
@@ -1629,10 +1629,9 @@ static struct cci_pmu *cci_pmu_alloc(struct device *dev)
                                             GFP_KERNEL);
        if (!cci_pmu->hw_events.events)
                return ERR_PTR(-ENOMEM);
-       cci_pmu->hw_events.used_mask = devm_kcalloc(dev,
-                                               BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)),
-                                               sizeof(*cci_pmu->hw_events.used_mask),
-                                               GFP_KERNEL);
+       cci_pmu->hw_events.used_mask = devm_bitmap_zalloc(dev,
+                                                         CCI_PMU_MAX_HW_CNTRS(model),
+                                                         GFP_KERNEL);
        if (!cci_pmu->hw_events.used_mask)
                return ERR_PTR(-ENOMEM);
 
index 40b352e..728d13d 100644 (file)
@@ -1250,7 +1250,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
        ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);
 
        /* Get a convenient /sys/event_source/devices/ name */
-       ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL);
+       ccn->dt.id = ida_alloc(&arm_ccn_pmu_ida, GFP_KERNEL);
        if (ccn->dt.id == 0) {
                name = "ccn";
        } else {
@@ -1312,7 +1312,7 @@ error_pmu_register:
                                            &ccn->dt.node);
 error_set_affinity:
 error_choose_name:
-       ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
+       ida_free(&arm_ccn_pmu_ida, ccn->dt.id);
        for (i = 0; i < ccn->num_xps; i++)
                writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
        writel(0, ccn->dt.base + CCN_DT_PMCR);
@@ -1329,7 +1329,7 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
                writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
        writel(0, ccn->dt.base + CCN_DT_PMCR);
        perf_pmu_unregister(&ccn->dt.pmu);
-       ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
+       ida_free(&arm_ccn_pmu_ida, ccn->dt.id);
 }
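The ida_simple_*() calls replaced here (and in the i.MX8 DDR PMU below) are
deprecated wrappers; with a start of 0 and no upper bound they reduce exactly
to the plain IDA API, so the conversion does not change allocation behavior:

	/* deprecated wrappers (before): allocates the lowest free ID >= 0 */
	id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL);
	ida_simple_remove(&arm_ccn_pmu_ida, id);

	/* plain IDA API (after): identical semantics for start=0, end=0 */
	id = ida_alloc(&arm_ccn_pmu_ida, GFP_KERNEL);
	ida_free(&arm_ccn_pmu_ida, id);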
 
 static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
index db670b2..b65a7d9 100644 (file)
 #include <asm/mmu.h>
 #include <asm/sysreg.h>
 
+/*
+ * Cache whether the event is allowed to trace Context information.
+ * This lets the check, i.e. perfmon_capable(), be performed once in
+ * the context of the event owner, during event_init(), rather than
+ * each time the PMSCR value is computed.
+ */
+#define SPE_PMU_HW_FLAGS_CX                    BIT(0)
+
+static void set_spe_event_has_cx(struct perf_event *event)
+{
+       if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
+               event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
+}
+
+static bool get_spe_event_has_cx(struct perf_event *event)
+{
+       return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX);
+}
+
 #define ARM_SPE_BUF_PAD_BYTE                   0
 
 struct arm_spe_pmu_buf {
@@ -272,7 +290,7 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event)
        if (!attr->exclude_kernel)
                reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);
 
-       if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
+       if (get_spe_event_has_cx(event))
                reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);
 
        return reg;
@@ -709,10 +727,10 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
            !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
                return -EOPNOTSUPP;
 
+       set_spe_event_has_cx(event);
        reg = arm_spe_event_to_pmscr(event);
        if (!perfmon_capable() &&
            (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
-                   BIT(SYS_PMSCR_EL1_CX_SHIFT) |
                    BIT(SYS_PMSCR_EL1_PCT_SHIFT))))
                return -EACCES;
 
index b1b2a55..8e058e0 100644 (file)
@@ -611,7 +611,7 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
                .dev = dev,
        };
 
-       pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
+       pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL);
        return pmu->id;
 }
 
@@ -765,7 +765,7 @@ ddr_perf_err:
 cpuhp_instance_err:
        cpuhp_remove_multi_state(pmu->cpuhp_state);
 cpuhp_state_err:
-       ida_simple_remove(&ddr_ida, pmu->id);
+       ida_free(&ddr_ida, pmu->id);
        dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
        return ret;
 }
@@ -779,7 +779,7 @@ static int ddr_perf_remove(struct platform_device *pdev)
 
        perf_pmu_unregister(&pmu->pmu);
 
-       ida_simple_remove(&ddr_ida, pmu->id);
+       ida_free(&ddr_ida, pmu->id);
        return 0;
 }
 
index 5546218..171bfc1 100644 (file)
@@ -14,3 +14,13 @@ config HISI_PCIE_PMU
          RCiEP devices.
          Adds the PCIe PMU into perf events system for monitoring latency,
          bandwidth etc.
+
+config HNS3_PMU
+       tristate "HNS3 PERF PMU"
+       depends on ARM64 || COMPILE_TEST
+       depends on PCI
+       help
+         Provide support for HNS3 performance monitoring unit (PMU) RCiEP
+         devices.
+         Adds the HNS3 PMU into perf events system for monitoring latency,
+         bandwidth etc.
index 6be8351..4d2c9ab 100644 (file)
@@ -4,3 +4,4 @@ obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
                          hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o
 
 obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o
+obj-$(CONFIG_HNS3_PMU) += hns3_pmu.o
index 62299ab..50d0c0a 100644 (file)
@@ -516,21 +516,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
                                      "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
                                      ddrc_pmu->index_id);
 
-       ddrc_pmu->pmu = (struct pmu) {
-               .name           = name,
-               .module         = THIS_MODULE,
-               .task_ctx_nr    = perf_invalid_context,
-               .event_init     = hisi_uncore_pmu_event_init,
-               .pmu_enable     = hisi_uncore_pmu_enable,
-               .pmu_disable    = hisi_uncore_pmu_disable,
-               .add            = hisi_uncore_pmu_add,
-               .del            = hisi_uncore_pmu_del,
-               .start          = hisi_uncore_pmu_start,
-               .stop           = hisi_uncore_pmu_stop,
-               .read           = hisi_uncore_pmu_read,
-               .attr_groups    = ddrc_pmu->pmu_events.attr_groups,
-               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
-       };
+       hisi_pmu_init(&ddrc_pmu->pmu, name, ddrc_pmu->pmu_events.attr_groups, THIS_MODULE);
 
        ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
        if (ret) {
index 3935131..13017b3 100644 (file)
@@ -519,21 +519,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
 
        name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
                              hha_pmu->sccl_id, hha_pmu->index_id);
-       hha_pmu->pmu = (struct pmu) {
-               .name           = name,
-               .module         = THIS_MODULE,
-               .task_ctx_nr    = perf_invalid_context,
-               .event_init     = hisi_uncore_pmu_event_init,
-               .pmu_enable     = hisi_uncore_pmu_enable,
-               .pmu_disable    = hisi_uncore_pmu_disable,
-               .add            = hisi_uncore_pmu_add,
-               .del            = hisi_uncore_pmu_del,
-               .start          = hisi_uncore_pmu_start,
-               .stop           = hisi_uncore_pmu_stop,
-               .read           = hisi_uncore_pmu_read,
-               .attr_groups    = hha_pmu->pmu_events.attr_groups,
-               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
-       };
+       hisi_pmu_init(&hha_pmu->pmu, name, hha_pmu->pmu_events.attr_groups, THIS_MODULE);
 
        ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
        if (ret) {
index 560ab96..2995f36 100644 (file)
@@ -557,21 +557,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
         */
        name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
                              l3c_pmu->sccl_id, l3c_pmu->ccl_id);
-       l3c_pmu->pmu = (struct pmu) {
-               .name           = name,
-               .module         = THIS_MODULE,
-               .task_ctx_nr    = perf_invalid_context,
-               .event_init     = hisi_uncore_pmu_event_init,
-               .pmu_enable     = hisi_uncore_pmu_enable,
-               .pmu_disable    = hisi_uncore_pmu_disable,
-               .add            = hisi_uncore_pmu_add,
-               .del            = hisi_uncore_pmu_del,
-               .start          = hisi_uncore_pmu_start,
-               .stop           = hisi_uncore_pmu_stop,
-               .read           = hisi_uncore_pmu_read,
-               .attr_groups    = l3c_pmu->pmu_events.attr_groups,
-               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
-       };
+       hisi_pmu_init(&l3c_pmu->pmu, name, l3c_pmu->pmu_events.attr_groups, THIS_MODULE);
 
        ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
        if (ret) {
index a0ee84d..47d3cc9 100644 (file)
@@ -412,21 +412,7 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
                return ret;
        }
 
-       pa_pmu->pmu = (struct pmu) {
-               .module         = THIS_MODULE,
-               .task_ctx_nr    = perf_invalid_context,
-               .event_init     = hisi_uncore_pmu_event_init,
-               .pmu_enable     = hisi_uncore_pmu_enable,
-               .pmu_disable    = hisi_uncore_pmu_disable,
-               .add            = hisi_uncore_pmu_add,
-               .del            = hisi_uncore_pmu_del,
-               .start          = hisi_uncore_pmu_start,
-               .stop           = hisi_uncore_pmu_stop,
-               .read           = hisi_uncore_pmu_read,
-               .attr_groups    = pa_pmu->pmu_events.attr_groups,
-               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
-       };
-
+       hisi_pmu_init(&pa_pmu->pmu, name, pa_pmu->pmu_events.attr_groups, THIS_MODULE);
        ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
        if (ret) {
                dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
index 980b9ee..fbc8a93 100644 (file)
@@ -531,4 +531,22 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
 }
 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);
 
+void hisi_pmu_init(struct pmu *pmu, const char *name,
+               const struct attribute_group **attr_groups, struct module *module)
+{
+       pmu->name               = name;
+       pmu->module             = module;
+       pmu->task_ctx_nr        = perf_invalid_context;
+       pmu->event_init         = hisi_uncore_pmu_event_init;
+       pmu->pmu_enable         = hisi_uncore_pmu_enable;
+       pmu->pmu_disable        = hisi_uncore_pmu_disable;
+       pmu->add                = hisi_uncore_pmu_add;
+       pmu->del                = hisi_uncore_pmu_del;
+       pmu->start              = hisi_uncore_pmu_start;
+       pmu->stop               = hisi_uncore_pmu_stop;
+       pmu->read               = hisi_uncore_pmu_read;
+       pmu->attr_groups        = attr_groups;
+       /*
+        * Keep the PERF_PMU_CAP_NO_EXCLUDE capability that the open-coded
+        * initializers this helper replaces all set.
+        */
+       pmu->capabilities       = PERF_PMU_CAP_NO_EXCLUDE;
+}
+EXPORT_SYMBOL_GPL(hisi_pmu_init);
+
 MODULE_LICENSE("GPL v2");
index 96eedda..b59de33 100644 (file)
@@ -121,4 +121,6 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
 int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
                             struct platform_device *pdev);
 
+void hisi_pmu_init(struct pmu *pmu, const char *name,
+               const struct attribute_group **attr_groups, struct module *module);
 #endif /* __HISI_UNCORE_PMU_H__ */
index 6aedc30..b9c79f1 100644 (file)
@@ -445,20 +445,7 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
                return ret;
        }
 
-       sllc_pmu->pmu = (struct pmu) {
-               .module         = THIS_MODULE,
-               .task_ctx_nr    = perf_invalid_context,
-               .event_init     = hisi_uncore_pmu_event_init,
-               .pmu_enable     = hisi_uncore_pmu_enable,
-               .pmu_disable    = hisi_uncore_pmu_disable,
-               .add            = hisi_uncore_pmu_add,
-               .del            = hisi_uncore_pmu_del,
-               .start          = hisi_uncore_pmu_start,
-               .stop           = hisi_uncore_pmu_stop,
-               .read           = hisi_uncore_pmu_read,
-               .attr_groups    = sllc_pmu->pmu_events.attr_groups,
-               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
-       };
+       hisi_pmu_init(&sllc_pmu->pmu, name, sllc_pmu->pmu_events.attr_groups, THIS_MODULE);
 
        ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
        if (ret) {
diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
new file mode 100644 (file)
index 0000000..e0457d8
--- /dev/null
@@ -0,0 +1,1671 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This driver adds support for the HNS3 PMU RCiEP device. Related perf
+ * events are bandwidth, latency, packet rate, interrupt rate, etc.
+ *
+ * Copyright (C) 2022 HiSilicon Limited
+ */
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci-epf.h>
+#include <linux/perf_event.h>
+#include <linux/smp.h>
+
+/* register offset addresses */
+#define HNS3_PMU_REG_GLOBAL_CTRL               0x0000
+#define HNS3_PMU_REG_CLOCK_FREQ                        0x0020
+#define HNS3_PMU_REG_BDF                       0x0fe0
+#define HNS3_PMU_REG_VERSION                   0x0fe4
+#define HNS3_PMU_REG_DEVICE_ID                 0x0fe8
+
+#define HNS3_PMU_REG_EVENT_OFFSET              0x1000
+#define HNS3_PMU_REG_EVENT_SIZE                        0x1000
+#define HNS3_PMU_REG_EVENT_CTRL_LOW            0x00
+#define HNS3_PMU_REG_EVENT_CTRL_HIGH           0x04
+#define HNS3_PMU_REG_EVENT_INTR_STATUS         0x08
+#define HNS3_PMU_REG_EVENT_INTR_MASK           0x0c
+#define HNS3_PMU_REG_EVENT_COUNTER             0x10
+#define HNS3_PMU_REG_EVENT_EXT_COUNTER         0x18
+#define HNS3_PMU_REG_EVENT_QID_CTRL            0x28
+#define HNS3_PMU_REG_EVENT_QID_PARA            0x2c
+
+#define HNS3_PMU_FILTER_SUPPORT_GLOBAL         BIT(0)
+#define HNS3_PMU_FILTER_SUPPORT_PORT           BIT(1)
+#define HNS3_PMU_FILTER_SUPPORT_PORT_TC                BIT(2)
+#define HNS3_PMU_FILTER_SUPPORT_FUNC           BIT(3)
+#define HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE     BIT(4)
+#define HNS3_PMU_FILTER_SUPPORT_FUNC_INTR      BIT(5)
+
+#define HNS3_PMU_FILTER_ALL_TC                 0xf
+#define HNS3_PMU_FILTER_ALL_QUEUE              0xffff
+
+#define HNS3_PMU_CTRL_SUBEVENT_S               4
+#define HNS3_PMU_CTRL_FILTER_MODE_S            24
+
+#define HNS3_PMU_GLOBAL_START                  BIT(0)
+
+#define HNS3_PMU_EVENT_STATUS_RESET            BIT(11)
+#define HNS3_PMU_EVENT_EN                      BIT(12)
+#define HNS3_PMU_EVENT_OVERFLOW_RESTART                BIT(15)
+
+#define HNS3_PMU_QID_PARA_FUNC_S               0
+#define HNS3_PMU_QID_PARA_QUEUE_S              16
+
+#define HNS3_PMU_QID_CTRL_REQ_ENABLE           BIT(0)
+#define HNS3_PMU_QID_CTRL_DONE                 BIT(1)
+#define HNS3_PMU_QID_CTRL_MISS                 BIT(2)
+
+#define HNS3_PMU_INTR_MASK_OVERFLOW            BIT(1)
+
+#define HNS3_PMU_MAX_HW_EVENTS                 8
+
+/*
+ * Each hardware event contains two registers (counter and ext_counter) for
+ * bandwidth, packet rate, latency and interrupt rate. The two registers are
+ * started together when a hardware event is enabled. What counter and
+ * ext_counter hold depends on the event type, as follows:
+ *
+ * +----------------+------------------+---------------+
+ * |   event type   |     counter      |  ext_counter  |
+ * +----------------+------------------+---------------+
+ * | bandwidth      | byte number      | cycle number  |
+ * +----------------+------------------+---------------+
+ * | packet rate    | packet number    | cycle number  |
+ * +----------------+------------------+---------------+
+ * | latency        | cycle number     | packet number |
+ * +----------------+------------------+---------------+
+ * | interrupt rate | interrupt number | cycle number  |
+ * +----------------+------------------+---------------+
+ *
+ * The cycle number is the increment of the hardware timer; the timer
+ * frequency can be read from the hw_clk_freq sysfs file.
+ *
+ * Performance of each hardware event is calculated by: counter / ext_counter.
+ *
+ * Since data processing is preferably done in userspace, ext_counter is
+ * exposed as a separate event, with bit 16 of the event code selecting it.
+ * For example, events 0x00001 and 0x10001 are one and the same hardware
+ * event because bits 0-15 match; bit 16 clear means read the counter
+ * register, bit 16 set means read the ext_counter register.
+ */
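+/*
+ * Worked example (illustrative numbers): for a bandwidth event with
+ * counter = 0x40000 bytes and ext_counter = 0x10000 cycles, and a timer
+ * frequency of 250000000 Hz read from hw_clk_freq:
+ *
+ *     bytes/sec = counter * hw_clk_freq / ext_counter
+ *               = 0x40000 * 250000000 / 0x10000 = 1000000000
+ *
+ * i.e. an average throughput of 1 GB/s over the counting window.
+ */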
+/* bandwidth events */
+#define HNS3_PMU_EVT_BW_SSU_EGU_BYTE_NUM               0x00001
+#define HNS3_PMU_EVT_BW_SSU_EGU_TIME                   0x10001
+#define HNS3_PMU_EVT_BW_SSU_RPU_BYTE_NUM               0x00002
+#define HNS3_PMU_EVT_BW_SSU_RPU_TIME                   0x10002
+#define HNS3_PMU_EVT_BW_SSU_ROCE_BYTE_NUM              0x00003
+#define HNS3_PMU_EVT_BW_SSU_ROCE_TIME                  0x10003
+#define HNS3_PMU_EVT_BW_ROCE_SSU_BYTE_NUM              0x00004
+#define HNS3_PMU_EVT_BW_ROCE_SSU_TIME                  0x10004
+#define HNS3_PMU_EVT_BW_TPU_SSU_BYTE_NUM               0x00005
+#define HNS3_PMU_EVT_BW_TPU_SSU_TIME                   0x10005
+#define HNS3_PMU_EVT_BW_RPU_RCBRX_BYTE_NUM             0x00006
+#define HNS3_PMU_EVT_BW_RPU_RCBRX_TIME                 0x10006
+#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_BYTE_NUM           0x00008
+#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_TIME               0x10008
+#define HNS3_PMU_EVT_BW_WR_FBD_BYTE_NUM                        0x00009
+#define HNS3_PMU_EVT_BW_WR_FBD_TIME                    0x10009
+#define HNS3_PMU_EVT_BW_WR_EBD_BYTE_NUM                        0x0000a
+#define HNS3_PMU_EVT_BW_WR_EBD_TIME                    0x1000a
+#define HNS3_PMU_EVT_BW_RD_FBD_BYTE_NUM                        0x0000b
+#define HNS3_PMU_EVT_BW_RD_FBD_TIME                    0x1000b
+#define HNS3_PMU_EVT_BW_RD_EBD_BYTE_NUM                        0x0000c
+#define HNS3_PMU_EVT_BW_RD_EBD_TIME                    0x1000c
+#define HNS3_PMU_EVT_BW_RD_PAY_M0_BYTE_NUM             0x0000d
+#define HNS3_PMU_EVT_BW_RD_PAY_M0_TIME                 0x1000d
+#define HNS3_PMU_EVT_BW_RD_PAY_M1_BYTE_NUM             0x0000e
+#define HNS3_PMU_EVT_BW_RD_PAY_M1_TIME                 0x1000e
+#define HNS3_PMU_EVT_BW_WR_PAY_M0_BYTE_NUM             0x0000f
+#define HNS3_PMU_EVT_BW_WR_PAY_M0_TIME                 0x1000f
+#define HNS3_PMU_EVT_BW_WR_PAY_M1_BYTE_NUM             0x00010
+#define HNS3_PMU_EVT_BW_WR_PAY_M1_TIME                 0x10010
+
+/* packet rate events */
+#define HNS3_PMU_EVT_PPS_IGU_SSU_PACKET_NUM            0x00100
+#define HNS3_PMU_EVT_PPS_IGU_SSU_TIME                  0x10100
+#define HNS3_PMU_EVT_PPS_SSU_EGU_PACKET_NUM            0x00101
+#define HNS3_PMU_EVT_PPS_SSU_EGU_TIME                  0x10101
+#define HNS3_PMU_EVT_PPS_SSU_RPU_PACKET_NUM            0x00102
+#define HNS3_PMU_EVT_PPS_SSU_RPU_TIME                  0x10102
+#define HNS3_PMU_EVT_PPS_SSU_ROCE_PACKET_NUM           0x00103
+#define HNS3_PMU_EVT_PPS_SSU_ROCE_TIME                 0x10103
+#define HNS3_PMU_EVT_PPS_ROCE_SSU_PACKET_NUM           0x00104
+#define HNS3_PMU_EVT_PPS_ROCE_SSU_TIME                 0x10104
+#define HNS3_PMU_EVT_PPS_TPU_SSU_PACKET_NUM            0x00105
+#define HNS3_PMU_EVT_PPS_TPU_SSU_TIME                  0x10105
+#define HNS3_PMU_EVT_PPS_RPU_RCBRX_PACKET_NUM          0x00106
+#define HNS3_PMU_EVT_PPS_RPU_RCBRX_TIME                        0x10106
+#define HNS3_PMU_EVT_PPS_RCBTX_TPU_PACKET_NUM          0x00107
+#define HNS3_PMU_EVT_PPS_RCBTX_TPU_TIME                        0x10107
+#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_PACKET_NUM                0x00108
+#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_TIME              0x10108
+#define HNS3_PMU_EVT_PPS_WR_FBD_PACKET_NUM             0x00109
+#define HNS3_PMU_EVT_PPS_WR_FBD_TIME                   0x10109
+#define HNS3_PMU_EVT_PPS_WR_EBD_PACKET_NUM             0x0010a
+#define HNS3_PMU_EVT_PPS_WR_EBD_TIME                   0x1010a
+#define HNS3_PMU_EVT_PPS_RD_FBD_PACKET_NUM             0x0010b
+#define HNS3_PMU_EVT_PPS_RD_FBD_TIME                   0x1010b
+#define HNS3_PMU_EVT_PPS_RD_EBD_PACKET_NUM             0x0010c
+#define HNS3_PMU_EVT_PPS_RD_EBD_TIME                   0x1010c
+#define HNS3_PMU_EVT_PPS_RD_PAY_M0_PACKET_NUM          0x0010d
+#define HNS3_PMU_EVT_PPS_RD_PAY_M0_TIME                        0x1010d
+#define HNS3_PMU_EVT_PPS_RD_PAY_M1_PACKET_NUM          0x0010e
+#define HNS3_PMU_EVT_PPS_RD_PAY_M1_TIME                        0x1010e
+#define HNS3_PMU_EVT_PPS_WR_PAY_M0_PACKET_NUM          0x0010f
+#define HNS3_PMU_EVT_PPS_WR_PAY_M0_TIME                        0x1010f
+#define HNS3_PMU_EVT_PPS_WR_PAY_M1_PACKET_NUM          0x00110
+#define HNS3_PMU_EVT_PPS_WR_PAY_M1_TIME                        0x10110
+#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_PACKET_NUM      0x00111
+#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_TIME            0x10111
+#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_PACKET_NUM      0x00112
+#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_TIME            0x10112
+
+/* latency events */
+#define HNS3_PMU_EVT_DLY_TX_PUSH_TIME                  0x00202
+#define HNS3_PMU_EVT_DLY_TX_PUSH_PACKET_NUM            0x10202
+#define HNS3_PMU_EVT_DLY_TX_TIME                       0x00204
+#define HNS3_PMU_EVT_DLY_TX_PACKET_NUM                 0x10204
+#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_TIME               0x00206
+#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_PACKET_NUM         0x10206
+#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_TIME              0x00207
+#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_PACKET_NUM                0x10207
+#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_TIME               0x00208
+#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_PACKET_NUM         0x10208
+#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_TIME              0x00209
+#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_PACKET_NUM                0x10209
+#define HNS3_PMU_EVT_DLY_RPU_TIME                      0x0020e
+#define HNS3_PMU_EVT_DLY_RPU_PACKET_NUM                        0x1020e
+#define HNS3_PMU_EVT_DLY_TPU_TIME                      0x0020f
+#define HNS3_PMU_EVT_DLY_TPU_PACKET_NUM                        0x1020f
+#define HNS3_PMU_EVT_DLY_RPE_TIME                      0x00210
+#define HNS3_PMU_EVT_DLY_RPE_PACKET_NUM                        0x10210
+#define HNS3_PMU_EVT_DLY_TPE_TIME                      0x00211
+#define HNS3_PMU_EVT_DLY_TPE_PACKET_NUM                        0x10211
+#define HNS3_PMU_EVT_DLY_TPE_PUSH_TIME                 0x00212
+#define HNS3_PMU_EVT_DLY_TPE_PUSH_PACKET_NUM           0x10212
+#define HNS3_PMU_EVT_DLY_WR_FBD_TIME                   0x00213
+#define HNS3_PMU_EVT_DLY_WR_FBD_PACKET_NUM             0x10213
+#define HNS3_PMU_EVT_DLY_WR_EBD_TIME                   0x00214
+#define HNS3_PMU_EVT_DLY_WR_EBD_PACKET_NUM             0x10214
+#define HNS3_PMU_EVT_DLY_RD_FBD_TIME                   0x00215
+#define HNS3_PMU_EVT_DLY_RD_FBD_PACKET_NUM             0x10215
+#define HNS3_PMU_EVT_DLY_RD_EBD_TIME                   0x00216
+#define HNS3_PMU_EVT_DLY_RD_EBD_PACKET_NUM             0x10216
+#define HNS3_PMU_EVT_DLY_RD_PAY_M0_TIME                        0x00217
+#define HNS3_PMU_EVT_DLY_RD_PAY_M0_PACKET_NUM          0x10217
+#define HNS3_PMU_EVT_DLY_RD_PAY_M1_TIME                        0x00218
+#define HNS3_PMU_EVT_DLY_RD_PAY_M1_PACKET_NUM          0x10218
+#define HNS3_PMU_EVT_DLY_WR_PAY_M0_TIME                        0x00219
+#define HNS3_PMU_EVT_DLY_WR_PAY_M0_PACKET_NUM          0x10219
+#define HNS3_PMU_EVT_DLY_WR_PAY_M1_TIME                        0x0021a
+#define HNS3_PMU_EVT_DLY_WR_PAY_M1_PACKET_NUM          0x1021a
+#define HNS3_PMU_EVT_DLY_MSIX_WRITE_TIME               0x0021c
+#define HNS3_PMU_EVT_DLY_MSIX_WRITE_PACKET_NUM         0x1021c
+
+/* interrupt rate events */
+#define HNS3_PMU_EVT_PPS_MSIX_NIC_INTR_NUM             0x00300
+#define HNS3_PMU_EVT_PPS_MSIX_NIC_TIME                 0x10300
+
+/* filter mode supported by each bandwidth event */
+#define HNS3_PMU_FILTER_BW_SSU_EGU             0x07
+#define HNS3_PMU_FILTER_BW_SSU_RPU             0x1f
+#define HNS3_PMU_FILTER_BW_SSU_ROCE            0x0f
+#define HNS3_PMU_FILTER_BW_ROCE_SSU            0x0f
+#define HNS3_PMU_FILTER_BW_TPU_SSU             0x1f
+#define HNS3_PMU_FILTER_BW_RPU_RCBRX           0x11
+#define HNS3_PMU_FILTER_BW_RCBTX_TXSCH         0x11
+#define HNS3_PMU_FILTER_BW_WR_FBD              0x1b
+#define HNS3_PMU_FILTER_BW_WR_EBD              0x11
+#define HNS3_PMU_FILTER_BW_RD_FBD              0x01
+#define HNS3_PMU_FILTER_BW_RD_EBD              0x1b
+#define HNS3_PMU_FILTER_BW_RD_PAY_M0           0x01
+#define HNS3_PMU_FILTER_BW_RD_PAY_M1           0x01
+#define HNS3_PMU_FILTER_BW_WR_PAY_M0           0x01
+#define HNS3_PMU_FILTER_BW_WR_PAY_M1           0x01
+
+/* filter mode supported by each packet rate event */
+#define HNS3_PMU_FILTER_PPS_IGU_SSU            0x07
+#define HNS3_PMU_FILTER_PPS_SSU_EGU            0x07
+#define HNS3_PMU_FILTER_PPS_SSU_RPU            0x1f
+#define HNS3_PMU_FILTER_PPS_SSU_ROCE           0x0f
+#define HNS3_PMU_FILTER_PPS_ROCE_SSU           0x0f
+#define HNS3_PMU_FILTER_PPS_TPU_SSU            0x1f
+#define HNS3_PMU_FILTER_PPS_RPU_RCBRX          0x11
+#define HNS3_PMU_FILTER_PPS_RCBTX_TPU          0x1f
+#define HNS3_PMU_FILTER_PPS_RCBTX_TXSCH                0x11
+#define HNS3_PMU_FILTER_PPS_WR_FBD             0x1b
+#define HNS3_PMU_FILTER_PPS_WR_EBD             0x11
+#define HNS3_PMU_FILTER_PPS_RD_FBD             0x01
+#define HNS3_PMU_FILTER_PPS_RD_EBD             0x1b
+#define HNS3_PMU_FILTER_PPS_RD_PAY_M0          0x01
+#define HNS3_PMU_FILTER_PPS_RD_PAY_M1          0x01
+#define HNS3_PMU_FILTER_PPS_WR_PAY_M0          0x01
+#define HNS3_PMU_FILTER_PPS_WR_PAY_M1          0x01
+#define HNS3_PMU_FILTER_PPS_NICROH_TX_PRE      0x01
+#define HNS3_PMU_FILTER_PPS_NICROH_RX_PRE      0x01
+
+/* filter mode supported by each latency event */
+#define HNS3_PMU_FILTER_DLY_TX_PUSH            0x01
+#define HNS3_PMU_FILTER_DLY_TX                 0x01
+#define HNS3_PMU_FILTER_DLY_SSU_TX_NIC         0x07
+#define HNS3_PMU_FILTER_DLY_SSU_TX_ROCE                0x07
+#define HNS3_PMU_FILTER_DLY_SSU_RX_NIC         0x07
+#define HNS3_PMU_FILTER_DLY_SSU_RX_ROCE                0x07
+#define HNS3_PMU_FILTER_DLY_RPU                        0x11
+#define HNS3_PMU_FILTER_DLY_TPU                        0x1f
+#define HNS3_PMU_FILTER_DLY_RPE                        0x01
+#define HNS3_PMU_FILTER_DLY_TPE                        0x0b
+#define HNS3_PMU_FILTER_DLY_TPE_PUSH           0x1b
+#define HNS3_PMU_FILTER_DLY_WR_FBD             0x1b
+#define HNS3_PMU_FILTER_DLY_WR_EBD             0x11
+#define HNS3_PMU_FILTER_DLY_RD_FBD             0x01
+#define HNS3_PMU_FILTER_DLY_RD_EBD             0x1b
+#define HNS3_PMU_FILTER_DLY_RD_PAY_M0          0x01
+#define HNS3_PMU_FILTER_DLY_RD_PAY_M1          0x01
+#define HNS3_PMU_FILTER_DLY_WR_PAY_M0          0x01
+#define HNS3_PMU_FILTER_DLY_WR_PAY_M1          0x01
+#define HNS3_PMU_FILTER_DLY_MSIX_WRITE         0x01
+
+/* filter mode supported by each interrupt rate event */
+#define HNS3_PMU_FILTER_INTR_MSIX_NIC          0x01
+
+enum hns3_pmu_hw_filter_mode {
+       HNS3_PMU_HW_FILTER_GLOBAL,
+       HNS3_PMU_HW_FILTER_PORT,
+       HNS3_PMU_HW_FILTER_PORT_TC,
+       HNS3_PMU_HW_FILTER_FUNC,
+       HNS3_PMU_HW_FILTER_FUNC_QUEUE,
+       HNS3_PMU_HW_FILTER_FUNC_INTR,
+};
+
+struct hns3_pmu_event_attr {
+       u32 event;
+       u16 filter_support;
+};
+
+struct hns3_pmu {
+       struct perf_event *hw_events[HNS3_PMU_MAX_HW_EVENTS];
+       struct hlist_node node;
+       struct pci_dev *pdev;
+       struct pmu pmu;
+       void __iomem *base;
+       int irq;
+       int on_cpu;
+       u32 identifier;
+       u32 hw_clk_freq; /* hardware clock frequency of PMU */
+       /* maximum and minimum bdf allowed by PMU */
+       u16 bdf_min;
+       u16 bdf_max;
+};
+
+#define to_hns3_pmu(p)  (container_of((p), struct hns3_pmu, pmu))
+
+#define GET_PCI_DEVFN(bdf)  ((bdf) & 0xff)
+
+#define FILTER_CONDITION_PORT(port) ((1 << (port)) & 0xff)
+#define FILTER_CONDITION_PORT_TC(port, tc) (((port) << 3) | ((tc) & 0x07))
+#define FILTER_CONDITION_FUNC_INTR(func, intr) (((intr) << 8) | (func))
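+/* e.g. bdf 0x3502 is device 35:00.2: PCI_BUS_NUM() = 0x35, GET_PCI_DEVFN() = 0x02 */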
+
+#define HNS3_PMU_FILTER_ATTR(_name, _config, _start, _end)               \
+       static inline u64 hns3_pmu_get_##_name(struct perf_event *event) \
+       {                                                                \
+               return FIELD_GET(GENMASK_ULL(_end, _start),              \
+                                event->attr._config);                   \
+       }
+
+HNS3_PMU_FILTER_ATTR(subevent, config, 0, 7);
+HNS3_PMU_FILTER_ATTR(event_type, config, 8, 15);
+HNS3_PMU_FILTER_ATTR(ext_counter_used, config, 16, 16);
+HNS3_PMU_FILTER_ATTR(port, config1, 0, 3);
+HNS3_PMU_FILTER_ATTR(tc, config1, 4, 7);
+HNS3_PMU_FILTER_ATTR(bdf, config1, 8, 23);
+HNS3_PMU_FILTER_ATTR(queue, config1, 24, 39);
+HNS3_PMU_FILTER_ATTR(intr, config1, 40, 51);
+HNS3_PMU_FILTER_ATTR(global, config1, 52, 52);
+
+#define HNS3_BW_EVT_BYTE_NUM(_name)    (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_BW_##_name##_BYTE_NUM,                             \
+       HNS3_PMU_FILTER_BW_##_name})
+#define HNS3_BW_EVT_TIME(_name)                (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_BW_##_name##_TIME,                                 \
+       HNS3_PMU_FILTER_BW_##_name})
+#define HNS3_PPS_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_PPS_##_name##_PACKET_NUM,                          \
+       HNS3_PMU_FILTER_PPS_##_name})
+#define HNS3_PPS_EVT_TIME(_name)       (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_PPS_##_name##_TIME,                                \
+       HNS3_PMU_FILTER_PPS_##_name})
+#define HNS3_DLY_EVT_TIME(_name)       (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_DLY_##_name##_TIME,                                \
+       HNS3_PMU_FILTER_DLY_##_name})
+#define HNS3_DLY_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_DLY_##_name##_PACKET_NUM,                          \
+       HNS3_PMU_FILTER_DLY_##_name})
+#define HNS3_INTR_EVT_INTR_NUM(_name)  (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_PPS_##_name##_INTR_NUM,                            \
+       HNS3_PMU_FILTER_INTR_##_name})
+#define HNS3_INTR_EVT_TIME(_name)      (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_PPS_##_name##_TIME,                                \
+       HNS3_PMU_FILTER_INTR_##_name})
+
+static ssize_t hns3_pmu_format_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct dev_ext_attribute *eattr;
+
+       eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+       return sysfs_emit(buf, "%s\n", (char *)eattr->var);
+}
+
+static ssize_t hns3_pmu_event_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct hns3_pmu_event_attr *event;
+       struct dev_ext_attribute *eattr;
+
+       eattr = container_of(attr, struct dev_ext_attribute, attr);
+       event = eattr->var;
+
+       return sysfs_emit(buf, "config=0x%x\n", event->event);
+}
+
+static ssize_t hns3_pmu_filter_mode_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct hns3_pmu_event_attr *event;
+       struct dev_ext_attribute *eattr;
+       int len;
+
+       eattr = container_of(attr, struct dev_ext_attribute, attr);
+       event = eattr->var;
+
+       len = sysfs_emit_at(buf, 0, "filter mode supported: ");
+       if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL)
+               len += sysfs_emit_at(buf, len, "global ");
+       if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT)
+               len += sysfs_emit_at(buf, len, "port ");
+       if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC)
+               len += sysfs_emit_at(buf, len, "port-tc ");
+       if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC)
+               len += sysfs_emit_at(buf, len, "func ");
+       if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE)
+               len += sysfs_emit_at(buf, len, "func-queue ");
+       if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR)
+               len += sysfs_emit_at(buf, len, "func-intr ");
+
+       len += sysfs_emit_at(buf, len, "\n");
+
+       return len;
+}
+
+#define HNS3_PMU_ATTR(_name, _func, _config)                           \
+       (&((struct dev_ext_attribute[]) {                               \
+               { __ATTR(_name, 0444, _func, NULL), (void *)_config }   \
+       })[0].attr.attr)
+
+#define HNS3_PMU_FORMAT_ATTR(_name, _format) \
+       HNS3_PMU_ATTR(_name, hns3_pmu_format_show, (void *)_format)
+#define HNS3_PMU_EVENT_ATTR(_name, _event) \
+       HNS3_PMU_ATTR(_name, hns3_pmu_event_show, (void *)_event)
+#define HNS3_PMU_FLT_MODE_ATTR(_name, _event) \
+       HNS3_PMU_ATTR(_name, hns3_pmu_filter_mode_show, (void *)_event)
+
+#define HNS3_PMU_BW_EVT_PAIR(_name, _macro) \
+       HNS3_PMU_EVENT_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \
+       HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro))
+#define HNS3_PMU_PPS_EVT_PAIR(_name, _macro) \
+       HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \
+       HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro))
+#define HNS3_PMU_DLY_EVT_PAIR(_name, _macro) \
+       HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \
+       HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro))
+#define HNS3_PMU_INTR_EVT_PAIR(_name, _macro) \
+       HNS3_PMU_EVENT_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \
+       HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro))
+
+#define HNS3_PMU_BW_FLT_MODE_PAIR(_name, _macro) \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro))
+#define HNS3_PMU_PPS_FLT_MODE_PAIR(_name, _macro) \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro))
+#define HNS3_PMU_DLY_FLT_MODE_PAIR(_name, _macro) \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro))
+#define HNS3_PMU_INTR_FLT_MODE_PAIR(_name, _macro) \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro))
+
+static u8 hns3_pmu_hw_filter_modes[] = {
+       HNS3_PMU_HW_FILTER_GLOBAL,
+       HNS3_PMU_HW_FILTER_PORT,
+       HNS3_PMU_HW_FILTER_PORT_TC,
+       HNS3_PMU_HW_FILTER_FUNC,
+       HNS3_PMU_HW_FILTER_FUNC_QUEUE,
+       HNS3_PMU_HW_FILTER_FUNC_INTR,
+};
+
+#define HNS3_PMU_SET_HW_FILTER(_hwc, _mode) \
+       ((_hwc)->addr_filters = (void *)&hns3_pmu_hw_filter_modes[(_mode)])
+
+static ssize_t identifier_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+
+       return sysfs_emit(buf, "0x%x\n", hns3_pmu->identifier);
+}
+static DEVICE_ATTR_RO(identifier);
+
+static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+
+       return sysfs_emit(buf, "%d\n", hns3_pmu->on_cpu);
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static ssize_t bdf_min_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+       u16 bdf = hns3_pmu->bdf_min;
+
+       return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf),
+                         PCI_SLOT(bdf), PCI_FUNC(bdf));
+}
+static DEVICE_ATTR_RO(bdf_min);
+
+static ssize_t bdf_max_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+       u16 bdf = hns3_pmu->bdf_max;
+
+       return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf),
+                         PCI_SLOT(bdf), PCI_FUNC(bdf));
+}
+static DEVICE_ATTR_RO(bdf_max);
+
+static ssize_t hw_clk_freq_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+
+       return sysfs_emit(buf, "%u\n", hns3_pmu->hw_clk_freq);
+}
+static DEVICE_ATTR_RO(hw_clk_freq);
+
+static struct attribute *hns3_pmu_events_attr[] = {
+       /* bandwidth events */
+       HNS3_PMU_BW_EVT_PAIR(bw_ssu_egu, SSU_EGU),
+       HNS3_PMU_BW_EVT_PAIR(bw_ssu_rpu, SSU_RPU),
+       HNS3_PMU_BW_EVT_PAIR(bw_ssu_roce, SSU_ROCE),
+       HNS3_PMU_BW_EVT_PAIR(bw_roce_ssu, ROCE_SSU),
+       HNS3_PMU_BW_EVT_PAIR(bw_tpu_ssu, TPU_SSU),
+       HNS3_PMU_BW_EVT_PAIR(bw_rpu_rcbrx, RPU_RCBRX),
+       HNS3_PMU_BW_EVT_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH),
+       HNS3_PMU_BW_EVT_PAIR(bw_wr_fbd, WR_FBD),
+       HNS3_PMU_BW_EVT_PAIR(bw_wr_ebd, WR_EBD),
+       HNS3_PMU_BW_EVT_PAIR(bw_rd_fbd, RD_FBD),
+       HNS3_PMU_BW_EVT_PAIR(bw_rd_ebd, RD_EBD),
+       HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m0, RD_PAY_M0),
+       HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m1, RD_PAY_M1),
+       HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m0, WR_PAY_M0),
+       HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m1, WR_PAY_M1),
+
+       /* packet rate events */
+       HNS3_PMU_PPS_EVT_PAIR(pps_igu_ssu, IGU_SSU),
+       HNS3_PMU_PPS_EVT_PAIR(pps_ssu_egu, SSU_EGU),
+       HNS3_PMU_PPS_EVT_PAIR(pps_ssu_rpu, SSU_RPU),
+       HNS3_PMU_PPS_EVT_PAIR(pps_ssu_roce, SSU_ROCE),
+       HNS3_PMU_PPS_EVT_PAIR(pps_roce_ssu, ROCE_SSU),
+       HNS3_PMU_PPS_EVT_PAIR(pps_tpu_ssu, TPU_SSU),
+       HNS3_PMU_PPS_EVT_PAIR(pps_rpu_rcbrx, RPU_RCBRX),
+       HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_tpu, RCBTX_TPU),
+       HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH),
+       HNS3_PMU_PPS_EVT_PAIR(pps_wr_fbd, WR_FBD),
+       HNS3_PMU_PPS_EVT_PAIR(pps_wr_ebd, WR_EBD),
+       HNS3_PMU_PPS_EVT_PAIR(pps_rd_fbd, RD_FBD),
+       HNS3_PMU_PPS_EVT_PAIR(pps_rd_ebd, RD_EBD),
+       HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m0, RD_PAY_M0),
+       HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m1, RD_PAY_M1),
+       HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m0, WR_PAY_M0),
+       HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m1, WR_PAY_M1),
+       HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE),
+       HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE),
+
+       /* latency events */
+       HNS3_PMU_DLY_EVT_PAIR(dly_tx_push_to_mac, TX_PUSH),
+       HNS3_PMU_DLY_EVT_PAIR(dly_tx_normal_to_mac, TX),
+       HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC),
+       HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE),
+       HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC),
+       HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE),
+       HNS3_PMU_DLY_EVT_PAIR(dly_rpu, RPU),
+       HNS3_PMU_DLY_EVT_PAIR(dly_tpu, TPU),
+       HNS3_PMU_DLY_EVT_PAIR(dly_rpe, RPE),
+       HNS3_PMU_DLY_EVT_PAIR(dly_tpe_normal, TPE),
+       HNS3_PMU_DLY_EVT_PAIR(dly_tpe_push, TPE_PUSH),
+       HNS3_PMU_DLY_EVT_PAIR(dly_wr_fbd, WR_FBD),
+       HNS3_PMU_DLY_EVT_PAIR(dly_wr_ebd, WR_EBD),
+       HNS3_PMU_DLY_EVT_PAIR(dly_rd_fbd, RD_FBD),
+       HNS3_PMU_DLY_EVT_PAIR(dly_rd_ebd, RD_EBD),
+       HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m0, RD_PAY_M0),
+       HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m1, RD_PAY_M1),
+       HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m0, WR_PAY_M0),
+       HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m1, WR_PAY_M1),
+       HNS3_PMU_DLY_EVT_PAIR(dly_msix_write, MSIX_WRITE),
+
+       /* interrupt rate events */
+       HNS3_PMU_INTR_EVT_PAIR(pps_intr_msix_nic, MSIX_NIC),
+
+       NULL
+};
+
+static struct attribute *hns3_pmu_filter_mode_attr[] = {
+       /* bandwidth events */
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_egu, SSU_EGU),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_rpu, SSU_RPU),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_roce, SSU_ROCE),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_roce_ssu, ROCE_SSU),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_tpu_ssu, TPU_SSU),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_rpu_rcbrx, RPU_RCBRX),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_fbd, WR_FBD),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_ebd, WR_EBD),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_fbd, RD_FBD),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_ebd, RD_EBD),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m0, RD_PAY_M0),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m1, RD_PAY_M1),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m0, WR_PAY_M0),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m1, WR_PAY_M1),
+
+       /* packet rate events */
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_igu_ssu, IGU_SSU),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_egu, SSU_EGU),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_rpu, SSU_RPU),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_roce, SSU_ROCE),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_roce_ssu, ROCE_SSU),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_tpu_ssu, TPU_SSU),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rpu_rcbrx, RPU_RCBRX),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_tpu, RCBTX_TPU),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_fbd, WR_FBD),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_ebd, WR_EBD),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_fbd, RD_FBD),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_ebd, RD_EBD),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m0, RD_PAY_M0),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m1, RD_PAY_M1),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m0, WR_PAY_M0),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m1, WR_PAY_M1),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE),
+
+       /* latency events */
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_push_to_mac, TX_PUSH),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_normal_to_mac, TX),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpu, RPU),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpu, TPU),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpe, RPE),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_normal, TPE),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_push, TPE_PUSH),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_fbd, WR_FBD),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_ebd, WR_EBD),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_fbd, RD_FBD),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_ebd, RD_EBD),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m0, RD_PAY_M0),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m1, RD_PAY_M1),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m0, WR_PAY_M0),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m1, WR_PAY_M1),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_msix_write, MSIX_WRITE),
+
+       /* interrupt rate events */
+       HNS3_PMU_INTR_FLT_MODE_PAIR(pps_intr_msix_nic, MSIX_NIC),
+
+       NULL
+};
+
+static struct attribute_group hns3_pmu_events_group = {
+       .name = "events",
+       .attrs = hns3_pmu_events_attr,
+};
+
+static struct attribute_group hns3_pmu_filter_mode_group = {
+       .name = "filtermode",
+       .attrs = hns3_pmu_filter_mode_attr,
+};
+
+static struct attribute *hns3_pmu_format_attr[] = {
+       HNS3_PMU_FORMAT_ATTR(subevent, "config:0-7"),
+       HNS3_PMU_FORMAT_ATTR(event_type, "config:8-15"),
+       HNS3_PMU_FORMAT_ATTR(ext_counter_used, "config:16"),
+       HNS3_PMU_FORMAT_ATTR(port, "config1:0-3"),
+       HNS3_PMU_FORMAT_ATTR(tc, "config1:4-7"),
+       HNS3_PMU_FORMAT_ATTR(bdf, "config1:8-23"),
+       HNS3_PMU_FORMAT_ATTR(queue, "config1:24-39"),
+       HNS3_PMU_FORMAT_ATTR(intr, "config1:40-51"),
+       HNS3_PMU_FORMAT_ATTR(global, "config1:52"),
+       NULL
+};
+
+static struct attribute_group hns3_pmu_format_group = {
+       .name = "format",
+       .attrs = hns3_pmu_format_attr,
+};
+
+static struct attribute *hns3_pmu_cpumask_attrs[] = {
+       &dev_attr_cpumask.attr,
+       NULL
+};
+
+static struct attribute_group hns3_pmu_cpumask_attr_group = {
+       .attrs = hns3_pmu_cpumask_attrs,
+};
+
+static struct attribute *hns3_pmu_identifier_attrs[] = {
+       &dev_attr_identifier.attr,
+       NULL
+};
+
+static struct attribute_group hns3_pmu_identifier_attr_group = {
+       .attrs = hns3_pmu_identifier_attrs,
+};
+
+static struct attribute *hns3_pmu_bdf_range_attrs[] = {
+       &dev_attr_bdf_min.attr,
+       &dev_attr_bdf_max.attr,
+       NULL
+};
+
+static struct attribute_group hns3_pmu_bdf_range_attr_group = {
+       .attrs = hns3_pmu_bdf_range_attrs,
+};
+
+static struct attribute *hns3_pmu_hw_clk_freq_attrs[] = {
+       &dev_attr_hw_clk_freq.attr,
+       NULL
+};
+
+static struct attribute_group hns3_pmu_hw_clk_freq_attr_group = {
+       .attrs = hns3_pmu_hw_clk_freq_attrs,
+};
+
+static const struct attribute_group *hns3_pmu_attr_groups[] = {
+       &hns3_pmu_events_group,
+       &hns3_pmu_filter_mode_group,
+       &hns3_pmu_format_group,
+       &hns3_pmu_cpumask_attr_group,
+       &hns3_pmu_identifier_attr_group,
+       &hns3_pmu_bdf_range_attr_group,
+       &hns3_pmu_hw_clk_freq_attr_group,
+       NULL
+};
+
+static u32 hns3_pmu_get_event(struct perf_event *event)
+{
+       return hns3_pmu_get_ext_counter_used(event) << 16 |
+              hns3_pmu_get_event_type(event) << 8 |
+              hns3_pmu_get_subevent(event);
+}
+
+static u32 hns3_pmu_get_real_event(struct perf_event *event)
+{
+       return hns3_pmu_get_event_type(event) << 8 |
+              hns3_pmu_get_subevent(event);
+}
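+
+/*
+ * Example: attr.config = 0x10001 decodes as subevent 0x01, event_type 0x00
+ * and ext_counter_used = 1, i.e. the SSU_EGU bandwidth event read through
+ * its ext_counter (cycle) register. Its "real" event is 0x0001, the same
+ * hardware event as attr.config = 0x00001 (the byte counter).
+ */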
+
+static u32 hns3_pmu_get_offset(u32 offset, u32 idx)
+{
+       return offset + HNS3_PMU_REG_EVENT_OFFSET +
+              HNS3_PMU_REG_EVENT_SIZE * idx;
+}
+
+static u32 hns3_pmu_readl(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
+{
+       u32 offset = hns3_pmu_get_offset(reg_offset, idx);
+
+       return readl(hns3_pmu->base + offset);
+}
+
+static void hns3_pmu_writel(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
+                           u32 val)
+{
+       u32 offset = hns3_pmu_get_offset(reg_offset, idx);
+
+       writel(val, hns3_pmu->base + offset);
+}
+
+static u64 hns3_pmu_readq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
+{
+       u32 offset = hns3_pmu_get_offset(reg_offset, idx);
+
+       return readq(hns3_pmu->base + offset);
+}
+
+static void hns3_pmu_writeq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
+                           u64 val)
+{
+       u32 offset = hns3_pmu_get_offset(reg_offset, idx);
+
+       writeq(val, hns3_pmu->base + offset);
+}
+
+static bool hns3_pmu_cmp_event(struct perf_event *target,
+                              struct perf_event *event)
+{
+       return hns3_pmu_get_real_event(target) == hns3_pmu_get_real_event(event);
+}
+
+static int hns3_pmu_find_related_event_idx(struct hns3_pmu *hns3_pmu,
+                                          struct perf_event *event)
+{
+       struct perf_event *sibling;
+       int hw_event_used = 0;
+       int idx;
+
+       for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
+               sibling = hns3_pmu->hw_events[idx];
+               if (!sibling)
+                       continue;
+
+               hw_event_used++;
+
+               if (!hns3_pmu_cmp_event(sibling, event))
+                       continue;
+
+               /* A related event is already in use in this group */
+               if (sibling->group_leader == event->group_leader)
+                       return idx;
+       }
+
+       /* No related event and all hardware events are used up */
+       if (hw_event_used >= HNS3_PMU_MAX_HW_EVENTS)
+               return -EBUSY;
+
+       /* No related event, but spare hardware events are available */
+       return -ENOENT;
+}
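+
+/*
+ * Example: a group whose leader counts HNS3_PMU_EVT_BW_SSU_EGU_BYTE_NUM
+ * (0x00001) and whose sibling counts HNS3_PMU_EVT_BW_SSU_EGU_TIME (0x10001)
+ * shares one hardware slot: the real events match (0x0001), so the sibling
+ * resolves to the leader's idx and simply reads the ext_counter register of
+ * the same hardware event.
+ */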
+
+static int hns3_pmu_get_event_idx(struct hns3_pmu *hns3_pmu)
+{
+       int idx;
+
+       for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
+               if (!hns3_pmu->hw_events[idx])
+                       return idx;
+       }
+
+       return -EBUSY;
+}
+
+static bool hns3_pmu_valid_bdf(struct hns3_pmu *hns3_pmu, u16 bdf)
+{
+       struct pci_dev *pdev;
+
+       if (bdf < hns3_pmu->bdf_min || bdf > hns3_pmu->bdf_max) {
+               pci_err(hns3_pmu->pdev, "Invalid EP device: %#x!\n", bdf);
+               return false;
+       }
+
+       pdev = pci_get_domain_bus_and_slot(pci_domain_nr(hns3_pmu->pdev->bus),
+                                          PCI_BUS_NUM(bdf),
+                                          GET_PCI_DEVFN(bdf));
+       if (!pdev) {
+               pci_err(hns3_pmu->pdev, "Nonexistent EP device: %#x!\n", bdf);
+               return false;
+       }
+
+       pci_dev_put(pdev);
+       return true;
+}
+
+static void hns3_pmu_set_qid_para(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf,
+                                 u16 queue)
+{
+       u32 val;
+
+       val = GET_PCI_DEVFN(bdf);
+       val |= (u32)queue << HNS3_PMU_QID_PARA_QUEUE_S;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_PARA, idx, val);
+}
+
+static bool hns3_pmu_qid_req_start(struct hns3_pmu *hns3_pmu, u32 idx)
+{
+       bool queue_id_valid = false;
+       u32 reg_qid_ctrl, val;
+       int err;
+
+       /* enable queue id request */
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx,
+                       HNS3_PMU_QID_CTRL_REQ_ENABLE);
+
+       reg_qid_ctrl = hns3_pmu_get_offset(HNS3_PMU_REG_EVENT_QID_CTRL, idx);
+       err = readl_poll_timeout(hns3_pmu->base + reg_qid_ctrl, val,
+                                val & HNS3_PMU_QID_CTRL_DONE, 1, 1000);
+       if (err == -ETIMEDOUT) {
+               pci_err(hns3_pmu->pdev, "QID request timeout!\n");
+               goto out;
+       }
+
+       queue_id_valid = !(val & HNS3_PMU_QID_CTRL_MISS);
+
+out:
+       /* disable qid request and clear status */
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, 0);
+
+       return queue_id_valid;
+}
+
+static bool hns3_pmu_valid_queue(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf,
+                                u16 queue)
+{
+       hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue);
+
+       return hns3_pmu_qid_req_start(hns3_pmu, idx);
+}
+
+static struct hns3_pmu_event_attr *hns3_pmu_get_pmu_event(u32 event)
+{
+       struct hns3_pmu_event_attr *pmu_event;
+       struct dev_ext_attribute *eattr;
+       struct device_attribute *dattr;
+       struct attribute *attr;
+       u32 i;
+
+       for (i = 0; i < ARRAY_SIZE(hns3_pmu_events_attr) - 1; i++) {
+               attr = hns3_pmu_events_attr[i];
+               dattr = container_of(attr, struct device_attribute, attr);
+               eattr = container_of(dattr, struct dev_ext_attribute, attr);
+               pmu_event = eattr->var;
+
+               if (event == pmu_event->event)
+                       return pmu_event;
+       }
+
+       return NULL;
+}
+
+static int hns3_pmu_set_func_mode(struct perf_event *event,
+                                 struct hns3_pmu *hns3_pmu)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       u16 bdf = hns3_pmu_get_bdf(event);
+
+       if (!hns3_pmu_valid_bdf(hns3_pmu, bdf))
+               return -ENOENT;
+
+       HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC);
+
+       return 0;
+}
+
+static int hns3_pmu_set_func_queue_mode(struct perf_event *event,
+                                       struct hns3_pmu *hns3_pmu)
+{
+       u16 queue_id = hns3_pmu_get_queue(event);
+       struct hw_perf_event *hwc = &event->hw;
+       u16 bdf = hns3_pmu_get_bdf(event);
+
+       if (!hns3_pmu_valid_bdf(hns3_pmu, bdf))
+               return -ENOENT;
+
+       if (!hns3_pmu_valid_queue(hns3_pmu, hwc->idx, bdf, queue_id)) {
+               pci_err(hns3_pmu->pdev, "Invalid queue: %u\n", queue_id);
+               return -ENOENT;
+       }
+
+       HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_QUEUE);
+
+       return 0;
+}
+
+static bool
+hns3_pmu_is_enabled_global_mode(struct perf_event *event,
+                               struct hns3_pmu_event_attr *pmu_event)
+{
+       u8 global = hns3_pmu_get_global(event);
+
+       if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL))
+               return false;
+
+       return global;
+}
+
+static bool hns3_pmu_is_enabled_func_mode(struct perf_event *event,
+                                         struct hns3_pmu_event_attr *pmu_event)
+{
+       u16 queue_id = hns3_pmu_get_queue(event);
+       u16 bdf = hns3_pmu_get_bdf(event);
+
+       if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC))
+               return false;
+       else if (queue_id != HNS3_PMU_FILTER_ALL_QUEUE)
+               return false;
+
+       return bdf;
+}
+
+static bool
+hns3_pmu_is_enabled_func_queue_mode(struct perf_event *event,
+                                   struct hns3_pmu_event_attr *pmu_event)
+{
+       u16 queue_id = hns3_pmu_get_queue(event);
+       u16 bdf = hns3_pmu_get_bdf(event);
+
+       if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE))
+               return false;
+       else if (queue_id == HNS3_PMU_FILTER_ALL_QUEUE)
+               return false;
+
+       return bdf;
+}
+
+static bool hns3_pmu_is_enabled_port_mode(struct perf_event *event,
+                                         struct hns3_pmu_event_attr *pmu_event)
+{
+       u8 tc_id = hns3_pmu_get_tc(event);
+
+       if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT))
+               return false;
+
+       return tc_id == HNS3_PMU_FILTER_ALL_TC;
+}
+
+static bool
+hns3_pmu_is_enabled_port_tc_mode(struct perf_event *event,
+                                struct hns3_pmu_event_attr *pmu_event)
+{
+       u8 tc_id = hns3_pmu_get_tc(event);
+
+       if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC))
+               return false;
+
+       return tc_id != HNS3_PMU_FILTER_ALL_TC;
+}
+
+static bool
+hns3_pmu_is_enabled_func_intr_mode(struct perf_event *event,
+                                  struct hns3_pmu *hns3_pmu,
+                                  struct hns3_pmu_event_attr *pmu_event)
+{
+       u16 bdf = hns3_pmu_get_bdf(event);
+
+       if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR))
+               return false;
+
+       return hns3_pmu_valid_bdf(hns3_pmu, bdf);
+}
+
+static int hns3_pmu_select_filter_mode(struct perf_event *event,
+                                      struct hns3_pmu *hns3_pmu)
+{
+       u32 event_id = hns3_pmu_get_event(event);
+       struct hw_perf_event *hwc = &event->hw;
+       struct hns3_pmu_event_attr *pmu_event;
+
+       pmu_event = hns3_pmu_get_pmu_event(event_id);
+       if (!pmu_event) {
+               pci_err(hns3_pmu->pdev, "Invalid pmu event\n");
+               return -ENOENT;
+       }
+
+       if (hns3_pmu_is_enabled_global_mode(event, pmu_event)) {
+               HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_GLOBAL);
+               return 0;
+       }
+
+       if (hns3_pmu_is_enabled_func_mode(event, pmu_event))
+               return hns3_pmu_set_func_mode(event, hns3_pmu);
+
+       if (hns3_pmu_is_enabled_func_queue_mode(event, pmu_event))
+               return hns3_pmu_set_func_queue_mode(event, hns3_pmu);
+
+       if (hns3_pmu_is_enabled_port_mode(event, pmu_event)) {
+               HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT);
+               return 0;
+       }
+
+       if (hns3_pmu_is_enabled_port_tc_mode(event, pmu_event)) {
+               HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT_TC);
+               return 0;
+       }
+
+       if (hns3_pmu_is_enabled_func_intr_mode(event, hns3_pmu, pmu_event)) {
+               HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_INTR);
+               return 0;
+       }
+
+       return -ENOENT;
+}
+
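+/*
+ * Count the distinct hardware events in the group. Events with an
+ * identical configuration (as compared by hns3_pmu_cmp_event()) share a
+ * hardware counter and are counted once; the group is schedulable only
+ * if the distinct events fit in HNS3_PMU_MAX_HW_EVENTS counters.
+ */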
+static bool hns3_pmu_validate_event_group(struct perf_event *event)
+{
+       struct perf_event *sibling, *leader = event->group_leader;
+       struct perf_event *event_group[HNS3_PMU_MAX_HW_EVENTS];
+       int counters = 1;
+       int num;
+
+       event_group[0] = leader;
+       if (!is_software_event(leader)) {
+               if (leader->pmu != event->pmu)
+                       return false;
+
+               if (leader != event && !hns3_pmu_cmp_event(leader, event))
+                       event_group[counters++] = event;
+       }
+
+       for_each_sibling_event(sibling, event->group_leader) {
+               if (is_software_event(sibling))
+                       continue;
+
+               if (sibling->pmu != event->pmu)
+                       return false;
+
+               for (num = 0; num < counters; num++) {
+                       if (hns3_pmu_cmp_event(event_group[num], sibling))
+                               break;
+               }
+
+               if (num == counters)
+                       event_group[counters++] = sibling;
+       }
+
+       return counters <= HNS3_PMU_MAX_HW_EVENTS;
+}
+
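+/*
+ * Translate the selected filter mode into the condition value written
+ * to the high event control register: port (and optionally tc) ids for
+ * the port filters, the PCI devfn for the function filters, and devfn
+ * plus interrupt id for the function-interrupt filter. Global mode
+ * needs no condition, so 0 is returned.
+ */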
+static u32 hns3_pmu_get_filter_condition(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       u16 intr_id = hns3_pmu_get_intr(event);
+       u8 port_id = hns3_pmu_get_port(event);
+       u16 bdf = hns3_pmu_get_bdf(event);
+       u8 tc_id = hns3_pmu_get_tc(event);
+       u8 filter_mode;
+
+       filter_mode = *(u8 *)hwc->addr_filters;
+       switch (filter_mode) {
+       case HNS3_PMU_HW_FILTER_PORT:
+               return FILTER_CONDITION_PORT(port_id);
+       case HNS3_PMU_HW_FILTER_PORT_TC:
+               return FILTER_CONDITION_PORT_TC(port_id, tc_id);
+       case HNS3_PMU_HW_FILTER_FUNC:
+       case HNS3_PMU_HW_FILTER_FUNC_QUEUE:
+               return GET_PCI_DEVFN(bdf);
+       case HNS3_PMU_HW_FILTER_FUNC_INTR:
+               return FILTER_CONDITION_FUNC_INTR(GET_PCI_DEVFN(bdf), intr_id);
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static void hns3_pmu_config_filter(struct perf_event *event)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+       u8 event_type = hns3_pmu_get_event_type(event);
+       u8 subevent_id = hns3_pmu_get_subevent(event);
+       u16 queue_id = hns3_pmu_get_queue(event);
+       struct hw_perf_event *hwc = &event->hw;
+       u8 filter_mode = *(u8 *)hwc->addr_filters;
+       u16 bdf = hns3_pmu_get_bdf(event);
+       u32 idx = hwc->idx;
+       u32 val;
+
+       val = event_type;
+       val |= subevent_id << HNS3_PMU_CTRL_SUBEVENT_S;
+       val |= filter_mode << HNS3_PMU_CTRL_FILTER_MODE_S;
+       val |= HNS3_PMU_EVENT_OVERFLOW_RESTART;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+
+       val = hns3_pmu_get_filter_condition(event);
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_HIGH, idx, val);
+
+       if (filter_mode == HNS3_PMU_HW_FILTER_FUNC_QUEUE)
+               hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue_id);
+}
+
+static void hns3_pmu_enable_counter(struct hns3_pmu *hns3_pmu,
+                                   struct hw_perf_event *hwc)
+{
+       u32 idx = hwc->idx;
+       u32 val;
+
+       val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
+       val |= HNS3_PMU_EVENT_EN;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+}
+
+static void hns3_pmu_disable_counter(struct hns3_pmu *hns3_pmu,
+                                    struct hw_perf_event *hwc)
+{
+       u32 idx = hwc->idx;
+       u32 val;
+
+       val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
+       val &= ~HNS3_PMU_EVENT_EN;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+}
+
+static void hns3_pmu_enable_intr(struct hns3_pmu *hns3_pmu,
+                                struct hw_perf_event *hwc)
+{
+       u32 idx = hwc->idx;
+       u32 val;
+
+       val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx);
+       val &= ~HNS3_PMU_INTR_MASK_OVERFLOW;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val);
+}
+
+static void hns3_pmu_disable_intr(struct hns3_pmu *hns3_pmu,
+                                 struct hw_perf_event *hwc)
+{
+       u32 idx = hwc->idx;
+       u32 val;
+
+       val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx);
+       val |= HNS3_PMU_INTR_MASK_OVERFLOW;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val);
+}
+
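+/*
+ * Pulse the status-reset bit: setting it clears the latched interrupt
+ * status, and clearing it again allows the next overflow to be latched.
+ */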
+static void hns3_pmu_clear_intr_status(struct hns3_pmu *hns3_pmu, u32 idx)
+{
+       u32 val;
+
+       val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
+       val |= HNS3_PMU_EVENT_STATUS_RESET;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+
+       val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
+       val &= ~HNS3_PMU_EVENT_STATUS_RESET;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+}
+
+static u64 hns3_pmu_read_counter(struct perf_event *event)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+
+       return hns3_pmu_readq(hns3_pmu, event->hw.event_base, event->hw.idx);
+}
+
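+/*
+ * Write both the basic and the extended counter so that the event
+ * starts from the same value no matter which register it later reads
+ * through hwc->event_base.
+ */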
+static void hns3_pmu_write_counter(struct perf_event *event, u64 value)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+       u32 idx = event->hw.idx;
+
+       hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_COUNTER, idx, value);
+       hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_EXT_COUNTER, idx, value);
+}
+
+static void hns3_pmu_init_counter(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       local64_set(&hwc->prev_count, 0);
+       hns3_pmu_write_counter(event, 0);
+}
+
+static int hns3_pmu_event_init(struct perf_event *event)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx;
+       int ret;
+
+       if (event->attr.type != event->pmu->type)
+               return -ENOENT;
+
+       /* Sampling and per-task counting are not supported */
+       if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+               return -EOPNOTSUPP;
+
+       event->cpu = hns3_pmu->on_cpu;
+
+       idx = hns3_pmu_get_event_idx(hns3_pmu);
+       if (idx < 0) {
+               pci_err(hns3_pmu->pdev, "Up to %u events are supported!\n",
+                       HNS3_PMU_MAX_HW_EVENTS);
+               return -EBUSY;
+       }
+
+       hwc->idx = idx;
+
+       ret = hns3_pmu_select_filter_mode(event, hns3_pmu);
+       if (ret) {
+               pci_err(hns3_pmu->pdev, "Invalid filter, ret = %d.\n", ret);
+               return ret;
+       }
+
+       if (!hns3_pmu_validate_event_group(event)) {
+               pci_err(hns3_pmu->pdev, "Invalid event group.\n");
+               return -EINVAL;
+       }
+
+       if (hns3_pmu_get_ext_counter_used(event))
+               hwc->event_base = HNS3_PMU_REG_EVENT_EXT_COUNTER;
+       else
+               hwc->event_base = HNS3_PMU_REG_EVENT_COUNTER;
+
+       return 0;
+}
+
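+/*
+ * Fold the hardware count into the perf event count. The cmpxchg loop
+ * makes the prev_count update safe against concurrent readers of the
+ * same event, so a delta is never accounted twice.
+ */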
+static void hns3_pmu_read(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       u64 new_cnt, prev_cnt, delta;
+
+       do {
+               prev_cnt = local64_read(&hwc->prev_count);
+               new_cnt = hns3_pmu_read_counter(event);
+       } while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) !=
+                prev_cnt);
+
+       delta = new_cnt - prev_cnt;
+       local64_add(delta, &event->count);
+}
+
+static void hns3_pmu_start(struct perf_event *event, int flags)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+               return;
+
+       WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+       hwc->state = 0;
+
+       hns3_pmu_config_filter(event);
+       hns3_pmu_init_counter(event);
+       hns3_pmu_enable_intr(hns3_pmu, hwc);
+       hns3_pmu_enable_counter(hns3_pmu, hwc);
+
+       perf_event_update_userpage(event);
+}
+
+static void hns3_pmu_stop(struct perf_event *event, int flags)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+
+       hns3_pmu_disable_counter(hns3_pmu, hwc);
+       hns3_pmu_disable_intr(hns3_pmu, hwc);
+
+       WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+       hwc->state |= PERF_HES_STOPPED;
+
+       if (hwc->state & PERF_HES_UPTODATE)
+               return;
+
+       /* Read hardware counter and update the perf counter statistics */
+       hns3_pmu_read(event);
+       hwc->state |= PERF_HES_UPTODATE;
+}
+
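+/*
+ * Events with an identical configuration share one hardware counter: if
+ * a related event is already scheduled, reuse its counter index instead
+ * of claiming a new one.
+ */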
+static int hns3_pmu_add(struct perf_event *event, int flags)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx;
+
+       hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+       /* Check all working events to find a related event. */
+       idx = hns3_pmu_find_related_event_idx(hns3_pmu, event);
+       if (idx < 0 && idx != -ENOENT)
+               return idx;
+
+       /* Current event shares an enabled hardware event with the related one */
+       if (idx >= 0 && idx < HNS3_PMU_MAX_HW_EVENTS) {
+               hwc->idx = idx;
+               goto start_count;
+       }
+
+       idx = hns3_pmu_get_event_idx(hns3_pmu);
+       if (idx < 0)
+               return idx;
+
+       hwc->idx = idx;
+       hns3_pmu->hw_events[idx] = event;
+
+start_count:
+       if (flags & PERF_EF_START)
+               hns3_pmu_start(event, PERF_EF_RELOAD);
+
+       return 0;
+}
+
+static void hns3_pmu_del(struct perf_event *event, int flags)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+
+       hns3_pmu_stop(event, PERF_EF_UPDATE);
+       hns3_pmu->hw_events[hwc->idx] = NULL;
+       perf_event_update_userpage(event);
+}
+
+static void hns3_pmu_enable(struct pmu *pmu)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu);
+       u32 val;
+
+       val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
+       val |= HNS3_PMU_GLOBAL_START;
+       writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
+}
+
+static void hns3_pmu_disable(struct pmu *pmu)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu);
+       u32 val;
+
+       val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
+       val &= ~HNS3_PMU_GLOBAL_START;
+       writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
+}
+
+static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
+{
+       u16 device_id;
+       char *name;
+       u32 val;
+
+       hns3_pmu->base = pcim_iomap_table(pdev)[BAR_2];
+       if (!hns3_pmu->base) {
+               pci_err(pdev, "ioremap failed\n");
+               return -ENOMEM;
+       }
+
+       hns3_pmu->hw_clk_freq = readl(hns3_pmu->base + HNS3_PMU_REG_CLOCK_FREQ);
+
+       val = readl(hns3_pmu->base + HNS3_PMU_REG_BDF);
+       hns3_pmu->bdf_min = val & 0xffff;
+       hns3_pmu->bdf_max = val >> 16;
+
+       val = readl(hns3_pmu->base + HNS3_PMU_REG_DEVICE_ID);
+       device_id = val & 0xffff;
+       name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hns3_pmu_sicl_%u", device_id);
+       if (!name)
+               return -ENOMEM;
+
+       hns3_pmu->pdev = pdev;
+       hns3_pmu->on_cpu = -1;
+       hns3_pmu->identifier = readl(hns3_pmu->base + HNS3_PMU_REG_VERSION);
+       hns3_pmu->pmu = (struct pmu) {
+               .name           = name,
+               .module         = THIS_MODULE,
+               .event_init     = hns3_pmu_event_init,
+               .pmu_enable     = hns3_pmu_enable,
+               .pmu_disable    = hns3_pmu_disable,
+               .add            = hns3_pmu_add,
+               .del            = hns3_pmu_del,
+               .start          = hns3_pmu_start,
+               .stop           = hns3_pmu_stop,
+               .read           = hns3_pmu_read,
+               .task_ctx_nr    = perf_invalid_context,
+               .attr_groups    = hns3_pmu_attr_groups,
+               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
+       };
+
+       return 0;
+}
+
+static irqreturn_t hns3_pmu_irq(int irq, void *data)
+{
+       struct hns3_pmu *hns3_pmu = data;
+       u32 intr_status, idx;
+
+       for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
+               intr_status = hns3_pmu_readl(hns3_pmu,
+                                            HNS3_PMU_REG_EVENT_INTR_STATUS,
+                                            idx);
+
+               /*
+                * As each counter restarts from 0 when it overflows, no
+                * extra processing is needed; just clear the interrupt
+                * status.
+                */
+               if (intr_status)
+                       hns3_pmu_clear_intr_status(hns3_pmu, idx);
+       }
+
+       return IRQ_HANDLED;
+}
+
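+/*
+ * The first CPU to come online claims the PMU: all events are then
+ * opened on that CPU and the overflow interrupt is bound to it.
+ */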
+static int hns3_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+       struct hns3_pmu *hns3_pmu;
+
+       hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
+       if (!hns3_pmu)
+               return -ENODEV;
+
+       if (hns3_pmu->on_cpu == -1) {
+               hns3_pmu->on_cpu = cpu;
+               irq_set_affinity(hns3_pmu->irq, cpumask_of(cpu));
+       }
+
+       return 0;
+}
+
+static int hns3_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+       struct hns3_pmu *hns3_pmu;
+       unsigned int target;
+
+       hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
+       if (!hns3_pmu)
+               return -ENODEV;
+
+       /* Nothing to do if this CPU doesn't own the PMU */
+       if (hns3_pmu->on_cpu != cpu)
+               return 0;
+
+       /* Choose a new CPU from all online CPUs */
+       target = cpumask_any_but(cpu_online_mask, cpu);
+       if (target >= nr_cpu_ids)
+               return 0;
+
+       perf_pmu_migrate_context(&hns3_pmu->pmu, cpu, target);
+       hns3_pmu->on_cpu = target;
+       irq_set_affinity(hns3_pmu->irq, cpumask_of(target));
+
+       return 0;
+}
+
+static void hns3_pmu_free_irq(void *data)
+{
+       struct pci_dev *pdev = data;
+
+       pci_free_irq_vectors(pdev);
+}
+
+static int hns3_pmu_irq_register(struct pci_dev *pdev,
+                                struct hns3_pmu *hns3_pmu)
+{
+       int irq, ret;
+
+       ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+       if (ret < 0) {
+               pci_err(pdev, "failed to enable MSI vectors, ret = %d.\n", ret);
+               return ret;
+       }
+
+       ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev);
+       if (ret) {
+               pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
+               return ret;
+       }
+
+       irq = pci_irq_vector(pdev, 0);
+       ret = devm_request_irq(&pdev->dev, irq, hns3_pmu_irq, 0,
+                              hns3_pmu->pmu.name, hns3_pmu);
+       if (ret) {
+               pci_err(pdev, "failed to register irq, ret = %d.\n", ret);
+               return ret;
+       }
+
+       hns3_pmu->irq = irq;
+
+       return 0;
+}
+
+static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
+{
+       int ret;
+
+       ret = hns3_pmu_alloc_pmu(pdev, hns3_pmu);
+       if (ret)
+               return ret;
+
+       ret = hns3_pmu_irq_register(pdev, hns3_pmu);
+       if (ret)
+               return ret;
+
+       ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+                                      &hns3_pmu->node);
+       if (ret) {
+               pci_err(pdev, "failed to register hotplug, ret = %d.\n", ret);
+               return ret;
+       }
+
+       ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
+       if (ret) {
+               pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret);
+               cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+                                           &hns3_pmu->node);
+       }
+
+       return ret;
+}
+
+static void hns3_pmu_uninit_pmu(struct pci_dev *pdev)
+{
+       struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev);
+
+       perf_pmu_unregister(&hns3_pmu->pmu);
+       cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+                                   &hns3_pmu->node);
+}
+
+static int hns3_pmu_init_dev(struct pci_dev *pdev)
+{
+       int ret;
+
+       ret = pcim_enable_device(pdev);
+       if (ret) {
+               pci_err(pdev, "failed to enable pci device, ret = %d.\n", ret);
+               return ret;
+       }
+
+       ret = pcim_iomap_regions(pdev, BIT(BAR_2), "hns3_pmu");
+       if (ret < 0) {
+               pci_err(pdev, "failed to request pci region, ret = %d.\n", ret);
+               return ret;
+       }
+
+       pci_set_master(pdev);
+
+       return 0;
+}
+
+static int hns3_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct hns3_pmu *hns3_pmu;
+       int ret;
+
+       hns3_pmu = devm_kzalloc(&pdev->dev, sizeof(*hns3_pmu), GFP_KERNEL);
+       if (!hns3_pmu)
+               return -ENOMEM;
+
+       ret = hns3_pmu_init_dev(pdev);
+       if (ret)
+               return ret;
+
+       ret = hns3_pmu_init_pmu(pdev, hns3_pmu);
+       if (ret) {
+               pci_clear_master(pdev);
+               return ret;
+       }
+
+       pci_set_drvdata(pdev, hns3_pmu);
+
+       return ret;
+}
+
+static void hns3_pmu_remove(struct pci_dev *pdev)
+{
+       hns3_pmu_uninit_pmu(pdev);
+       pci_clear_master(pdev);
+       pci_set_drvdata(pdev, NULL);
+}
+
+static const struct pci_device_id hns3_pmu_ids[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa22b) },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, hns3_pmu_ids);
+
+static struct pci_driver hns3_pmu_driver = {
+       .name = "hns3_pmu",
+       .id_table = hns3_pmu_ids,
+       .probe = hns3_pmu_probe,
+       .remove = hns3_pmu_remove,
+};
+
+static int __init hns3_pmu_module_init(void)
+{
+       int ret;
+
+       ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+                                     "AP_PERF_ARM_HNS3_PMU_ONLINE",
+                                     hns3_pmu_online_cpu,
+                                     hns3_pmu_offline_cpu);
+       if (ret) {
+               pr_err("failed to setup HNS3 PMU hotplug, ret = %d.\n", ret);
+               return ret;
+       }
+
+       ret = pci_register_driver(&hns3_pmu_driver);
+       if (ret) {
+               pr_err("failed to register pci driver, ret = %d.\n", ret);
+               cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
+       }
+
+       return ret;
+}
+module_init(hns3_pmu_module_init);
+
+static void __exit hns3_pmu_module_exit(void)
+{
+       pci_unregister_driver(&hns3_pmu_driver);
+       cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
+}
+module_exit(hns3_pmu_module_exit);
+
+MODULE_DESCRIPTION("HNS3 PMU driver");
+MODULE_LICENSE("GPL v2");
index 282d3a0..69c3050 100644 (file)
@@ -2,10 +2,6 @@
 /* Marvell CN10K LLC-TAD perf driver
  *
  * Copyright (C) 2021 Marvell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #define pr_fmt(fmt) "tad_pmu: " fmt
@@ -18,9 +14,9 @@
 #include <linux/perf_event.h>
 #include <linux/platform_device.h>
 
-#define TAD_PFC_OFFSET         0x0
+#define TAD_PFC_OFFSET         0x800
 #define TAD_PFC(counter)       (TAD_PFC_OFFSET | (counter << 3))
-#define TAD_PRF_OFFSET         0x100
+#define TAD_PRF_OFFSET         0x900
 #define TAD_PRF(counter)       (TAD_PRF_OFFSET | (counter << 3))
 #define TAD_PRF_CNTSEL_MASK    0xFF
 #define TAD_MAX_COUNTERS       8
@@ -100,9 +96,7 @@ static void tad_pmu_event_counter_start(struct perf_event *event, int flags)
         * which sets TAD()_PRF()[CNTSEL] != 0
         */
        for (i = 0; i < tad_pmu->region_cnt; i++) {
-               reg_val = readq_relaxed(tad_pmu->regions[i].base +
-                                       TAD_PRF(counter_idx));
-               reg_val |= (event_idx & 0xFF);
+               reg_val = event_idx & 0xFF;
                writeq_relaxed(reg_val, tad_pmu->regions[i].base +
                               TAD_PRF(counter_idx));
        }
index b2b8d20..2c96183 100644 (file)
@@ -121,7 +121,7 @@ u64 riscv_pmu_event_update(struct perf_event *event)
        return delta;
 }
 
-static void riscv_pmu_stop(struct perf_event *event, int flags)
+void riscv_pmu_stop(struct perf_event *event, int flags)
 {
        struct hw_perf_event *hwc = &event->hw;
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
@@ -175,7 +175,7 @@ int riscv_pmu_event_set_period(struct perf_event *event)
        return overflow;
 }
 
-static void riscv_pmu_start(struct perf_event *event, int flags)
+void riscv_pmu_start(struct perf_event *event, int flags)
 {
        struct hw_perf_event *hwc = &event->hw;
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
index dca3537..79a3de5 100644 (file)
 #include <linux/irqdomain.h>
 #include <linux/of_irq.h>
 #include <linux/of.h>
+#include <linux/cpu_pm.h>
 
 #include <asm/sbi.h>
 #include <asm/hwcap.h>
 
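+/*
+ * Expose the raw event encoding via sysfs: bits 0-47 of the config
+ * carry the event index, bit 63 marks a firmware event.
+ */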
+PMU_FORMAT_ATTR(event, "config:0-47");
+PMU_FORMAT_ATTR(firmware, "config:63");
+
+static struct attribute *riscv_arch_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_firmware.attr,
+       NULL,
+};
+
+static struct attribute_group riscv_pmu_format_group = {
+       .name = "format",
+       .attrs = riscv_arch_formats_attr,
+};
+
+static const struct attribute_group *riscv_pmu_attr_groups[] = {
+       &riscv_pmu_format_group,
+       NULL,
+};
+
 union sbi_pmu_ctr_info {
        unsigned long value;
        struct {
@@ -666,12 +686,15 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
                child = of_get_compatible_child(cpu, "riscv,cpu-intc");
                if (!child) {
                        pr_err("Failed to find INTC node\n");
+                       of_node_put(cpu);
                        return -ENODEV;
                }
                domain = irq_find_host(child);
                of_node_put(child);
-               if (domain)
+               if (domain) {
+                       of_node_put(cpu);
                        break;
+               }
        }
        if (!domain) {
                pr_err("Failed to find INTC IRQ root domain\n");
@@ -693,6 +716,73 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
        return 0;
 }
 
+#ifdef CONFIG_CPU_PM
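+/*
+ * Stop (and fold in) every active counter before the CPU enters a
+ * low-power state, and restart the counters once the CPU leaves it or
+ * the PM transition fails, so counters never carry stale hardware
+ * state across idle.
+ */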
+static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+                               void *v)
+{
+       struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
+       int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS);
+       struct perf_event *event;
+       int idx;
+
+       if (!enabled)
+               return NOTIFY_OK;
+
+       for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) {
+               event = cpuc->events[idx];
+               if (!event)
+                       continue;
+
+               switch (cmd) {
+               case CPU_PM_ENTER:
+                       /*
+                        * Stop and update the counter
+                        */
+                       riscv_pmu_stop(event, PERF_EF_UPDATE);
+                       break;
+               case CPU_PM_EXIT:
+               case CPU_PM_ENTER_FAILED:
+                       /*
+                        * Restore and enable the counter.
+                        *
+                        * This requires RCU read locking to be
+                        * functional; wrap the call in RCU_NONIDLE so
+                        * the RCU subsystem knows this CPU is not idle
+                        * from an RCU perspective for the duration of
+                        * the riscv_pmu_start() call.
+                        */
+                       RCU_NONIDLE(riscv_pmu_start(event, PERF_EF_RELOAD));
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return NOTIFY_OK;
+}
+
+static int riscv_pm_pmu_register(struct riscv_pmu *pmu)
+{
+       pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify;
+       return cpu_pm_register_notifier(&pmu->riscv_pm_nb);
+}
+
+static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu)
+{
+       cpu_pm_unregister_notifier(&pmu->riscv_pm_nb);
+}
+#else
+static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; }
+static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
+#endif
+
+static void riscv_pmu_destroy(struct riscv_pmu *pmu)
+{
+       riscv_pm_pmu_unregister(pmu);
+       cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
+}
+
 static int pmu_sbi_device_probe(struct platform_device *pdev)
 {
        struct riscv_pmu *pmu = NULL;
@@ -720,6 +810,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
                pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
                pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
        }
+       pmu->pmu.attr_groups = riscv_pmu_attr_groups;
        pmu->num_counters = num_counters;
        pmu->ctr_start = pmu_sbi_ctr_start;
        pmu->ctr_stop = pmu_sbi_ctr_stop;
@@ -733,14 +824,19 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       ret = riscv_pm_pmu_register(pmu);
+       if (ret)
+               goto out_unregister;
+
        ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
-       if (ret) {
-               cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
-               return ret;
-       }
+       if (ret)
+               goto out_unregister;
 
        return 0;
 
+out_unregister:
+       riscv_pmu_destroy(pmu);
+
 out_free:
        kfree(pmu);
        return ret;
index f52960d..bff144c 100644 (file)
@@ -32,7 +32,7 @@ config DEBUG_PINCTRL
          Say Y here to add some extra checks and diagnostics to PINCTRL calls.
 
 config PINCTRL_AMD
-       tristate "AMD GPIO pin control"
+       bool "AMD GPIO pin control"
        depends on HAS_IOMEM
        depends on ACPI || COMPILE_TEST
        select GPIOLIB
index c94e24a..83d47ff 100644 (file)
@@ -236,11 +236,11 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
                const struct aspeed_sig_expr **funcs;
                const struct aspeed_sig_expr ***prios;
 
-               pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name);
-
                if (!pdesc)
                        return -EINVAL;
 
+               pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name);
+
                prios = pdesc->prios;
 
                if (!prios)
index c0630f6..417e41b 100644 (file)
@@ -239,6 +239,7 @@ static const struct pinctrl_pin_desc imx93_pinctrl_pads[] = {
 static const struct imx_pinctrl_soc_info imx93_pinctrl_info = {
        .pins = imx93_pinctrl_pads,
        .npins = ARRAY_SIZE(imx93_pinctrl_pads),
+       .flags = ZERO_OFFSET_VALID,
        .gpr_compatible = "fsl,imx93-iomuxc-gpr",
 };
 
index a140b6b..bcde042 100644 (file)
@@ -102,7 +102,7 @@ struct armada_37xx_pinctrl {
        struct device                   *dev;
        struct gpio_chip                gpio_chip;
        struct irq_chip                 irq_chip;
-       spinlock_t                      irq_lock;
+       raw_spinlock_t                  irq_lock;
        struct pinctrl_desc             pctl;
        struct pinctrl_dev              *pctl_dev;
        struct armada_37xx_pin_group    *groups;
@@ -523,9 +523,9 @@ static void armada_37xx_irq_ack(struct irq_data *d)
        unsigned long flags;
 
        armada_37xx_irq_update_reg(&reg, d);
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        writel(d->mask, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 }
 
 static void armada_37xx_irq_mask(struct irq_data *d)
@@ -536,10 +536,10 @@ static void armada_37xx_irq_mask(struct irq_data *d)
        unsigned long flags;
 
        armada_37xx_irq_update_reg(&reg, d);
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        val = readl(info->base + reg);
        writel(val & ~d->mask, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 }
 
 static void armada_37xx_irq_unmask(struct irq_data *d)
@@ -550,10 +550,10 @@ static void armada_37xx_irq_unmask(struct irq_data *d)
        unsigned long flags;
 
        armada_37xx_irq_update_reg(&reg, d);
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        val = readl(info->base + reg);
        writel(val | d->mask, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 }
 
 static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -564,14 +564,14 @@ static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
        unsigned long flags;
 
        armada_37xx_irq_update_reg(&reg, d);
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        val = readl(info->base + reg);
        if (on)
                val |= (BIT(d->hwirq % GPIO_PER_REG));
        else
                val &= ~(BIT(d->hwirq % GPIO_PER_REG));
        writel(val, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 
        return 0;
 }
@@ -583,7 +583,7 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
        u32 val, reg = IRQ_POL;
        unsigned long flags;
 
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        armada_37xx_irq_update_reg(&reg, d);
        val = readl(info->base + reg);
        switch (type) {
@@ -607,11 +607,11 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
                break;
        }
        default:
-               spin_unlock_irqrestore(&info->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&info->irq_lock, flags);
                return -EINVAL;
        }
        writel(val, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 
        return 0;
 }
@@ -626,7 +626,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
 
        regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l);
 
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        p = readl(info->base + IRQ_POL + 4 * reg_idx);
        if ((p ^ l) & (1 << bit_num)) {
                /*
@@ -647,7 +647,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
                ret = -1;
        }
 
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
        return ret;
 }
 
@@ -664,11 +664,11 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
                u32 status;
                unsigned long flags;
 
-               spin_lock_irqsave(&info->irq_lock, flags);
+               raw_spin_lock_irqsave(&info->irq_lock, flags);
                status = readl_relaxed(info->base + IRQ_STATUS + 4 * i);
                /* Manage only the interrupt that was enabled */
                status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
-               spin_unlock_irqrestore(&info->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&info->irq_lock, flags);
                while (status) {
                        u32 hwirq = ffs(status) - 1;
                        u32 virq = irq_find_mapping(d, hwirq +
@@ -695,12 +695,12 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
 
 update_status:
                        /* Update status in case a new IRQ appears */
-                       spin_lock_irqsave(&info->irq_lock, flags);
+                       raw_spin_lock_irqsave(&info->irq_lock, flags);
                        status = readl_relaxed(info->base +
                                               IRQ_STATUS + 4 * i);
                        /* Manage only the interrupt that was enabled */
                        status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
-                       spin_unlock_irqrestore(&info->irq_lock, flags);
+                       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
                }
        }
        chained_irq_exit(chip, desc);
@@ -731,7 +731,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
        struct device *dev = &pdev->dev;
        unsigned int i, nr_irq_parent;
 
-       spin_lock_init(&info->irq_lock);
+       raw_spin_lock_init(&info->irq_lock);
 
        nr_irq_parent = of_irq_count(np);
        if (!nr_irq_parent) {
@@ -1107,25 +1107,40 @@ static const struct of_device_id armada_37xx_pinctrl_of_match[] = {
        { },
 };
 
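+/*
+ * The regmap is also accessed from the chained interrupt handler (see
+ * armada_37xx_edge_both_irq_swap_pol()), so its internal lock must be a
+ * raw spinlock to remain usable from hard interrupt context on
+ * PREEMPT_RT.
+ */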
+static const struct regmap_config armada_37xx_pinctrl_regmap_config = {
+       .reg_bits = 32,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .use_raw_spinlock = true,
+};
+
 static int __init armada_37xx_pinctrl_probe(struct platform_device *pdev)
 {
        struct armada_37xx_pinctrl *info;
        struct device *dev = &pdev->dev;
-       struct device_node *np = dev->of_node;
        struct regmap *regmap;
+       void __iomem *base;
        int ret;
 
+       base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+       if (IS_ERR(base)) {
+               dev_err(dev, "failed to ioremap base address: %pe\n", base);
+               return PTR_ERR(base);
+       }
+
+       regmap = devm_regmap_init_mmio(dev, base,
+                                      &armada_37xx_pinctrl_regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(dev, "failed to create regmap: %pe\n", regmap);
+               return PTR_ERR(regmap);
+       }
+
        info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
        info->dev = dev;
-
-       regmap = syscon_node_to_regmap(np);
-       if (IS_ERR(regmap))
-               return dev_err_probe(dev, PTR_ERR(regmap), "cannot get regmap\n");
        info->regmap = regmap;
-
        info->data = of_device_get_match_data(dev);
 
        ret = armada_37xx_pinctrl_register(pdev, info);
index 5f4a8c5..dfc8ea9 100644 (file)
 #define ocelot_clrsetbits(addr, clear, set) \
        writel((readl(addr) & ~(clear)) | (set), (addr))
 
-/* PINCONFIG bits (sparx5 only) */
 enum {
        PINCONF_BIAS,
        PINCONF_SCHMITT,
        PINCONF_DRIVE_STRENGTH,
 };
 
-#define BIAS_PD_BIT BIT(4)
-#define BIAS_PU_BIT BIT(3)
-#define BIAS_BITS   (BIAS_PD_BIT|BIAS_PU_BIT)
-#define SCHMITT_BIT BIT(2)
-#define DRIVE_BITS  GENMASK(1, 0)
-
 /* GPIO standard registers */
 #define OCELOT_GPIO_OUT_SET    0x0
 #define OCELOT_GPIO_OUT_CLR    0x4
@@ -321,6 +314,13 @@ struct ocelot_pin_caps {
        unsigned char a_functions[OCELOT_FUNC_PER_PIN]; /* Additional functions */
 };
 
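+/*
+ * Per-SoC layout of the pin configuration register. The bias, drive
+ * strength and Schmitt trigger bits sit at different positions on
+ * Sparx5 and LAN966x; a schmitt_bit of 0 means the SoC has no Schmitt
+ * trigger control and such requests return -EOPNOTSUPP.
+ */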
+struct ocelot_pincfg_data {
+       u8 pd_bit;
+       u8 pu_bit;
+       u8 drive_bits;
+       u8 schmitt_bit;
+};
+
 struct ocelot_pinctrl {
        struct device *dev;
        struct pinctrl_dev *pctl;
@@ -328,10 +328,16 @@ struct ocelot_pinctrl {
        struct regmap *map;
        struct regmap *pincfg;
        struct pinctrl_desc *desc;
+       const struct ocelot_pincfg_data *pincfg_data;
        struct ocelot_pmx_func func[FUNC_MAX];
        u8 stride;
 };
 
+struct ocelot_match_data {
+       struct pinctrl_desc desc;
+       struct ocelot_pincfg_data pincfg_data;
+};
+
 #define LUTON_P(p, f0, f1)                                             \
 static struct ocelot_pin_caps luton_pin_##p = {                                \
        .pin = p,                                                       \
@@ -1325,24 +1331,27 @@ static int ocelot_hw_get_value(struct ocelot_pinctrl *info,
        int ret = -EOPNOTSUPP;
 
        if (info->pincfg) {
+               const struct ocelot_pincfg_data *opd = info->pincfg_data;
                u32 regcfg;
 
-               ret = regmap_read(info->pincfg, pin, &regcfg);
+               ret = regmap_read(info->pincfg,
+                                 pin * regmap_get_reg_stride(info->pincfg),
+                                 &regcfg);
                if (ret)
                        return ret;
 
                ret = 0;
                switch (reg) {
                case PINCONF_BIAS:
-                       *val = regcfg & BIAS_BITS;
+                       *val = regcfg & (opd->pd_bit | opd->pu_bit);
                        break;
 
                case PINCONF_SCHMITT:
-                       *val = regcfg & SCHMITT_BIT;
+                       *val = regcfg & opd->schmitt_bit;
                        break;
 
                case PINCONF_DRIVE_STRENGTH:
-                       *val = regcfg & DRIVE_BITS;
+                       *val = regcfg & opd->drive_bits;
                        break;
 
                default:
@@ -1359,14 +1368,18 @@ static int ocelot_pincfg_clrsetbits(struct ocelot_pinctrl *info, u32 regaddr,
        u32 val;
        int ret;
 
-       ret = regmap_read(info->pincfg, regaddr, &val);
+       ret = regmap_read(info->pincfg,
+                         regaddr * regmap_get_reg_stride(info->pincfg),
+                         &val);
        if (ret)
                return ret;
 
        val &= ~clrbits;
        val |= setbits;
 
-       ret = regmap_write(info->pincfg, regaddr, val);
+       ret = regmap_write(info->pincfg,
+                          regaddr * regmap_get_reg_stride(info->pincfg),
+                          val);
 
        return ret;
 }
@@ -1379,23 +1392,27 @@ static int ocelot_hw_set_value(struct ocelot_pinctrl *info,
        int ret = -EOPNOTSUPP;
 
        if (info->pincfg) {
+               const struct ocelot_pincfg_data *opd = info->pincfg_data;
 
                ret = 0;
                switch (reg) {
                case PINCONF_BIAS:
-                       ret = ocelot_pincfg_clrsetbits(info, pin, BIAS_BITS,
+                       ret = ocelot_pincfg_clrsetbits(info, pin,
+                                                      opd->pd_bit | opd->pu_bit,
                                                       val);
                        break;
 
                case PINCONF_SCHMITT:
-                       ret = ocelot_pincfg_clrsetbits(info, pin, SCHMITT_BIT,
+                       ret = ocelot_pincfg_clrsetbits(info, pin,
+                                                      opd->schmitt_bit,
                                                       val);
                        break;
 
                case PINCONF_DRIVE_STRENGTH:
                        if (val <= 3)
                                ret = ocelot_pincfg_clrsetbits(info, pin,
-                                                              DRIVE_BITS, val);
+                                                              opd->drive_bits,
+                                                              val);
                        else
                                ret = -EINVAL;
                        break;
@@ -1425,17 +1442,20 @@ static int ocelot_pinconf_get(struct pinctrl_dev *pctldev,
                if (param == PIN_CONFIG_BIAS_DISABLE)
                        val = (val == 0);
                else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
-                       val = (val & BIAS_PD_BIT ? true : false);
+                       val = !!(val & info->pincfg_data->pd_bit);
                else    /* PIN_CONFIG_BIAS_PULL_UP */
-                       val = (val & BIAS_PU_BIT ? true : false);
+                       val = !!(val & info->pincfg_data->pu_bit);
                break;
 
        case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+               if (!info->pincfg_data->schmitt_bit)
+                       return -EOPNOTSUPP;
+
                err = ocelot_hw_get_value(info, pin, PINCONF_SCHMITT, &val);
                if (err)
                        return err;
 
-               val = (val & SCHMITT_BIT ? true : false);
+               val = !!(val & info->pincfg_data->schmitt_bit);
                break;
 
        case PIN_CONFIG_DRIVE_STRENGTH:
@@ -1479,6 +1499,7 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                              unsigned long *configs, unsigned int num_configs)
 {
        struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+       const struct ocelot_pincfg_data *opd = info->pincfg_data;
        u32 param, arg, p;
        int cfg, err = 0;
 
@@ -1491,8 +1512,8 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                case PIN_CONFIG_BIAS_PULL_UP:
                case PIN_CONFIG_BIAS_PULL_DOWN:
                        arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 :
-                       (param == PIN_CONFIG_BIAS_PULL_UP) ? BIAS_PU_BIT :
-                       BIAS_PD_BIT;
+                             (param == PIN_CONFIG_BIAS_PULL_UP) ?
+                               opd->pu_bit : opd->pd_bit;
 
                        err = ocelot_hw_set_value(info, pin, PINCONF_BIAS, arg);
                        if (err)
@@ -1501,7 +1522,10 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                        break;
 
                case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
-                       arg = arg ? SCHMITT_BIT : 0;
+                       if (!opd->schmitt_bit)
+                               return -EOPNOTSUPP;
+
+                       arg = arg ? opd->schmitt_bit : 0;
                        err = ocelot_hw_set_value(info, pin, PINCONF_SCHMITT,
                                                  arg);
                        if (err)
@@ -1562,69 +1586,94 @@ static const struct pinctrl_ops ocelot_pctl_ops = {
        .dt_free_map = pinconf_generic_dt_free_map,
 };
 
-static struct pinctrl_desc luton_desc = {
-       .name = "luton-pinctrl",
-       .pins = luton_pins,
-       .npins = ARRAY_SIZE(luton_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data luton_desc = {
+       .desc = {
+               .name = "luton-pinctrl",
+               .pins = luton_pins,
+               .npins = ARRAY_SIZE(luton_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc serval_desc = {
-       .name = "serval-pinctrl",
-       .pins = serval_pins,
-       .npins = ARRAY_SIZE(serval_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data serval_desc = {
+       .desc = {
+               .name = "serval-pinctrl",
+               .pins = serval_pins,
+               .npins = ARRAY_SIZE(serval_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc ocelot_desc = {
-       .name = "ocelot-pinctrl",
-       .pins = ocelot_pins,
-       .npins = ARRAY_SIZE(ocelot_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data ocelot_desc = {
+       .desc = {
+               .name = "ocelot-pinctrl",
+               .pins = ocelot_pins,
+               .npins = ARRAY_SIZE(ocelot_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc jaguar2_desc = {
-       .name = "jaguar2-pinctrl",
-       .pins = jaguar2_pins,
-       .npins = ARRAY_SIZE(jaguar2_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data jaguar2_desc = {
+       .desc = {
+               .name = "jaguar2-pinctrl",
+               .pins = jaguar2_pins,
+               .npins = ARRAY_SIZE(jaguar2_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc servalt_desc = {
-       .name = "servalt-pinctrl",
-       .pins = servalt_pins,
-       .npins = ARRAY_SIZE(servalt_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data servalt_desc = {
+       .desc = {
+               .name = "servalt-pinctrl",
+               .pins = servalt_pins,
+               .npins = ARRAY_SIZE(servalt_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc sparx5_desc = {
-       .name = "sparx5-pinctrl",
-       .pins = sparx5_pins,
-       .npins = ARRAY_SIZE(sparx5_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .confops = &ocelot_confops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data sparx5_desc = {
+       .desc = {
+               .name = "sparx5-pinctrl",
+               .pins = sparx5_pins,
+               .npins = ARRAY_SIZE(sparx5_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .confops = &ocelot_confops,
+               .owner = THIS_MODULE,
+       },
+       .pincfg_data = {
+               .pd_bit = BIT(4),
+               .pu_bit = BIT(3),
+               .drive_bits = GENMASK(1, 0),
+               .schmitt_bit = BIT(2),
+       },
 };
 
-static struct pinctrl_desc lan966x_desc = {
-       .name = "lan966x-pinctrl",
-       .pins = lan966x_pins,
-       .npins = ARRAY_SIZE(lan966x_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &lan966x_pmx_ops,
-       .confops = &ocelot_confops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data lan966x_desc = {
+       .desc = {
+               .name = "lan966x-pinctrl",
+               .pins = lan966x_pins,
+               .npins = ARRAY_SIZE(lan966x_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &lan966x_pmx_ops,
+               .confops = &ocelot_confops,
+               .owner = THIS_MODULE,
+       },
+       .pincfg_data = {
+               .pd_bit = BIT(3),
+               .pu_bit = BIT(2),
+               .drive_bits = GENMASK(1, 0),
+       },
 };
 
 static int ocelot_create_group_func_map(struct device *dev,
@@ -1890,7 +1939,8 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = {
        {},
 };
 
-static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
+static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev,
+                                                  const struct ocelot_pinctrl *info)
 {
        void __iomem *base;
 
@@ -1898,7 +1948,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
                .reg_bits = 32,
                .val_bits = 32,
                .reg_stride = 4,
-               .max_register = 32,
+               .max_register = info->desc->npins * 4,
                .name = "pincfg",
        };
 
@@ -1913,6 +1963,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
 
 static int ocelot_pinctrl_probe(struct platform_device *pdev)
 {
+       const struct ocelot_match_data *data;
        struct device *dev = &pdev->dev;
        struct ocelot_pinctrl *info;
        struct reset_control *reset;
@@ -1929,7 +1980,16 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
        if (!info)
                return -ENOMEM;
 
-       info->desc = (struct pinctrl_desc *)device_get_match_data(dev);
+       data = device_get_match_data(dev);
+       if (!data)
+               return -EINVAL;
+
+       info->desc = devm_kmemdup(dev, &data->desc, sizeof(*info->desc),
+                                 GFP_KERNEL);
+       if (!info->desc)
+               return -ENOMEM;
+
+       info->pincfg_data = &data->pincfg_data;
 
        reset = devm_reset_control_get_optional_shared(dev, "switch");
        if (IS_ERR(reset))
@@ -1956,7 +2016,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
 
        /* Pinconf registers */
        if (info->desc->confops) {
-               pincfg = ocelot_pinctrl_create_pincfg(pdev);
+               pincfg = ocelot_pinctrl_create_pincfg(pdev, info);
                if (IS_ERR(pincfg))
                        dev_dbg(dev, "Failed to create pincfg regmap\n");
                else
index 63429a2..770862f 100644 (file)
@@ -266,6 +266,8 @@ static int ralink_pinctrl_pins(struct ralink_priv *p)
                                                p->func[i]->pin_count,
                                                sizeof(int),
                                                GFP_KERNEL);
+               if (!p->func[i]->pins)
+                       return -ENOMEM;
                for (j = 0; j < p->func[i]->pin_count; j++)
                        p->func[i]->pins[j] = p->func[i]->pin_first + j;
 
index 57a33fb..14bcca7 100644 (file)
@@ -1338,16 +1338,18 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
        bank->secure_control = pctl->match_data->secure_control;
        spin_lock_init(&bank->lock);
 
-       /* create irq hierarchical domain */
-       bank->fwnode = fwnode;
+       if (pctl->domain) {
+               /* create irq hierarchical domain */
+               bank->fwnode = fwnode;
 
-       bank->domain = irq_domain_create_hierarchy(pctl->domain, 0,
-                                       STM32_GPIO_IRQ_LINE, bank->fwnode,
-                                       &stm32_gpio_domain_ops, bank);
+               bank->domain = irq_domain_create_hierarchy(pctl->domain, 0, STM32_GPIO_IRQ_LINE,
+                                                          bank->fwnode, &stm32_gpio_domain_ops,
+                                                          bank);
 
-       if (!bank->domain) {
-               err = -ENODEV;
-               goto err_clk;
+               if (!bank->domain) {
+                       err = -ENODEV;
+                       goto err_clk;
+               }
        }
 
        err = gpiochip_add_data(&bank->gpio_chip, bank);
@@ -1510,6 +1512,8 @@ int stm32_pctl_probe(struct platform_device *pdev)
        pctl->domain = stm32_pctrl_get_irq_domain(pdev);
        if (IS_ERR(pctl->domain))
                return PTR_ERR(pctl->domain);
+       if (!pctl->domain)
+               dev_warn(dev, "pinctrl without interrupt support\n");
 
        /* hwspinlock is optional */
        hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
index 3ba4704..2b3335a 100644 (file)
@@ -871,6 +871,9 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
        }
 
        *map = kcalloc(*num_maps + nmG, sizeof(**map), GFP_KERNEL);
+       if (*map == NULL)
+               return -ENOMEM;
+
        for (i = 0; i < (*num_maps); i++) {
                dt_pin = be32_to_cpu(list[i]);
                pin_num = FIELD_GET(GENMASK(31, 24), dt_pin);
index 4ada803..b5c1a8f 100644 (file)
@@ -158,26 +158,26 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "nand"),          /* DQ6 */
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* DQ6 */
                  SUNXI_FUNCTION(0x3, "mmc2")),         /* D6 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "nand"),          /* DQ7 */
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* DQ7 */
                  SUNXI_FUNCTION(0x3, "mmc2")),         /* D7 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "nand"),          /* DQS */
+                 SUNXI_FUNCTION(0x2, "nand0"),         /* DQS */
                  SUNXI_FUNCTION(0x3, "mmc2")),         /* RST */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "nand")),         /* CE2 */
+                 SUNXI_FUNCTION(0x2, "nand0")),        /* CE2 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "nand")),         /* CE3 */
+                 SUNXI_FUNCTION(0x2, "nand0")),        /* CE3 */
        /* Hole */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
index d9327d7..dd92840 100644 (file)
@@ -544,6 +544,8 @@ static int sunxi_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
        struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
        int i;
 
+       pin -= pctl->desc->pin_base;
+
        for (i = 0; i < num_configs; i++) {
                enum pin_config_param param;
                unsigned long flags;
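
The added line rebases the pin number before the per-pin loop runs. A short sketch of why, assuming a secondary pin controller whose pins are numbered after the primary one:

    /* The pinctrl core hands in a global pin number; a controller with
     * a non-zero pin_base must convert it to a local index before
     * touching its own registers or per-pin tables. */
    unsigned int local_pin = pin - pctl->desc->pin_base;
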
index 2923daf..7b9c107 100644 (file)
@@ -890,6 +890,7 @@ nvsw_sn2201_create_static_devices(struct nvsw_sn2201 *nvsw_sn2201,
                                  int size)
 {
        struct mlxreg_hotplug_device *dev = devs;
+       int ret;
        int i;
 
        /* Create I2C static devices. */
@@ -901,6 +902,7 @@ nvsw_sn2201_create_static_devices(struct nvsw_sn2201 *nvsw_sn2201,
                                dev->nr, dev->brdinfo->addr);
 
                        dev->adapter = NULL;
+                       ret = PTR_ERR(dev->client);
                        goto fail_create_static_devices;
                }
        }
@@ -914,7 +916,7 @@ fail_create_static_devices:
                dev->client = NULL;
                dev->adapter = NULL;
        }
-       return IS_ERR(dev->client);
+       return ret;
 }
 
 static void nvsw_sn2201_destroy_static_devices(struct nvsw_sn2201 *nvsw_sn2201,
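
The fix above addresses a classic errno-propagation bug: IS_ERR() returns a boolean (0 or 1), so returning it from a function expected to yield a negative errno silently turned failures into "1". A hedged sketch of the correct ERR_PTR idiom (names illustrative):

    struct i2c_client *client = create_client();  /* illustrative */
    if (IS_ERR(client)) {            /* true for any ERR_PTR() value */
            ret = PTR_ERR(client);   /* recover the encoded -errno   */
            goto fail;
    }
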
index f08ad85..bc4013e 100644 (file)
@@ -945,6 +945,8 @@ config PANASONIC_LAPTOP
        tristate "Panasonic Laptop Extras"
        depends on INPUT && ACPI
        depends on BACKLIGHT_CLASS_DEVICE
+       depends on ACPI_VIDEO=n || ACPI_VIDEO
+       depends on SERIO_I8042 || SERIO_I8042 = n
        select INPUT_SPARSEKMAP
        help
          This driver adds support for access to backlight control and hotkeys
index f11d18b..700eb19 100644 (file)
@@ -91,6 +91,8 @@
 #define AMD_CPU_ID_PCO                 AMD_CPU_ID_RV
 #define AMD_CPU_ID_CZN                 AMD_CPU_ID_RN
 #define AMD_CPU_ID_YC                  0x14B5
+#define AMD_CPU_ID_CB                  0x14D8
+#define AMD_CPU_ID_PS                  0x14E8
 
 #define PMC_MSG_DELAY_MIN_US           50
 #define RESPONSE_REGISTER_LOOP_MAX     20000
@@ -318,6 +320,8 @@ static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
                val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
                break;
        case AMD_CPU_ID_YC:
+       case AMD_CPU_ID_CB:
+       case AMD_CPU_ID_PS:
                val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
                break;
        default:
@@ -491,7 +495,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
                            &amd_pmc_idlemask_fops);
        /* Enable STB only when the module_param is set */
        if (enable_stb) {
-               if (dev->cpu_id == AMD_CPU_ID_YC)
+               if (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB ||
+                   dev->cpu_id == AMD_CPU_ID_PS)
                        debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
                                            &amd_pmc_stb_debugfs_fops_v2);
                else
@@ -615,6 +620,8 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
                return MSG_OS_HINT_PCO;
        case AMD_CPU_ID_RN:
        case AMD_CPU_ID_YC:
+       case AMD_CPU_ID_CB:
+       case AMD_CPU_ID_PS:
                return MSG_OS_HINT_RN;
        }
        return -EINVAL;
@@ -735,6 +742,8 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
 #endif
 
 static const struct pci_device_id pmc_pci_ids[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CB) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
@@ -877,7 +886,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
 
        mutex_init(&dev->lock);
 
-       if (enable_stb && dev->cpu_id == AMD_CPU_ID_YC) {
+       if (enable_stb && (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB)) {
                err = amd_pmc_s2d_init(dev);
                if (err)
                        return err;
@@ -915,6 +924,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
        {"AMDI0005", 0},
        {"AMDI0006", 0},
        {"AMDI0007", 0},
+       {"AMDI0008", 0},
        {"AMD0004", 0},
        {"AMD0005", 0},
        { }
index 57a07db..478dd30 100644 (file)
@@ -522,6 +522,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
        { KE_KEY, 0x32, { KEY_MUTE } },
        { KE_KEY, 0x35, { KEY_SCREENLOCK } },
+       { KE_KEY, 0x38, { KEY_PROG3 } }, /* Armoury Crate */
        { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
        { KE_KEY, 0x41, { KEY_NEXTSONG } },
        { KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
@@ -574,6 +575,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */
        { KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */
        { KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */
+       { KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */
        { KE_KEY, 0xB5, { KEY_CALC } },
        { KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
        { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
index 497ad2f..5e7e665 100644 (file)
@@ -150,6 +150,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660 GAMING X DDR4"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660I AORUS PRO DDR4"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z490 AORUS ELITE AC"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
index 0d8cb22..bc7020e 100644 (file)
@@ -89,6 +89,7 @@ enum hp_wmi_event_ids {
        HPWMI_BACKLIT_KB_BRIGHTNESS     = 0x0D,
        HPWMI_PEAKSHIFT_PERIOD          = 0x0F,
        HPWMI_BATTERY_CHARGE_PERIOD     = 0x10,
+       HPWMI_SANITIZATION_MODE         = 0x17,
 };
 
 /*
@@ -853,6 +854,8 @@ static void hp_wmi_notify(u32 value, void *context)
                break;
        case HPWMI_BATTERY_CHARGE_PERIOD:
                break;
+       case HPWMI_SANITIZATION_MODE:
+               break;
        default:
                pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
                break;
index 3ccb7b7..abd0c81 100644 (file)
@@ -152,6 +152,10 @@ static bool no_bt_rfkill;
 module_param(no_bt_rfkill, bool, 0444);
 MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
 
+static bool allow_v4_dytc;
+module_param(allow_v4_dytc, bool, 0444);
+MODULE_PARM_DESC(allow_v4_dytc, "Enable DYTC version 4 platform-profile support.");
+
 /*
  * ACPI Helpers
  */
@@ -871,12 +875,18 @@ static void dytc_profile_refresh(struct ideapad_private *priv)
 static const struct dmi_system_id ideapad_dytc_v4_allow_table[] = {
        {
                /* Ideapad 5 Pro 16ACH6 */
-               .ident = "LENOVO 82L5",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "82L5")
                }
        },
+       {
+               /* Ideapad 5 15ITL05 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "IdeaPad 5 15ITL05")
+               }
+       },
        {}
 };
 
@@ -901,13 +911,16 @@ static int ideapad_dytc_profile_init(struct ideapad_private *priv)
 
        dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
 
-       if (dytc_version < 5) {
-               if (dytc_version < 4 || !dmi_check_system(ideapad_dytc_v4_allow_table)) {
-                       dev_info(&priv->platform_device->dev,
-                                "DYTC_VERSION is less than 4 or is not allowed: %d\n",
-                                dytc_version);
-                       return -ENODEV;
-               }
+       if (dytc_version < 4) {
+               dev_info(&priv->platform_device->dev, "DYTC_VERSION < 4 is not supported\n");
+               return -ENODEV;
+       }
+
+       if (dytc_version < 5 &&
+           !(allow_v4_dytc || dmi_check_system(ideapad_dytc_v4_allow_table))) {
+               dev_info(&priv->platform_device->dev,
+                        "DYTC_VERSION 4 support may not work. Pass ideapad_laptop.allow_v4_dytc=Y on the kernel commandline to enable\n");
+               return -ENODEV;
        }
 
        priv->dytc = kzalloc(sizeof(*priv->dytc), GFP_KERNEL);
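
After the restructuring, the gating logic reads as three tiers. A condensed sketch:

    if (dytc_version < 4)           /* too old: never supported    */
            return -ENODEV;
    if (dytc_version < 5 &&         /* v4: opt-in only             */
        !(allow_v4_dytc || dmi_check_system(ideapad_dytc_v4_allow_table)))
            return -ENODEV;
    /* v5+, or an allowed v4 machine: register the platform profile */
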
index 5935dfc..10077a6 100644 (file)
@@ -50,7 +50,8 @@ static const struct dmi_system_id atomisp2_led_systems[] __initconst = {
        {
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+                       /* Non exact match to also match T100TAF */
+                       DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
                },
                .driver_data = &asus_t100ta_lookup,
        },
index 7ce8964..c341a27 100644 (file)
@@ -1,6 +1,9 @@
 config INTEL_IFS
        tristate "Intel In Field Scan"
        depends on X86 && CPU_SUP_INTEL && 64BIT && SMP
+       # Discussion on the list has shown that the sysfs API needs a bit
+       # more work; mark this as broken for now
+       depends on BROKEN
        select INTEL_IFS_DEVICE
        help
          Enable support for the In Field Scan capability in select
index 40183bd..a1fe1e0 100644 (file)
@@ -1911,6 +1911,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,      &icl_reg_map),
        X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,          &tgl_reg_map),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,         &tgl_reg_map),
+       X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N,         &tgl_reg_map),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           &adl_reg_map),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,        &tgl_reg_map),
        {}
index 37850d0..615e39c 100644 (file)
  *             - v0.1  start from toshiba_acpi driver written by John Belmonte
  */
 
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
+#include <linux/acpi.h>
 #include <linux/backlight.h>
 #include <linux/ctype.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-#include <linux/acpi.h>
+#include <linux/i8042.h>
+#include <linux/init.h>
 #include <linux/input.h>
 #include <linux/input/sparse-keymap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
-
+#include <linux/seq_file.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <acpi/video.h>
 
 MODULE_AUTHOR("Hiroshi Miura <miura@da-cha.org>");
 MODULE_AUTHOR("David Bronaugh <dbronaugh@linuxboxen.org>");
@@ -241,6 +243,42 @@ struct pcc_acpi {
        struct platform_device  *platform;
 };
 
+/*
+ * On some Panasonic models the volume up / down / mute keys send duplicate
+ * keypress events over the PS/2 kbd interface; filter these out.
+ */
+static bool panasonic_i8042_filter(unsigned char data, unsigned char str,
+                                  struct serio *port)
+{
+       static bool extended;
+
+       if (str & I8042_STR_AUXDATA)
+               return false;
+
+       if (data == 0xe0) {
+               extended = true;
+               return true;
+       } else if (extended) {
+               extended = false;
+
+               switch (data & 0x7f) {
+               case 0x20: /* e0 20 / e0 a0, Volume Mute press / release */
+               case 0x2e: /* e0 2e / e0 ae, Volume Down press / release */
+               case 0x30: /* e0 30 / e0 b0, Volume Up press / release */
+                       return true;
+               default:
+                       /*
+                        * Report the previously filtered e0 before continuing
+                        * with the next non-filtered byte.
+                        */
+                       serio_interrupt(port, 0xe0, 0);
+                       return false;
+               }
+       }
+
+       return false;
+}
+
 /* method access functions */
 static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val)
 {
@@ -762,6 +800,8 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
        struct input_dev *hotk_input_dev = pcc->input_dev;
        int rc;
        unsigned long long result;
+       unsigned int key;
+       unsigned int updown;
 
        rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY,
                                   NULL, &result);
@@ -770,20 +810,27 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
                return;
        }
 
+       key = result & 0xf;
+       updown = result & 0x80; /* 0x80 == key down; 0x00 == key up */
+
        /* hack: some firmware sends no key down for sleep / hibernate */
-       if ((result & 0xf) == 0x7 || (result & 0xf) == 0xa) {
-               if (result & 0x80)
+       if (key == 7 || key == 10) {
+               if (updown)
                        sleep_keydown_seen = 1;
                if (!sleep_keydown_seen)
                        sparse_keymap_report_event(hotk_input_dev,
-                                       result & 0xf, 0x80, false);
+                                       key, 0x80, false);
        }
 
-       if ((result & 0xf) == 0x7 || (result & 0xf) == 0x9 || (result & 0xf) == 0xa) {
-               if (!sparse_keymap_report_event(hotk_input_dev,
-                                               result & 0xf, result & 0x80, false))
-                       pr_err("Unknown hotkey event: 0x%04llx\n", result);
-       }
+       /*
+        * Don't report brightness key-presses if they are also reported
+        * by the ACPI video bus.
+        */
+       if ((key == 1 || key == 2) && acpi_video_handles_brightness_key_presses())
+               return;
+
+       if (!sparse_keymap_report_event(hotk_input_dev, key, updown, false))
+               pr_err("Unknown hotkey event: 0x%04llx\n", result);
 }
 
 static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event)
@@ -997,6 +1044,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
                pcc->platform = NULL;
        }
 
+       i8042_install_filter(panasonic_i8042_filter);
        return 0;
 
 out_platform:
@@ -1020,6 +1068,8 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device)
        if (!device || !pcc)
                return -EINVAL;
 
+       i8042_remove_filter(panasonic_i8042_filter);
+
        if (pcc->platform) {
                device_remove_file(&pcc->platform->dev, &dev_attr_cdpower);
                platform_device_unregister(pcc->platform);
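
Taken together, the filter's lifecycle in this driver is symmetric: installed once at the end of a successful add, removed first thing in remove. Returning true from the filter swallows a byte; a swallowed 0xe0 prefix that turns out not to precede a filtered volume key is re-injected with serio_interrupt() so the keyboard stream stays intact.
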
index e6cb4a1..a8b3830 100644 (file)
@@ -4529,6 +4529,7 @@ static void thinkpad_acpi_amd_s2idle_restore(void)
        iounmap(addr);
 cleanup_resource:
        release_resource(res);
+       kfree(res);
 }
 
 static struct acpi_s2idle_dev_ops thinkpad_acpi_s2idle_dev_ops = {
@@ -10299,21 +10300,15 @@ static struct ibm_struct proxsensor_driver_data = {
 #define DYTC_DISABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_MMC_BALANCE, 0)
 #define DYTC_ENABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_MMC_BALANCE, 1)
 
-enum dytc_profile_funcmode {
-       DYTC_FUNCMODE_NONE = 0,
-       DYTC_FUNCMODE_MMC,
-       DYTC_FUNCMODE_PSC,
-};
-
-static enum dytc_profile_funcmode dytc_profile_available;
 static enum platform_profile_option dytc_current_profile;
 static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
 static DEFINE_MUTEX(dytc_mutex);
+static int dytc_capabilities;
 static bool dytc_mmc_get_available;
 
 static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *profile)
 {
-       if (dytc_profile_available == DYTC_FUNCMODE_MMC) {
+       if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
                switch (dytcmode) {
                case DYTC_MODE_MMC_LOWPOWER:
                        *profile = PLATFORM_PROFILE_LOW_POWER;
@@ -10330,7 +10325,7 @@ static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *p
                }
                return 0;
        }
-       if (dytc_profile_available == DYTC_FUNCMODE_PSC) {
+       if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
                switch (dytcmode) {
                case DYTC_MODE_PSC_LOWPOWER:
                        *profile = PLATFORM_PROFILE_LOW_POWER;
@@ -10352,21 +10347,21 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe
 {
        switch (profile) {
        case PLATFORM_PROFILE_LOW_POWER:
-               if (dytc_profile_available == DYTC_FUNCMODE_MMC)
+               if (dytc_capabilities & BIT(DYTC_FC_MMC))
                        *perfmode = DYTC_MODE_MMC_LOWPOWER;
-               else if (dytc_profile_available == DYTC_FUNCMODE_PSC)
+               else if (dytc_capabilities & BIT(DYTC_FC_PSC))
                        *perfmode = DYTC_MODE_PSC_LOWPOWER;
                break;
        case PLATFORM_PROFILE_BALANCED:
-               if (dytc_profile_available == DYTC_FUNCMODE_MMC)
+               if (dytc_capabilities & BIT(DYTC_FC_MMC))
                        *perfmode = DYTC_MODE_MMC_BALANCE;
-               else if (dytc_profile_available == DYTC_FUNCMODE_PSC)
+               else if (dytc_capabilities & BIT(DYTC_FC_PSC))
                        *perfmode = DYTC_MODE_PSC_BALANCE;
                break;
        case PLATFORM_PROFILE_PERFORMANCE:
-               if (dytc_profile_available == DYTC_FUNCMODE_MMC)
+               if (dytc_capabilities & BIT(DYTC_FC_MMC))
                        *perfmode = DYTC_MODE_MMC_PERFORM;
-               else if (dytc_profile_available == DYTC_FUNCMODE_PSC)
+               else if (dytc_capabilities & BIT(DYTC_FC_PSC))
                        *perfmode = DYTC_MODE_PSC_PERFORM;
                break;
        default: /* Unknown profile */
@@ -10445,7 +10440,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
        if (err)
                goto unlock;
 
-       if (dytc_profile_available == DYTC_FUNCMODE_MMC) {
+       if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
                if (profile == PLATFORM_PROFILE_BALANCED) {
                        /*
                         * To get back to balanced mode we need to issue a reset command.
@@ -10464,7 +10459,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
                                goto unlock;
                }
        }
-       if (dytc_profile_available == DYTC_FUNCMODE_PSC) {
+       if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
                err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
                if (err)
                        goto unlock;
@@ -10483,12 +10478,12 @@ static void dytc_profile_refresh(void)
        int perfmode;
 
        mutex_lock(&dytc_mutex);
-       if (dytc_profile_available == DYTC_FUNCMODE_MMC) {
+       if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
                if (dytc_mmc_get_available)
                        err = dytc_command(DYTC_CMD_MMC_GET, &output);
                else
                        err = dytc_cql_command(DYTC_CMD_GET, &output);
-       } else if (dytc_profile_available == DYTC_FUNCMODE_PSC)
+       } else if (dytc_capabilities & BIT(DYTC_FC_PSC))
                err = dytc_command(DYTC_CMD_GET, &output);
 
        mutex_unlock(&dytc_mutex);
@@ -10517,7 +10512,6 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
        set_bit(PLATFORM_PROFILE_BALANCED, dytc_profile.choices);
        set_bit(PLATFORM_PROFILE_PERFORMANCE, dytc_profile.choices);
 
-       dytc_profile_available = DYTC_FUNCMODE_NONE;
        err = dytc_command(DYTC_CMD_QUERY, &output);
        if (err)
                return err;
@@ -10530,13 +10524,12 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
                return -ENODEV;
 
        /* Check what capabilities are supported */
-       err = dytc_command(DYTC_CMD_FUNC_CAP, &output);
+       err = dytc_command(DYTC_CMD_FUNC_CAP, &dytc_capabilities);
        if (err)
                return err;
 
-       if (output & BIT(DYTC_FC_MMC)) { /* MMC MODE */
-               dytc_profile_available = DYTC_FUNCMODE_MMC;
-
+       if (dytc_capabilities & BIT(DYTC_FC_MMC)) { /* MMC MODE */
+               pr_debug("MMC is supported\n");
                /*
                 * Check if MMC_GET functionality available
                 * Version > 6 and return success from MMC_GET command
@@ -10547,8 +10540,13 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
                        if (!err && ((output & DYTC_ERR_MASK) == DYTC_ERR_SUCCESS))
                                dytc_mmc_get_available = true;
                }
-       } else if (output & BIT(DYTC_FC_PSC)) { /* PSC MODE */
-               dytc_profile_available = DYTC_FUNCMODE_PSC;
+       } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
+               /* Support for this only works on AMD platforms */
+               if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+                       dbg_printk(TPACPI_DBG_INIT, "PSC not supported on Intel platforms\n");
+                       return -ENODEV;
+               }
+               pr_debug("PSC is supported\n");
        } else {
                dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n");
                return -ENODEV;
@@ -10574,7 +10572,6 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
 
 static void dytc_profile_exit(void)
 {
-       dytc_profile_available = DYTC_FUNCMODE_NONE;
        platform_profile_remove();
 }
 
index f446be7..4803759 100644 (file)
@@ -27,8 +27,8 @@
 #include <linux/pinctrl/machine.h>
 #include <linux/platform_data/lp855x.h>
 #include <linux/platform_device.h>
-#include <linux/pm.h>
 #include <linux/power/bq24190_charger.h>
+#include <linux/reboot.h>
 #include <linux/rmi.h>
 #include <linux/serdev.h>
 #include <linux/spi/spi.h>
@@ -889,6 +889,7 @@ static const struct pinctrl_map lenovo_yoga_tab2_830_1050_codec_pinctrl_map =
                          "INT33FC:02", "pmu_clk2_grp", "pmu_clk");
 
 static struct pinctrl *lenovo_yoga_tab2_830_1050_codec_pinctrl;
+static struct sys_off_handler *lenovo_yoga_tab2_830_1050_sys_off_handler;
 
 static int __init lenovo_yoga_tab2_830_1050_init_codec(void)
 {
@@ -933,9 +934,11 @@ err_put_device:
  * followed by a normal 3 second press to recover. Avoid this by doing an EFI
  * poweroff instead.
  */
-static void lenovo_yoga_tab2_830_1050_power_off(void)
+static int lenovo_yoga_tab2_830_1050_power_off(struct sys_off_data *data)
 {
        efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
+
+       return NOTIFY_DONE;
 }
 
 static int __init lenovo_yoga_tab2_830_1050_init(void)
@@ -950,13 +953,19 @@ static int __init lenovo_yoga_tab2_830_1050_init(void)
        if (ret)
                return ret;
 
-       pm_power_off = lenovo_yoga_tab2_830_1050_power_off;
+       /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */
+       lenovo_yoga_tab2_830_1050_sys_off_handler =
+               register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_FIRMWARE + 1,
+                                        lenovo_yoga_tab2_830_1050_power_off, NULL);
+       if (IS_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler))
+               return PTR_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler);
+
        return 0;
 }
 
 static void lenovo_yoga_tab2_830_1050_exit(void)
 {
-       pm_power_off = NULL; /* Just turn poweroff into halt on module unload */
+       unregister_sys_off_handler(lenovo_yoga_tab2_830_1050_sys_off_handler);
 
        if (lenovo_yoga_tab2_830_1050_codec_pinctrl) {
                pinctrl_put(lenovo_yoga_tab2_830_1050_codec_pinctrl);
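
The migration above moves from the single global pm_power_off hook to the chained sys-off API, where multiple handlers coexist and run in priority order. A hedged sketch of the lifecycle (error handling trimmed):

    static int my_power_off(struct sys_off_data *data)
    {
            /* platform-specific poweroff */
            return NOTIFY_DONE;   /* fall through to lower-priority handlers */
    }

    handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
                                       SYS_OFF_PRIO_FIRMWARE + 1,
                                       my_power_off, NULL);
    if (IS_ERR(handler))
            return PTR_ERR(handler);
    ...
    unregister_sys_off_handler(handler);
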
index 08d0a07..c7624d7 100644 (file)
@@ -146,6 +146,7 @@ static int __init versatile_reboot_probe(void)
        versatile_reboot_type = (enum versatile_reboot)reboot_id->data;
 
        syscon_regmap = syscon_node_to_regmap(np);
+       of_node_put(np);
        if (IS_ERR(syscon_regmap))
                return PTR_ERR(syscon_regmap);
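
The one-line fix above applies the OF reference-counting rule: a node handle obtained from a get-style lookup must be dropped once the caller no longer needs it, including on error paths. A hedged sketch:

    struct device_node *np = of_find_matching_node(NULL, match_table);
    regmap = syscon_node_to_regmap(np);
    of_node_put(np);                /* drop the reference either way */
    if (IS_ERR(regmap))
            return PTR_ERR(regmap);
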
 
index ec8a404..4339fa9 100644 (file)
@@ -3148,6 +3148,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
        ret = ab8500_fg_init_hw_registers(di);
        if (ret) {
                dev_err(dev, "failed to initialize registers\n");
+               destroy_workqueue(di->fg_wq);
                return ret;
        }
 
@@ -3159,6 +3160,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
        di->fg_psy = devm_power_supply_register(dev, &ab8500_fg_desc, &psy_cfg);
        if (IS_ERR(di->fg_psy)) {
                dev_err(dev, "failed to register FG psy\n");
+               destroy_workqueue(di->fg_wq);
                return PTR_ERR(di->fg_psy);
        }
 
@@ -3174,8 +3176,10 @@ static int ab8500_fg_probe(struct platform_device *pdev)
        /* Register primary interrupt handlers */
        for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq); i++) {
                irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
-               if (irq < 0)
+               if (irq < 0) {
+                       destroy_workqueue(di->fg_wq);
                        return irq;
+               }
 
                ret = devm_request_threaded_irq(dev, irq, NULL,
                                  ab8500_fg_irq[i].isr,
@@ -3185,6 +3189,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
                if (ret != 0) {
                        dev_err(dev, "failed to request %s IRQ %d: %d\n",
                                ab8500_fg_irq[i].name, irq, ret);
+                       destroy_workqueue(di->fg_wq);
                        return ret;
                }
                dev_dbg(dev, "Requested %s IRQ %d: %d\n",
@@ -3200,6 +3205,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
        ret = ab8500_fg_sysfs_init(di);
        if (ret) {
                dev_err(dev, "failed to create sysfs entry\n");
+               destroy_workqueue(di->fg_wq);
                return ret;
        }
 
@@ -3207,6 +3213,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(dev, "failed to create FG psy\n");
                ab8500_fg_sysfs_exit(di);
+               destroy_workqueue(di->fg_wq);
                return ret;
        }
 
index fad5890..470253c 100644 (file)
@@ -846,17 +846,17 @@ int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *t
 {
        int i, high, low;
 
-       /* Break loop at table_len - 1 because that is the highest index */
-       for (i = 0; i < table_len - 1; i++)
+       for (i = 0; i < table_len; i++)
                if (temp > table[i].temp)
                        break;
 
        /* The library function will deal with high == low */
-       if ((i == 0) || (i == (table_len - 1)))
-               high = i;
+       if (i == 0)
+               high = low = i;
+       else if (i == table_len)
+               high = low = i - 1;
        else
-               high = i - 1;
-       low = i;
+               high = (low = i) - 1;
 
        return fixp_linear_interpolate(table[low].temp,
                                       table[low].resistance,
@@ -958,17 +958,17 @@ int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table,
 {
        int i, high, low;
 
-       /* Break loop at table_len - 1 because that is the highest index */
-       for (i = 0; i < table_len - 1; i++)
+       for (i = 0; i < table_len; i++)
                if (ocv > table[i].ocv)
                        break;
 
        /* The library function will deal with high == low */
-       if ((i == 0) || (i == (table_len - 1)))
-               high = i - 1;
+       if (i == 0)
+               high = low = i;
+       else if (i == table_len)
+               high = low = i - 1;
        else
-               high = i; /* i.e. i == 0 */
-       low = i;
+               high = (low = i) - 1;
 
        return fixp_linear_interpolate(table[low].ocv,
                                       table[low].capacity,
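
Both helpers above implement the same pattern: scan a descending lookup table, clamp at either end, and linearly interpolate between the two neighbouring entries otherwise. A self-contained sketch of that pattern, assuming fixp_linear_interpolate() from <linux/fixp-arith.h> (table and field names illustrative):

    struct pt { int x, y; };

    static int table_interp(const struct pt *t, int len, int x)
    {
            int i, lo, hi;

            for (i = 0; i < len; i++)   /* t[].x sorted descending */
                    if (x > t[i].x)
                            break;

            if (i == 0)                 /* above the table: clamp  */
                    hi = lo = 0;
            else if (i == len)          /* below the table: clamp  */
                    hi = lo = len - 1;
            else {                      /* between two entries     */
                    lo = i;
                    hi = i - 1;
            }

            /* handles hi == lo by returning t[lo].y unchanged */
            return fixp_linear_interpolate(t[lo].x, t[lo].y,
                                           t[hi].x, t[hi].y, x);
    }
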
index 458218f..fe4971b 100644 (file)
@@ -176,6 +176,7 @@ config PTP_1588_CLOCK_OCP
        depends on !S390
        depends on COMMON_CLK
        select NET_DEVLINK
+       select CRC16
        help
          This driver adds support for an OpenCompute time card.
 
index 7dff94a..ef6e47d 100644 (file)
@@ -723,19 +723,19 @@ static const struct regulator_desc pms405_pldo600 = {
 
 static const struct regulator_desc mp5496_smpa2 = {
        .linear_ranges = (struct linear_range[]) {
-               REGULATOR_LINEAR_RANGE(725000, 0, 27, 12500),
+               REGULATOR_LINEAR_RANGE(600000, 0, 127, 12500),
        },
        .n_linear_ranges = 1,
-       .n_voltages = 28,
+       .n_voltages = 128,
        .ops = &rpm_mp5496_ops,
 };
 
 static const struct regulator_desc mp5496_ldoa2 = {
        .linear_ranges = (struct linear_range[]) {
-               REGULATOR_LINEAR_RANGE(1800000, 0, 60, 25000),
+               REGULATOR_LINEAR_RANGE(800000, 0, 127, 25000),
        },
        .n_linear_ranges = 1,
-       .n_voltages = 61,
+       .n_voltages = 128,
        .ops = &rpm_mp5496_ops,
 };
 
index cb24917..ae1d6ee 100644 (file)
@@ -60,7 +60,7 @@ static LIST_HEAD(sclp_reg_list);
 /* List of queued requests. */
 static LIST_HEAD(sclp_req_queue);
 
-/* Data for read and and init requests. */
+/* Data for read and init requests. */
 static struct sclp_req sclp_read_req;
 static struct sclp_req sclp_init_req;
 static void *sclp_read_sccb;
index 5c13d20..0a9045b 100644 (file)
@@ -1435,7 +1435,7 @@ static int __verify_queue_reservations(struct device_driver *drv, void *data)
        if (ap_drv->in_use) {
                rc = ap_drv->in_use(ap_perms.apm, newaqm);
                if (rc)
-                       return -EBUSY;
+                       rc = -EBUSY;
        }
 
        /* release the driver's module */
index 9e54fe7..35d4b39 100644 (file)
@@ -3565,7 +3565,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
                        if (!atomic_read(&queue->set_pci_flags_count)) {
                                /*
                                 * there's no outstanding PCI any more, so we
-                                * have to request a PCI to be sure the the PCI
+                                * have to request a PCI to be sure the PCI
                                 * will wake at some time in the future then we
                                 * can flush packed buffers that might still be
                                 * hanging around, which can happen if no
index 97e51c3..161d3b1 100644 (file)
@@ -1136,8 +1136,13 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
                        vcdev->err = -EIO;
        }
        virtio_ccw_check_activity(vcdev, activity);
-       /* Interrupts are disabled here */
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+       /*
+        * Paired with virtio_ccw_synchronize_cbs() and interrupts are
+        * disabled here.
+        */
        read_lock(&vcdev->irq_lock);
+#endif
        for_each_set_bit(i, indicators(vcdev),
                         sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
                /* The bit clear must happen before the vring kick. */
@@ -1146,7 +1151,9 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
                vq = virtio_ccw_vq_by_ind(vcdev, i);
                vring_interrupt(0, vq);
        }
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
        read_unlock(&vcdev->irq_lock);
+#endif
        if (test_bit(0, indicators2(vcdev))) {
                virtio_config_changed(&vcdev->vdev);
                clear_bit(0, indicators2(vcdev));
index 7d819fc..eb86afb 100644 (file)
@@ -2782,6 +2782,7 @@ static int slave_configure_v3_hw(struct scsi_device *sdev)
        struct hisi_hba *hisi_hba = shost_priv(shost);
        struct device *dev = hisi_hba->dev;
        int ret = sas_slave_configure(sdev);
+       unsigned int max_sectors;
 
        if (ret)
                return ret;
@@ -2799,6 +2800,12 @@ static int slave_configure_v3_hw(struct scsi_device *sdev)
                }
        }
 
+       /* Set according to IOMMU IOVA caching limit */
+       max_sectors = min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
+                           (PAGE_SIZE * 32) >> SECTOR_SHIFT);
+
+       blk_queue_max_hw_sectors(sdev->request_queue, max_sectors);
+
        return 0;
 }
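
For concreteness, with 4 KiB pages the clamp above works out to (4096 * 32) >> 9 = 256 sectors, i.e. requests are capped at 128 KiB so that each I/O stays within the IOVA range sizes the IOMMU layer caches efficiently.
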
 
index d0eab57..00684e1 100644 (file)
@@ -160,8 +160,8 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *);
 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
 static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
 
-static void ibmvfc_release_sub_crqs(struct ibmvfc_host *);
-static void ibmvfc_init_sub_crqs(struct ibmvfc_host *);
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *);
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *);
 
 static const char *unknown_error = "unknown error";
 
@@ -917,7 +917,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
        struct vio_dev *vdev = to_vio_dev(vhost->dev);
        unsigned long flags;
 
-       ibmvfc_release_sub_crqs(vhost);
+       ibmvfc_dereg_sub_crqs(vhost);
 
        /* Re-enable the CRQ */
        do {
@@ -936,7 +936,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
        spin_unlock(vhost->crq.q_lock);
        spin_unlock_irqrestore(vhost->host->host_lock, flags);
 
-       ibmvfc_init_sub_crqs(vhost);
+       ibmvfc_reg_sub_crqs(vhost);
 
        return rc;
 }
@@ -955,7 +955,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
        struct vio_dev *vdev = to_vio_dev(vhost->dev);
        struct ibmvfc_queue *crq = &vhost->crq;
 
-       ibmvfc_release_sub_crqs(vhost);
+       ibmvfc_dereg_sub_crqs(vhost);
 
        /* Close the CRQ */
        do {
@@ -988,7 +988,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
        spin_unlock(vhost->crq.q_lock);
        spin_unlock_irqrestore(vhost->host->host_lock, flags);
 
-       ibmvfc_init_sub_crqs(vhost);
+       ibmvfc_reg_sub_crqs(vhost);
 
        return rc;
 }
@@ -5682,6 +5682,8 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
        queue->cur = 0;
        queue->fmt = fmt;
        queue->size = PAGE_SIZE / fmt_size;
+
+       queue->vhost = vhost;
        return 0;
 }
 
@@ -5757,9 +5759,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
 
        ENTER;
 
-       if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT))
-               return -ENOMEM;
-
        rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
                           &scrq->cookie, &scrq->hw_irq);
 
@@ -5790,7 +5789,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
        }
 
        scrq->hwq_id = index;
-       scrq->vhost = vhost;
 
        LEAVE;
        return 0;
@@ -5800,7 +5798,6 @@ irq_failed:
                rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
        } while (rtas_busy_delay(rc));
 reg_failed:
-       ibmvfc_free_queue(vhost, scrq);
        LEAVE;
        return rc;
 }
@@ -5826,12 +5823,50 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
        if (rc)
                dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
 
-       ibmvfc_free_queue(vhost, scrq);
+       /* Clean out the queue */
+       memset(scrq->msgs.crq, 0, PAGE_SIZE);
+       scrq->cur = 0;
+
+       LEAVE;
+}
+
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost)
+{
+       int i, j;
+
+       ENTER;
+       if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
+               return;
+
+       for (i = 0; i < nr_scsi_hw_queues; i++) {
+               if (ibmvfc_register_scsi_channel(vhost, i)) {
+                       for (j = i; j > 0; j--)
+                               ibmvfc_deregister_scsi_channel(vhost, j - 1);
+                       vhost->do_enquiry = 0;
+                       return;
+               }
+       }
+
+       LEAVE;
+}
+
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost)
+{
+       int i;
+
+       ENTER;
+       if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
+               return;
+
+       for (i = 0; i < nr_scsi_hw_queues; i++)
+               ibmvfc_deregister_scsi_channel(vhost, i);
+
        LEAVE;
 }
 
 static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
 {
+       struct ibmvfc_queue *scrq;
        int i, j;
 
        ENTER;
@@ -5847,30 +5882,41 @@ static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
        }
 
        for (i = 0; i < nr_scsi_hw_queues; i++) {
-               if (ibmvfc_register_scsi_channel(vhost, i)) {
-                       for (j = i; j > 0; j--)
-                               ibmvfc_deregister_scsi_channel(vhost, j - 1);
+               scrq = &vhost->scsi_scrqs.scrqs[i];
+               if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) {
+                       for (j = i; j > 0; j--) {
+                               scrq = &vhost->scsi_scrqs.scrqs[j - 1];
+                               ibmvfc_free_queue(vhost, scrq);
+                       }
                        kfree(vhost->scsi_scrqs.scrqs);
                        vhost->scsi_scrqs.scrqs = NULL;
                        vhost->scsi_scrqs.active_queues = 0;
                        vhost->do_enquiry = 0;
-                       break;
+                       vhost->mq_enabled = 0;
+                       return;
                }
        }
 
+       ibmvfc_reg_sub_crqs(vhost);
+
        LEAVE;
 }
 
 static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
 {
+       struct ibmvfc_queue *scrq;
        int i;
 
        ENTER;
        if (!vhost->scsi_scrqs.scrqs)
                return;
 
-       for (i = 0; i < nr_scsi_hw_queues; i++)
-               ibmvfc_deregister_scsi_channel(vhost, i);
+       ibmvfc_dereg_sub_crqs(vhost);
+
+       for (i = 0; i < nr_scsi_hw_queues; i++) {
+               scrq = &vhost->scsi_scrqs.scrqs[i];
+               ibmvfc_free_queue(vhost, scrq);
+       }
 
        kfree(vhost->scsi_scrqs.scrqs);
        vhost->scsi_scrqs.scrqs = NULL;
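
The net effect of the renames and moves above is a split between queue memory management and hypervisor registration, so that CRQ reset paths can cycle registration without reallocating memory. In outline:

    init_sub_crqs()    = allocate queues, then reg_sub_crqs()
    release_sub_crqs() = dereg_sub_crqs(), then free queues
    reenable/reset crq = dereg_sub_crqs() ... reg_sub_crqs()
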
index 3718406..c39a245 100644 (file)
@@ -789,6 +789,7 @@ struct ibmvfc_queue {
        spinlock_t _lock;
        spinlock_t *q_lock;
 
+       struct ibmvfc_host *vhost;
        struct ibmvfc_event_pool evt_pool;
        struct list_head sent;
        struct list_head free;
@@ -797,7 +798,6 @@ struct ibmvfc_queue {
        union ibmvfc_iu cancel_rsp;
 
        /* Sub-CRQ fields */
-       struct ibmvfc_host *vhost;
        unsigned long cookie;
        unsigned long vios_cookie;
        unsigned long hw_irq;
index c95360a..0917b05 100644 (file)
@@ -3195,6 +3195,9 @@ static int megasas_map_queues(struct Scsi_Host *shost)
        qoff += map->nr_queues;
        offset += map->nr_queues;
 
+       /* we never use the READ queue, so we can't cheat blk-mq */
+       shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0;
+
        /* Setup Poll hctx */
        map = &shost->tag_set.map[HCTX_TYPE_POLL];
        map->nr_queues = instance->iopoll_q_count;
index b519f4b..5e8887f 100644 (file)
@@ -11386,6 +11386,7 @@ scsih_shutdown(struct pci_dev *pdev)
        _scsih_ir_shutdown(ioc);
        _scsih_nvme_shutdown(ioc);
        mpt3sas_base_mask_interrupts(ioc);
+       mpt3sas_base_stop_watchdog(ioc);
        ioc->shost_recovery = 1;
        mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
        ioc->shost_recovery = 0;
index f7466a8..991eb01 100644 (file)
@@ -3145,15 +3145,6 @@ void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
        if (!phy->phy_attached)
                return;
 
-       if (sas_phy->phy) {
-               struct sas_phy *sphy = sas_phy->phy;
-               sphy->negotiated_linkrate = sas_phy->linkrate;
-               sphy->minimum_linkrate = phy->minimum_linkrate;
-               sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
-               sphy->maximum_linkrate = phy->maximum_linkrate;
-               sphy->maximum_linkrate_hw = phy->maximum_linkrate;
-       }
-
        if (phy->phy_type & PORT_TYPE_SAS) {
                struct sas_identify_frame *id;
                id = (struct sas_identify_frame *)phy->frame_rcvd;
@@ -3177,26 +3168,22 @@ void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
        switch (link_rate) {
        case PHY_SPEED_120:
                phy->sas_phy.linkrate = SAS_LINK_RATE_12_0_GBPS;
-               phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_12_0_GBPS;
                break;
        case PHY_SPEED_60:
                phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
-               phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
                break;
        case PHY_SPEED_30:
                phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
-               phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
                break;
        case PHY_SPEED_15:
                phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
-               phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
                break;
        }
        sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
-       sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS;
+       sas_phy->maximum_linkrate_hw = phy->maximum_linkrate;
        sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
-       sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
-       sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+       sas_phy->maximum_linkrate = phy->maximum_linkrate;
+       sas_phy->minimum_linkrate = phy->minimum_linkrate;
 }
 
 /**
index 9b04f1a..01f2f41 100644 (file)
@@ -143,6 +143,8 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        phy->phy_state = PHY_LINK_DISABLE;
        phy->pm8001_ha = pm8001_ha;
+       phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+       phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
        sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
        sas_phy->class = SAS;
        sas_phy->iproto = SAS_PROTOCOL_ALL;
index 01c5e8f..303cd05 100644 (file)
@@ -3723,8 +3723,12 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n",
                   phyid, status);
        if (status == PHY_STOP_SUCCESS ||
-               status == PHY_STOP_ERR_DEVICE_ATTACHED)
+               status == PHY_STOP_ERR_DEVICE_ATTACHED) {
                phy->phy_state = PHY_LINK_DISABLE;
+               phy->sas_phy.phy->negotiated_linkrate = SAS_PHY_DISABLED;
+               phy->sas_phy.linkrate = SAS_PHY_DISABLED;
+       }
+
        return 0;
 }
 
index 1f423f7..b8a76b8 100644 (file)
@@ -2826,6 +2826,24 @@ static void zbc_open_zone(struct sdebug_dev_info *devip,
        }
 }
 
+static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
+                                    struct sdeb_zone_state *zsp)
+{
+       switch (zsp->z_cond) {
+       case ZC2_IMPLICIT_OPEN:
+               devip->nr_imp_open--;
+               break;
+       case ZC3_EXPLICIT_OPEN:
+               devip->nr_exp_open--;
+               break;
+       default:
+               WARN_ONCE(true, "Invalid zone %llu condition %x\n",
+                         zsp->z_start, zsp->z_cond);
+               break;
+       }
+       zsp->z_cond = ZC5_FULL;
+}
+
 static void zbc_inc_wp(struct sdebug_dev_info *devip,
                       unsigned long long lba, unsigned int num)
 {
@@ -2838,7 +2856,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
        if (zsp->z_type == ZBC_ZTYPE_SWR) {
                zsp->z_wp += num;
                if (zsp->z_wp >= zend)
-                       zsp->z_cond = ZC5_FULL;
+                       zbc_set_zone_full(devip, zsp);
                return;
        }
 
@@ -2857,7 +2875,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
                        n = num;
                }
                if (zsp->z_wp >= zend)
-                       zsp->z_cond = ZC5_FULL;
+                       zbc_set_zone_full(devip, zsp);
 
                num -= n;
                lba += n;
index a480c4d..729e309 100644 (file)
@@ -450,7 +450,7 @@ static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode)
                goto out_put_request;
 
        ret = 0;
-       if (hdr->iovec_count) {
+       if (hdr->iovec_count && hdr->dxfer_len) {
                struct iov_iter i;
                struct iovec *iov = NULL;
 
index 2c0dd64..5d21f07 100644 (file)
@@ -212,7 +212,12 @@ iscsi_create_endpoint(int dd_size)
                return NULL;
 
        mutex_lock(&iscsi_ep_idr_mutex);
-       id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
+
+       /*
+        * First endpoint id should be 1 to comply with user space
+        * applications (iscsid).
+        */
+       id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO);
        if (id < 0) {
                mutex_unlock(&iscsi_ep_idr_mutex);
                printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
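
The comment relies on idr_alloc()'s range semantics: 'start' is the first candidate id and 'end' is exclusive, with end <= 0 meaning "no upper bound". A minimal sketch:

    /* ids are handed out from 1 upwards; 0 is never allocated */
    id = idr_alloc(&some_idr, ptr, 1, -1, GFP_NOIO);
    if (id < 0)
            return id;    /* -ENOMEM, or -ENOSPC if the range is full */
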
index ca35309..fe000da 100644 (file)
@@ -1844,7 +1844,7 @@ static struct scsi_host_template scsi_driver = {
        .cmd_per_lun =          2048,
        .this_id =              -1,
        /* Ensure there are no gaps in presented sgls */
-       .virt_boundary_mask =   PAGE_SIZE-1,
+       .virt_boundary_mask =   HV_HYP_PAGE_SIZE - 1,
        .no_write_same =        1,
        .track_queue_depth =    1,
        .change_queue_depth =   storvsc_change_queue_depth,
@@ -1895,6 +1895,7 @@ static int storvsc_probe(struct hv_device *device,
        int target = 0;
        struct storvsc_device *stor_device;
        int max_sub_channels = 0;
+       u32 max_xfer_bytes;
 
        /*
         * We support sub-channels for storage on SCSI and FC controllers.
@@ -1968,12 +1969,28 @@ static int storvsc_probe(struct hv_device *device,
        }
        /* max cmd length */
        host->max_cmd_len = STORVSC_MAX_CMD_LEN;
-
        /*
-        * set the table size based on the info we got
-        * from the host.
+        * Any reasonable Hyper-V configuration should provide a
+        * max_transfer_bytes value aligned to HV_HYP_PAGE_SIZE; round
+        * down to protect against any weird value.
+        */
+       max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
+       /* max_hw_sectors_kb */
+       host->max_sectors = max_xfer_bytes >> 9;
+       /*
+        * There are 2 requirements for Hyper-V storvsc sgl segments,
+        * based on which the below calculation for max segments is
+        * done:
+        *
+        * 1. Except for the first and last sgl segment, all sgl segments
+        *    should be aligned to HV_HYP_PAGE_SIZE; that also means the
+        *    maximum number of segments in a sgl can be calculated by
+        *    dividing the total max transfer length by HV_HYP_PAGE_SIZE.
+        *
+        * 2. Except for the first and last, each entry in the SGL must
+        *    have an offset that is a multiple of HV_HYP_PAGE_SIZE.
         */
-       host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
+       host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1;
        /*
         * For non-IDE disks, the host supports multiple channels.
         * Set the number of HW queues we are supporting.
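
As a worked example of the new sizing (values illustrative): if the host reports max_transfer_bytes of 512 KiB, max_sectors becomes 512 KiB >> 9 = 1024 and sg_tablesize becomes (512 KiB >> HV_HYP_PAGE_SHIFT) + 1 = 129, the extra entry covering an unaligned first segment.
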
index b2d365a..dae8a2e 100644 (file)
@@ -91,14 +91,14 @@ static const struct at91_soc socs[] __initconst = {
        AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
                 AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH,
                 "sam9x60", "sam9x60"),
-       AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D5M_EXID_MATCH,
-                AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH,
+       AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+                AT91_CIDR_VERSION_MASK, SAM9X60_D5M_EXID_MATCH,
                 "sam9x60 64MiB DDR2 SiP", "sam9x60"),
-       AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D1G_EXID_MATCH,
-                AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH,
+       AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+                AT91_CIDR_VERSION_MASK, SAM9X60_D1G_EXID_MATCH,
                 "sam9x60 128MiB DDR2 SiP", "sam9x60"),
-       AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D6K_EXID_MATCH,
-                AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH,
+       AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+                AT91_CIDR_VERSION_MASK, SAM9X60_D6K_EXID_MATCH,
                 "sam9x60 8MiB SDRAM SiP", "sam9x60"),
 #endif
 #ifdef CONFIG_SOC_SAMA5
index 3cbb165..70ad0f3 100644 (file)
@@ -783,6 +783,7 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
        }
 
        ret = brcmstb_init_sram(dn);
+       of_node_put(dn);
        if (ret) {
                pr_err("error setting up SRAM for PM\n");
                return ret;
index 7f49385..7ebc287 100644 (file)
@@ -667,7 +667,7 @@ static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[
        },
        [IMX8MP_MEDIABLK_PD_LCDIF_2] = {
                .name = "mediablk-lcdif-2",
-               .clk_names = (const char *[]){ "disp1", "apb", "axi", },
+               .clk_names = (const char *[]){ "disp2", "apb", "axi", },
                .num_clks = 3,
                .gpc_name = "lcdif2",
                .rst_mask = BIT(11) | BIT(12) | BIT(24),
index 613935c..58240e3 100644 (file)
@@ -758,7 +758,7 @@ static const struct of_device_id ixp4xx_npe_of_match[] = {
 static struct platform_driver ixp4xx_npe_driver = {
        .driver = {
                .name           = "ixp4xx-npe",
-               .of_match_table = of_match_ptr(ixp4xx_npe_of_match),
+               .of_match_table = ixp4xx_npe_of_match,
        },
        .probe = ixp4xx_npe_probe,
        .remove = ixp4xx_npe_remove,
index 3e95835..4f163d6 100644 (file)
@@ -926,7 +926,7 @@ qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
        struct smem_partition_header *header;
        struct smem_ptable_entry *entry;
        struct smem_ptable *ptable;
-       unsigned int remote_host;
+       u16 remote_host;
        u16 host0, host1;
        int i;
 
@@ -951,12 +951,12 @@ qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
                        continue;
 
                if (remote_host >= SMEM_HOST_COUNT) {
-                       dev_err(smem->dev, "bad host %hu\n", remote_host);
+                       dev_err(smem->dev, "bad host %u\n", remote_host);
                        return -EINVAL;
                }
 
                if (smem->partitions[remote_host].virt_base) {
-                       dev_err(smem->dev, "duplicate host %hu\n", remote_host);
+                       dev_err(smem->dev, "duplicate host %u\n", remote_host);
                        return -EINVAL;
                }
 
index cba6a44..efdcbe6 100644 (file)
@@ -33,6 +33,7 @@
 #define AMD_SPI_RX_COUNT_REG   0x4B
 #define AMD_SPI_STATUS_REG     0x4C
 
+#define AMD_SPI_FIFO_SIZE      70
 #define AMD_SPI_MEM_SIZE       200
 
 /* M_CMD OP codes for SPI */
@@ -270,6 +271,11 @@ static int amd_spi_master_transfer(struct spi_master *master,
        return 0;
 }
 
+static size_t amd_spi_max_transfer_size(struct spi_device *spi)
+{
+       return AMD_SPI_FIFO_SIZE;
+}
+
 static int amd_spi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -302,6 +308,8 @@ static int amd_spi_probe(struct platform_device *pdev)
        master->flags = SPI_MASTER_HALF_DUPLEX;
        master->setup = amd_spi_master_setup;
        master->transfer_one_message = amd_spi_master_transfer;
+       master->max_transfer_size = amd_spi_max_transfer_size;
+       master->max_message_size = amd_spi_max_transfer_size;
 
        /* Register the controller with SPI framework */
        err = devm_spi_register_master(dev, master);
index 496f3e1..3e891bf 100644 (file)
@@ -558,6 +558,14 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
        u32 ctl_val;
        int ret = 0;
 
+       dev_dbg(aspi->dev,
+               "CE%d %s dirmap [ 0x%.8llx - 0x%.8llx ] OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x\n",
+               chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
+               desc->info.offset, desc->info.offset + desc->info.length,
+               op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+               op->dummy.buswidth, op->data.buswidth,
+               op->addr.nbytes, op->dummy.nbytes);
+
        chip->clk_freq = desc->mem->spi->max_speed_hz;
 
        /* Only for reads */
@@ -574,9 +582,11 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
        ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK;
        ctl_val |= aspeed_spi_get_io_mode(op) |
                op->cmd.opcode << CTRL_COMMAND_SHIFT |
-               CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth) |
                CTRL_IO_MODE_READ;
 
+       if (op->dummy.nbytes)
+               ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);
+
        /* Tune 4BYTE address mode */
        if (op->addr.nbytes) {
                u32 addr_mode = readl(aspi->regs + CE_CTRL_REG);
index 775c0bf..0933948 100644 (file)
@@ -1138,10 +1138,14 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 
        /* if an error occurred and we have an active dma, then terminate */
-       dmaengine_terminate_sync(ctlr->dma_tx);
-       bs->tx_dma_active = false;
-       dmaengine_terminate_sync(ctlr->dma_rx);
-       bs->rx_dma_active = false;
+       if (ctlr->dma_tx) {
+               dmaengine_terminate_sync(ctlr->dma_tx);
+               bs->tx_dma_active = false;
+       }
+       if (ctlr->dma_rx) {
+               dmaengine_terminate_sync(ctlr->dma_rx);
+               bs->rx_dma_active = false;
+       }
        bcm2835_spi_undo_prologue(bs);
 
        /* and reset */
index 2b9fc84..72b1a5a 100644 (file)
@@ -1578,8 +1578,7 @@ static int cqspi_probe(struct platform_device *pdev)
        ret = cqspi_of_get_pdata(cqspi);
        if (ret) {
                dev_err(dev, "Cannot get mandatory OF data.\n");
-               ret = -ENODEV;
-               goto probe_master_put;
+               return -ENODEV;
        }
 
        /* Obtain QSPI clock. */
@@ -1587,7 +1586,7 @@ static int cqspi_probe(struct platform_device *pdev)
        if (IS_ERR(cqspi->clk)) {
                dev_err(dev, "Cannot claim QSPI clock.\n");
                ret = PTR_ERR(cqspi->clk);
-               goto probe_master_put;
+               return ret;
        }
 
        /* Obtain and remap controller address. */
@@ -1596,7 +1595,7 @@ static int cqspi_probe(struct platform_device *pdev)
        if (IS_ERR(cqspi->iobase)) {
                dev_err(dev, "Cannot remap controller address.\n");
                ret = PTR_ERR(cqspi->iobase);
-               goto probe_master_put;
+               return ret;
        }
 
        /* Obtain and remap AHB address. */
@@ -1605,7 +1604,7 @@ static int cqspi_probe(struct platform_device *pdev)
        if (IS_ERR(cqspi->ahb_base)) {
                dev_err(dev, "Cannot remap AHB address.\n");
                ret = PTR_ERR(cqspi->ahb_base);
-               goto probe_master_put;
+               return ret;
        }
        cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
        cqspi->ahb_size = resource_size(res_ahb);
@@ -1614,15 +1613,13 @@ static int cqspi_probe(struct platform_device *pdev)
 
        /* Obtain IRQ line. */
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               ret = -ENXIO;
-               goto probe_master_put;
-       }
+       if (irq < 0)
+               return -ENXIO;
 
        pm_runtime_enable(dev);
        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
-               goto probe_master_put;
+               return ret;
 
        ret = clk_prepare_enable(cqspi->clk);
        if (ret) {
@@ -1716,8 +1713,6 @@ probe_reset_failed:
 probe_clk_failed:
        pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
-probe_master_put:
-       spi_master_put(master);
        return ret;
 }
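
The removed probe_master_put label reflects a lifetime rule: when the SPI master is device-managed (a devm-style allocation is assumed here), its reference is released automatically on probe failure, so an explicit spi_master_put() in the error path would drop the reference twice.
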
 
index a23d4f6..6a7f7df 100644 (file)
@@ -69,6 +69,7 @@
 #define CDNS_SPI_BAUD_DIV_SHIFT                3 /* Baud rate divisor shift in CR */
 #define CDNS_SPI_SS_SHIFT              10 /* Slave Select field shift in CR */
 #define CDNS_SPI_SS0                   0x1 /* Slave Select zero */
+#define CDNS_SPI_NOSS                  0xF /* No Slave Select */
 
 /*
  * SPI Interrupt Registers bit Masks
@@ -92,9 +93,6 @@
 #define CDNS_SPI_ER_ENABLE     0x00000001 /* SPI Enable Bit Mask */
 #define CDNS_SPI_ER_DISABLE    0x0 /* SPI Disable Bit Mask */
 
-/* SPI FIFO depth in bytes */
-#define CDNS_SPI_FIFO_DEPTH    128
-
 /* Default number of chip select lines */
 #define CDNS_SPI_DEFAULT_NUM_CS                4
 
  * @rx_bytes:          Number of bytes requested
  * @dev_busy:          Device busy flag
  * @is_decoded_cs:     Flag for decoder property set or not
+ * @tx_fifo_depth:     Depth of the TX FIFO
  */
 struct cdns_spi {
        void __iomem *regs;
@@ -123,6 +122,7 @@ struct cdns_spi {
        int rx_bytes;
        u8 dev_busy;
        u32 is_decoded_cs;
+       unsigned int tx_fifo_depth;
 };
 
 /* Macros for the SPI controller read/write */
@@ -304,7 +304,7 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
 {
        unsigned long trans_cnt = 0;
 
-       while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
+       while ((trans_cnt < xspi->tx_fifo_depth) &&
               (xspi->tx_bytes > 0)) {
 
                /* When xspi is in a busy condition, bytes may fail to send,
@@ -450,20 +450,43 @@ static int cdns_prepare_transfer_hardware(struct spi_master *master)
  * @master:    Pointer to the spi_master structure which provides
  *             information about the controller.
  *
- * This function disables the SPI master controller.
+ * This function disables the SPI master controller when no slave is selected.
  *
  * Return:     0 always
  */
 static int cdns_unprepare_transfer_hardware(struct spi_master *master)
 {
        struct cdns_spi *xspi = spi_master_get_devdata(master);
+       u32 ctrl_reg;
 
-       cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
+       /* Disable the SPI if the slave is deselected */
+       ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
+       ctrl_reg = (ctrl_reg & CDNS_SPI_CR_SSCTRL) >> CDNS_SPI_SS_SHIFT;
+       if (ctrl_reg == CDNS_SPI_NOSS)
+               cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
 
        return 0;
 }
 
 /**
+ * cdns_spi_detect_fifo_depth - Detect the FIFO depth of the hardware
+ * @xspi:      Pointer to the cdns_spi structure
+ *
+ * The depth of the TX FIFO is a synthesis configuration parameter of the SPI
+ * IP. The FIFO threshold register is sized so that its maximum value can be the
+ * FIFO size - 1. This is used to detect the size of the FIFO.
+ */
+static void cdns_spi_detect_fifo_depth(struct cdns_spi *xspi)
+{
+       /* The MSBs will get truncated, giving us the size of the FIFO */
+       cdns_spi_write(xspi, CDNS_SPI_THLD, 0xffff);
+       xspi->tx_fifo_depth = cdns_spi_read(xspi, CDNS_SPI_THLD) + 1;
+
+       /* Reset to default */
+       cdns_spi_write(xspi, CDNS_SPI_THLD, 0x1);
+}
+
+/**
  * cdns_spi_probe - Probe method for the SPI driver
  * @pdev:      Pointer to the platform_device structure
  *
@@ -535,6 +558,8 @@ static int cdns_spi_probe(struct platform_device *pdev)
        if (ret < 0)
                xspi->is_decoded_cs = 0;
 
+       cdns_spi_detect_fifo_depth(xspi);
+
        /* SPI controller initializations */
        cdns_spi_init_hw(xspi);
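
The detection helper above leans on a common hardware trick: a threshold register only implements enough bits to hold FIFO depth - 1, so writing all-ones and reading the value back reveals the depth. A userspace simulation of that trick, assuming a power-of-two depth so the truncation can be modelled with a mask (the register model is illustrative):

#include <stdio.h>

#define FIFO_DEPTH	128	/* simulated synthesis parameter */

static unsigned int thld_reg;

/* model a register just wide enough to hold FIFO_DEPTH - 1 */
static void thld_write(unsigned int val)
{
	thld_reg = val & (FIFO_DEPTH - 1);
}

int main(void)
{
	thld_write(0xffff);			/* MSBs get truncated */
	printf("depth = %u\n", thld_reg + 1);	/* prints 128 */
	thld_write(0x1);			/* restore the default */
	return 0;
}
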
 
index e8de4f5..0c79193 100644
@@ -808,7 +808,7 @@ int spi_mem_poll_status(struct spi_mem *mem,
            op->data.dir != SPI_MEM_DATA_IN)
                return -EINVAL;
 
-       if (ctlr->mem_ops && ctlr->mem_ops->poll_status) {
+       if (ctlr->mem_ops && ctlr->mem_ops->poll_status && !mem->spi->cs_gpiod) {
                ret = spi_mem_access_start(mem);
                if (ret)
                        return ret;
index a08215e..79242dc 100644
@@ -381,15 +381,18 @@ static int rockchip_spi_prepare_irq(struct rockchip_spi *rs,
        rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
        rs->rx_left = xfer->len / rs->n_bytes;
 
-       if (rs->cs_inactive)
-               writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
-       else
-               writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
+       writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
+
        spi_enable_chip(rs, true);
 
        if (rs->tx_left)
                rockchip_spi_pio_writer(rs);
 
+       if (rs->cs_inactive)
+               writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
+       else
+               writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
+
        /* 1 means the transfer is in progress */
        return 1;
 }
index 7a014ee..411b130 100644
@@ -613,6 +613,10 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
                                               rspi->dma_callbacked, HZ);
        if (ret > 0 && rspi->dma_callbacked) {
                ret = 0;
+               if (tx)
+                       dmaengine_synchronize(rspi->ctlr->dma_tx);
+               if (rx)
+                       dmaengine_synchronize(rspi->ctlr->dma_rx);
        } else {
                if (!ret) {
                        dev_err(&rspi->ctlr->dev, "DMA timeout\n");
index d1a0dea..d0ba34c 100644
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 config FB_OLPC_DCON
        tristate "One Laptop Per Child Display CONtroller support"
-       depends on OLPC && FB
+       depends on OLPC && FB && BROKEN
        depends on I2C
        depends on GPIO_CS5535 && ACPI
        select BACKLIGHT_CLASS_DEVICE
index 3d8e9de..7135d89 100644
@@ -178,8 +178,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
 
        pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
 
-       res = rtw_alloc_hwxmits(padapter);
-       if (res) {
+       if (rtw_alloc_hwxmits(padapter)) {
                res = _FAIL;
                goto exit;
        }
@@ -1483,19 +1482,10 @@ int rtw_alloc_hwxmits(struct adapter *padapter)
 
        hwxmits = pxmitpriv->hwxmits;
 
-       if (pxmitpriv->hwxmit_entry == 5) {
-               hwxmits[0] .sta_queue = &pxmitpriv->bm_pending;
-               hwxmits[1] .sta_queue = &pxmitpriv->vo_pending;
-               hwxmits[2] .sta_queue = &pxmitpriv->vi_pending;
-               hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
-               hwxmits[4] .sta_queue = &pxmitpriv->be_pending;
-       } else if (pxmitpriv->hwxmit_entry == 4) {
-               hwxmits[0] .sta_queue = &pxmitpriv->vo_pending;
-               hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
-               hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
-               hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
-       } else {
-       }
+       hwxmits[0].sta_queue = &pxmitpriv->vo_pending;
+       hwxmits[1].sta_queue = &pxmitpriv->vi_pending;
+       hwxmits[2].sta_queue = &pxmitpriv->be_pending;
+       hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
 
        return 0;
 }
index 1b09462..8dd280e 100644
@@ -403,7 +403,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
 
                if (wep_key_len > 0) {
                        wep_key_len = wep_key_len <= 5 ? 5 : 13;
-                       wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+                       wep_total_len = wep_key_len + sizeof(*pwep);
                        pwep = kzalloc(wep_total_len, GFP_KERNEL);
                        if (!pwep)
                                goto exit;
index ece97e3..30374a8 100644
@@ -90,7 +90,8 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
                if (wep_key_len > 0) {
                        wep_key_len = wep_key_len <= 5 ? 5 : 13;
                        wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material);
-                       pwep = kzalloc(wep_total_len, GFP_KERNEL);
+                       /* Allocate a full structure to avoid potentially running off the end. */
+                       pwep = kzalloc(sizeof(*pwep), GFP_KERNEL);
                        if (!pwep) {
                                ret = -ENOMEM;
                                goto exit;
@@ -582,7 +583,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
                if (wep_key_len > 0) {
                        wep_key_len = wep_key_len <= 5 ? 5 : 13;
                        wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material);
-                       pwep = kzalloc(wep_total_len, GFP_KERNEL);
+                       /* Allocate a full structure to avoid potentially running off the end. */
+                       pwep = kzalloc(sizeof(*pwep), GFP_KERNEL);
                        if (!pwep)
                                goto exit;
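
Both hunks replace an offsetof()-based allocation size with the full structure size. A userspace sketch of why the smaller allocation can run off the end once later code treats the buffer as a whole struct; the struct below is a simplified stand-in for struct ndis_802_11_wep:

#include <stddef.h>
#include <stdio.h>

/* simplified stand-in for struct ndis_802_11_wep */
struct wep {
	unsigned int length;
	unsigned int key_index;
	unsigned int key_length;
	unsigned char key_material[16];
};

int main(void)
{
	size_t wep_key_len = 13;
	size_t partial = offsetof(struct wep, key_material) + wep_key_len;

	/* any code that later copies sizeof(struct wep) bytes out of a
	 * `partial`-sized buffer reads past the allocation */
	printf("partial = %zu, full = %zu\n", partial, sizeof(struct wep));
	return 0;
}
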
 
index 3384452..02fdef7 100644
@@ -2632,7 +2632,7 @@ static void hfa384x_usbctlx_reaper_task(struct work_struct *work)
  */
 static void hfa384x_usbctlx_completion_task(struct work_struct *work)
 {
-       struct hfa384x *hw = container_of(work, struct hfa384x, reaper_bh);
+       struct hfa384x *hw = container_of(work, struct hfa384x, completion_bh);
        struct hfa384x_usbctlx *ctlx, *temp;
        unsigned long flags;
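
The one-word fix above corrects a classic container_of() copy-paste bug: the macro subtracts the offset of whichever member name it is given, so naming the wrong member silently yields a pointer into the wrong place. A self-contained demonstration with illustrative stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hw {
	int id;
	int reaper_bh;		/* stand-ins for the two work structs */
	int completion_bh;
};

int main(void)
{
	struct hw hw = { .id = 42 };
	int *work = &hw.completion_bh;	/* what the callback receives */

	/* the right member name recovers the enclosing struct ... */
	printf("good: id=%d\n",
	       container_of(work, struct hw, completion_bh)->id);
	/* ... the wrong one points somewhere past it */
	printf("bad:  %p vs %p\n",
	       (void *)container_of(work, struct hw, reaper_bh),
	       (void *)&hw);
	return 0;
}
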
 
index e68f1cc..6c8d8b0 100644
@@ -448,6 +448,9 @@ fd_execute_write_same(struct se_cmd *cmd)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
 
+       if (!cmd->t_data_nents)
+               return TCM_INVALID_CDB_FIELD;
+
        if (cmd->t_data_nents > 1 ||
            cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
                pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
index 378c803..1ed9381 100644
@@ -494,6 +494,10 @@ iblock_execute_write_same(struct se_cmd *cmd)
                       " backends not supported\n");
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
+
+       if (!cmd->t_data_nents)
+               return TCM_INVALID_CDB_FIELD;
+
        sg = &cmd->t_data_sg[0];
 
        if (cmd->t_data_nents > 1 ||
index ca1b231..f613283 100644
@@ -312,6 +312,12 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *op
                pr_warn("WRITE SAME with ANCHOR not supported\n");
                return TCM_INVALID_CDB_FIELD;
        }
+
+       if (flags & 0x01) {
+               pr_warn("WRITE SAME with NDOB not supported\n");
+               return TCM_INVALID_CDB_FIELD;
+       }
+
        /*
         * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
         * translated into block discard requests within backend code.
index c60896c..73b5e77 100644
@@ -189,7 +189,7 @@ struct optee_smc_call_get_os_revision_result {
  * Have config return register usage:
  * a0  OPTEE_SMC_RETURN_OK
  * a1  Physical address of start of SHM
- * a2  Size of of SHM
+ * a2  Size of SHM
  * a3  Cache settings of memory, as defined by the
  *     OPTEE_SMC_SHM_* values above
  * a4-7        Preserved
index 385cb0a..a1c1fa1 100644
@@ -884,8 +884,8 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
 
                rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
                rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
-               if (IS_ERR(arg))
-                       return PTR_ERR(arg);
+               if (IS_ERR(rpc_arg))
+                       return PTR_ERR(rpc_arg);
        }
 
        if (rpc_arg && tee_shm_is_dynamic(shm)) {
index af0f7c6..98da206 100644
@@ -1073,7 +1073,7 @@ EXPORT_SYMBOL_GPL(tee_device_unregister);
 /**
  * tee_get_drvdata() - Return driver_data pointer
  * @teedev:    Device containing the driver_data pointer
- * @returns the driver_data pointer supplied to tee_register().
+ * @returns the driver_data pointer supplied to tee_device_alloc().
  */
 void *tee_get_drvdata(struct tee_device *teedev)
 {
index cd80c7d..a9596e7 100644
@@ -81,6 +81,7 @@ static const struct x86_cpu_id tcc_ids[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
+       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
        {}
 };
 
index c7968ae..d02de3f 100644
@@ -426,7 +426,7 @@ static int goldfish_tty_remove(struct platform_device *pdev)
        tty_unregister_device(goldfish_tty_driver, qtty->console.index);
        iounmap(qtty->base);
        qtty->base = NULL;
-       free_irq(qtty->irq, pdev);
+       free_irq(qtty->irq, qtty);
        tty_port_destroy(&qtty->port);
        goldfish_tty_current_line_count--;
        if (goldfish_tty_current_line_count == 0)
index 137eebd..fd4d24f 100644
@@ -455,7 +455,7 @@ static void gsm_hex_dump_bytes(const char *fname, const u8 *data,
                return;
        }
 
-       prefix = kasprintf(GFP_KERNEL, "%s: ", fname);
+       prefix = kasprintf(GFP_ATOMIC, "%s: ", fname);
        if (!prefix)
                return;
        print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, data, len,
index 74bfabe..752dab3 100644
@@ -111,21 +111,11 @@ static void pty_unthrottle(struct tty_struct *tty)
 static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
 {
        struct tty_struct *to = tty->link;
-       unsigned long flags;
 
-       if (tty->flow.stopped)
+       if (tty->flow.stopped || !c)
                return 0;
 
-       if (c > 0) {
-               spin_lock_irqsave(&to->port->lock, flags);
-               /* Stuff the data into the input queue of the other end */
-               c = tty_insert_flip_string(to->port, buf, c);
-               spin_unlock_irqrestore(&to->port->lock, flags);
-               /* And shovel */
-               if (c)
-                       tty_flip_buffer_push(to->port);
-       }
-       return c;
+       return tty_insert_flip_string_and_push_buffer(to->port, buf, c);
 }
 
 /**
index cfbd2de..3f56dbc 100644
@@ -23,6 +23,7 @@
 #include <linux/sysrq.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/tty.h>
 #include <linux/ratelimit.h>
 #include <linux/tty_flip.h>
@@ -559,6 +560,9 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
 
                up->port.dev = dev;
 
+               if (uart_console_enabled(&up->port))
+                       pm_runtime_get_sync(up->port.dev);
+
                serial8250_apply_quirks(up);
                uart_add_one_port(drv, &up->port);
        }
index 7133fce..a8dba4a 100644
@@ -106,10 +106,10 @@ int serial8250_tx_dma(struct uart_8250_port *p)
                                   UART_XMIT_SIZE, DMA_TO_DEVICE);
 
        dma_async_issue_pending(dma->txchan);
-       if (dma->tx_err) {
+       serial8250_clear_THRI(p);
+       if (dma->tx_err)
                dma->tx_err = 0;
-               serial8250_clear_THRI(p);
-       }
+
        return 0;
 err:
        dma->tx_err = 1;
index f57bbd3..bb6aca0 100644
@@ -47,7 +47,7 @@
 #define RZN1_UART_xDMACR_DMA_EN                BIT(0)
 #define RZN1_UART_xDMACR_1_WORD_BURST  (0 << 1)
 #define RZN1_UART_xDMACR_4_WORD_BURST  (1 << 1)
-#define RZN1_UART_xDMACR_8_WORD_BURST  (3 << 1)
+#define RZN1_UART_xDMACR_8_WORD_BURST  (2 << 1)
 #define RZN1_UART_xDMACR_BLK_SZ(x)     ((x) << 3)
 
 /* Quirks */
@@ -773,18 +773,18 @@ static const struct of_device_id dw8250_of_match[] = {
 MODULE_DEVICE_TABLE(of, dw8250_of_match);
 
 static const struct acpi_device_id dw8250_acpi_match[] = {
-       { "INT33C4", 0 },
-       { "INT33C5", 0 },
-       { "INT3434", 0 },
-       { "INT3435", 0 },
-       { "80860F0A", 0 },
-       { "8086228A", 0 },
-       { "APMC0D08", 0},
-       { "AMD0020", 0 },
-       { "AMDI0020", 0 },
-       { "AMDI0022", 0 },
-       { "BRCM2032", 0 },
-       { "HISI0031", 0 },
+       { "80860F0A", (kernel_ulong_t)&dw8250_dw_apb },
+       { "8086228A", (kernel_ulong_t)&dw8250_dw_apb },
+       { "AMD0020", (kernel_ulong_t)&dw8250_dw_apb },
+       { "AMDI0020", (kernel_ulong_t)&dw8250_dw_apb },
+       { "AMDI0022", (kernel_ulong_t)&dw8250_dw_apb },
+       { "APMC0D08", (kernel_ulong_t)&dw8250_dw_apb},
+       { "BRCM2032", (kernel_ulong_t)&dw8250_dw_apb },
+       { "HISI0031", (kernel_ulong_t)&dw8250_dw_apb },
+       { "INT33C4", (kernel_ulong_t)&dw8250_dw_apb },
+       { "INT33C5", (kernel_ulong_t)&dw8250_dw_apb },
+       { "INT3434", (kernel_ulong_t)&dw8250_dw_apb },
+       { "INT3435", (kernel_ulong_t)&dw8250_dw_apb },
        { },
 };
 MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
index 78b6ded..3c36a06 100644
@@ -1517,6 +1517,8 @@ static inline void __stop_tx(struct uart_8250_port *p)
                unsigned char lsr = serial_in(p, UART_LSR);
                u64 stop_delay = 0;
 
+               p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+
                if (!(lsr & UART_LSR_THRE))
                        return;
                /*
@@ -1947,7 +1949,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
        if ((status & UART_LSR_THRE) && (up->ier & UART_IER_THRI)) {
                if (!up->dma || up->dma->tx_err)
                        serial8250_tx_chars(up);
-               else
+               else if (!up->dma->tx_running)
                        __stop_tx(up);
        }
 
@@ -2973,8 +2975,10 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
        case UPIO_MEM32BE:
        case UPIO_MEM16:
        case UPIO_MEM:
-               if (!port->mapbase)
+               if (!port->mapbase) {
+                       ret = -EINVAL;
                        break;
+               }
 
                if (!request_mem_region(port->mapbase, size, "serial")) {
                        ret = -EBUSY;
index 97ef41c..16a2142 100644
@@ -1367,6 +1367,15 @@ static void pl011_stop_rx(struct uart_port *port)
        pl011_dma_rx_stop(uap);
 }
 
+static void pl011_throttle_rx(struct uart_port *port)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&port->lock, flags);
+       pl011_stop_rx(port);
+       spin_unlock_irqrestore(&port->lock, flags);
+}
+
 static void pl011_enable_ms(struct uart_port *port)
 {
        struct uart_amba_port *uap =
@@ -1788,9 +1797,10 @@ static int pl011_allocate_irq(struct uart_amba_port *uap)
  */
 static void pl011_enable_interrupts(struct uart_amba_port *uap)
 {
+       unsigned long flags;
        unsigned int i;
 
-       spin_lock_irq(&uap->port.lock);
+       spin_lock_irqsave(&uap->port.lock, flags);
 
        /* Clear out any spuriously appearing RX interrupts */
        pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
@@ -1812,7 +1822,14 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap)
        if (!pl011_dma_rx_running(uap))
                uap->im |= UART011_RXIM;
        pl011_write(uap->im, uap, REG_IMSC);
-       spin_unlock_irq(&uap->port.lock);
+       spin_unlock_irqrestore(&uap->port.lock, flags);
+}
+
+static void pl011_unthrottle_rx(struct uart_port *port)
+{
+       struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
+
+       pl011_enable_interrupts(uap);
 }
 
 static int pl011_startup(struct uart_port *port)
@@ -2225,6 +2242,8 @@ static const struct uart_ops amba_pl011_pops = {
        .stop_tx        = pl011_stop_tx,
        .start_tx       = pl011_start_tx,
        .stop_rx        = pl011_stop_rx,
+       .throttle       = pl011_throttle_rx,
+       .unthrottle     = pl011_unthrottle_rx,
        .enable_ms      = pl011_enable_ms,
        .break_ctl      = pl011_break_ctl,
        .startup        = pl011_startup,
index 0429c2a..93489fe 100644
@@ -470,14 +470,14 @@ static void mvebu_uart_shutdown(struct uart_port *port)
        }
 }
 
-static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
+static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
 {
        unsigned int d_divisor, m_divisor;
        unsigned long flags;
        u32 brdv, osamp;
 
        if (!port->uartclk)
-               return -EOPNOTSUPP;
+               return 0;
 
        /*
         * The baudrate is derived from the UART clock thanks to divisors:
@@ -548,7 +548,7 @@ static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
                        (m_divisor << 16) | (m_divisor << 24);
        writel(osamp, port->membase + UART_OSAMP);
 
-       return 0;
+       return DIV_ROUND_CLOSEST(port->uartclk, d_divisor * m_divisor);
 }
 
 static void mvebu_uart_set_termios(struct uart_port *port,
@@ -587,15 +587,11 @@ static void mvebu_uart_set_termios(struct uart_port *port,
        max_baud = port->uartclk / 80;
 
        baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
-       if (mvebu_uart_baud_rate_set(port, baud)) {
-               /* No clock available, baudrate cannot be changed */
-               if (old)
-                       baud = uart_get_baud_rate(port, old, NULL,
-                                                 min_baud, max_baud);
-       } else {
-               tty_termios_encode_baud_rate(termios, baud, baud);
-               uart_update_timeout(port, termios->c_cflag, baud);
-       }
+       baud = mvebu_uart_baud_rate_set(port, baud);
+
+       /* If the baud rate cannot be changed, report the previous value */
+       if (baud == 0 && old)
+               baud = tty_termios_baud_rate(old);
 
        /* Only the following flag changes are supported */
        if (old) {
@@ -606,6 +602,11 @@ static void mvebu_uart_set_termios(struct uart_port *port,
                termios->c_cflag |= CS8;
        }
 
+       if (baud != 0) {
+               tty_termios_encode_baud_rate(termios, baud, baud);
+               uart_update_timeout(port, termios->c_cflag, baud);
+       }
+
        spin_unlock_irqrestore(&port->lock, flags);
 }
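
mvebu_uart_baud_rate_set() now reports the baud rate actually programmed, computed from the divisors, with 0 meaning the rate could not be changed. A quick userspace check of the rounding step, with made-up clock and divisor values (not taken from real hardware):

#include <stdio.h>

/* same rounding the kernel's DIV_ROUND_CLOSEST() performs for
 * unsigned operands */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int uartclk = 25000000;	/* illustrative */
	unsigned int d_divisor = 14, m_divisor = 16;

	printf("baud = %u\n",
	       DIV_ROUND_CLOSEST(uartclk, d_divisor * m_divisor));
	return 0;
}
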
 
index 4733a23..f8f9506 100644
@@ -1306,6 +1306,7 @@ static const struct uart_ops qcom_geni_console_pops = {
        .stop_tx = qcom_geni_serial_stop_tx,
        .start_tx = qcom_geni_serial_start_tx,
        .stop_rx = qcom_geni_serial_stop_rx,
+       .start_rx = qcom_geni_serial_start_rx,
        .set_termios = qcom_geni_serial_set_termios,
        .startup = qcom_geni_serial_startup,
        .request_port = qcom_geni_serial_request_port,
index d5ca904..1afe47b 100644
@@ -377,8 +377,7 @@ static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
        /* Enable tx dma mode */
        ucon = rd_regl(port, S3C2410_UCON);
        ucon &= ~(S3C64XX_UCON_TXBURST_MASK | S3C64XX_UCON_TXMODE_MASK);
-       ucon |= (dma_get_cache_alignment() >= 16) ?
-               S3C64XX_UCON_TXBURST_16 : S3C64XX_UCON_TXBURST_1;
+       ucon |= S3C64XX_UCON_TXBURST_1;
        ucon |= S3C64XX_UCON_TXMODE_DMA;
        wr_regl(port,  S3C2410_UCON, ucon);
 
@@ -674,7 +673,7 @@ static void enable_rx_dma(struct s3c24xx_uart_port *ourport)
                        S3C64XX_UCON_DMASUS_EN |
                        S3C64XX_UCON_TIMEOUT_EN |
                        S3C64XX_UCON_RXMODE_MASK);
-       ucon |= S3C64XX_UCON_RXBURST_16 |
+       ucon |= S3C64XX_UCON_RXBURST_1 |
                        0xf << S3C64XX_UCON_TIMEOUT_SHIFT |
                        S3C64XX_UCON_EMPTYINT_EN |
                        S3C64XX_UCON_TIMEOUT_EN |
index 9a85b41..3dc926d 100644
@@ -1941,11 +1941,6 @@ static int uart_proc_show(struct seq_file *m, void *v)
 }
 #endif
 
-static inline bool uart_console_enabled(struct uart_port *port)
-{
-       return uart_console(port) && (port->cons->flags & CON_ENABLED);
-}
-
 static void uart_port_spin_lock_init(struct uart_port *port)
 {
        spin_lock_init(&port->lock);
@@ -2214,11 +2209,12 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
        /*
         * Nothing to do if the console is not suspending
         * except stop_rx to prevent any asynchronous data
-        * over RX line. Re-start_rx, when required, is
-        * done by set_termios in resume sequence
+        * over the RX line. However, ensure that we will be
+        * able to call start_rx again later.
         */
        if (!console_suspend_enabled && uart_console(uport)) {
-               uport->ops->stop_rx(uport);
+               if (uport->ops->start_rx)
+                       uport->ops->stop_rx(uport);
                goto unlock;
        }
 
@@ -2310,6 +2306,8 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
                if (console_suspend_enabled)
                        uart_change_pm(state, UART_PM_STATE_ON);
                uport->ops->set_termios(uport, &termios, NULL);
+               if (!console_suspend_enabled && uport->ops->start_rx)
+                       uport->ops->start_rx(uport);
                if (console_suspend_enabled)
                        console_start(uport->cons);
        }
index b7b44f4..0973b03 100644
@@ -72,6 +72,8 @@ static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
        *cr3 |= USART_CR3_DEM;
        over8 = *cr1 & USART_CR1_OVER8;
 
+       *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
+
        if (over8)
                rs485_deat_dedt = delay_ADE * baud * 8;
        else
index 18e6233..d2b2720 100644
@@ -581,7 +581,6 @@ void __handle_sysrq(int key, bool check_mask)
 
        rcu_sysrq_start();
        rcu_read_lock();
-       printk_prefer_direct_enter();
        /*
         * Raise the apparent loglevel to maximum so that the sysrq header
         * is shown to provide the user with positive feedback.  We do not
@@ -623,7 +622,6 @@ void __handle_sysrq(int key, bool check_mask)
                pr_cont("\n");
                console_loglevel = orig_log_level;
        }
-       printk_prefer_direct_exit();
        rcu_read_unlock();
        rcu_sysrq_end();
 
index b710c5e..f310a82 100644
@@ -111,4 +111,7 @@ static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
 
 ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *);
 
+int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
+               const unsigned char *chars, size_t cnt);
+
 #endif
index bfa431a..595d8b4 100644
@@ -532,6 +532,15 @@ static void flush_to_ldisc(struct work_struct *work)
 
 }
 
+static inline void tty_flip_buffer_commit(struct tty_buffer *tail)
+{
+       /*
+        * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees
+        * buffer data.
+        */
+       smp_store_release(&tail->commit, tail->used);
+}
+
 /**
  * tty_flip_buffer_push                -       push terminal buffers
  * @port: tty port to push
@@ -546,16 +555,43 @@ void tty_flip_buffer_push(struct tty_port *port)
 {
        struct tty_bufhead *buf = &port->buf;
 
-       /*
-        * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees
-        * buffer data.
-        */
-       smp_store_release(&buf->tail->commit, buf->tail->used);
+       tty_flip_buffer_commit(buf->tail);
        queue_work(system_unbound_wq, &buf->work);
 }
 EXPORT_SYMBOL(tty_flip_buffer_push);
 
 /**
+ * tty_insert_flip_string_and_push_buffer - add characters to the tty buffer and
+ *     push
+ * @port: tty port
+ * @chars: characters
+ * @size: size
+ *
+ * The function combines tty_insert_flip_string() and tty_flip_buffer_push()
+ * except that it properly holds the @port->lock.
+ *
+ * To be used only internally (by pty currently).
+ *
+ * Returns: the number added.
+ */
+int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
+               const unsigned char *chars, size_t size)
+{
+       struct tty_bufhead *buf = &port->buf;
+       unsigned long flags;
+
+       spin_lock_irqsave(&port->lock, flags);
+       size = tty_insert_flip_string(port, chars, size);
+       if (size)
+               tty_flip_buffer_commit(buf->tail);
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       queue_work(system_unbound_wq, &buf->work);
+
+       return size;
+}
+
+/**
  * tty_buffer_init             -       prepare a tty buffer structure
  * @port: tty port to initialise
  *
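
The factored-out helper preserves the ordering contract named in its comment: the store to commit is a release, paired with an acquire load in flush_to_ldisc(), so the reader is guaranteed to observe the buffer bytes written before the commit. A C11 userspace analogue of that pairing (the kernel uses smp_store_release()/smp_load_acquire(); the names below are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static char data[16];
static atomic_size_t commit;

static void producer(void)
{
	data[0] = 'x';		/* buffer bytes first ... */
	atomic_store_explicit(&commit, 1, memory_order_release);
}

static void consumer(void)
{
	/* ... acquire guarantees the bytes are visible here */
	if (atomic_load_explicit(&commit, memory_order_acquire))
		printf("%c\n", data[0]);
}

int main(void)
{
	producer();
	consumer();
	return 0;
}
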
index f8c87c4..dfc1f4b 100644
@@ -855,7 +855,7 @@ static void delete_char(struct vc_data *vc, unsigned int nr)
        unsigned short *p = (unsigned short *) vc->vc_pos;
 
        vc_uniscr_delete(vc, nr);
-       scr_memcpyw(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2);
+       scr_memmovew(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2);
        scr_memsetw(p + vc->vc_cols - vc->state.x - nr, vc->vc_video_erase_char,
                        nr * 2);
        vc->vc_need_wrap = 0;
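
The vt fix swaps scr_memcpyw() for scr_memmovew() because deleting characters shifts a range onto itself, and overlapping copies are undefined for memcpy(). A plain-C illustration of the difference:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[] = "abcdef";

	/* source and destination overlap: memmove() is defined here,
	 * memcpy() is not */
	memmove(line, line + 2, 4);
	line[4] = '\0';
	printf("%s\n", line);	/* prints "cdef" */
	return 0;
}
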
index 01fb4ba..3d367be 100644
@@ -748,17 +748,28 @@ static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 }
 
 /**
- * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
+ * ufshcd_utrl_clear() - Clear requests from the controller request list.
  * @hba: per adapter instance
- * @pos: position of the bit to be cleared
+ * @mask: mask with one bit set for each request to be cleared
  */
-static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
+static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
 {
        if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
-               ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
-       else
-               ufshcd_writel(hba, ~(1 << pos),
-                               REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+               mask = ~mask;
+       /*
+        * From the UFSHCI specification: "UTP Transfer Request List CLear
+        * Register (UTRLCLR): This field is bit significant. Each bit
+        * corresponds to a slot in the UTP Transfer Request List, where bit 0
+        * corresponds to request slot 0. A bit in this field is set to ‘0’
+        * by host software to indicate to the host controller that a transfer
+        * request slot is cleared. The host controller
+        * shall free up any resources associated to the request slot
+        * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
+        * host software indicates no change to request slots by setting the
+        * associated bits in this field to ‘1’. Bits in this field shall only
+        * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
+        */
+       ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
 }
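
A userspace model of the bit-significant convention the spec quote describes: the driver writes ~mask, and the controller clears the doorbell slots whose bits were written as 0 while leaving 1 bits untouched. Register state and slot numbers below are simulated and illustrative:

#include <stdint.h>
#include <stdio.h>

static uint32_t doorbell = 0x2d;	/* slots 0, 2, 3 and 5 busy */

/* model of the UTRLCLR write: 0 bits clear, 1 bits mean "no change" */
static void utrl_clear_model(uint32_t mask)
{
	uint32_t reg = ~mask;	/* what the driver writes */

	/* hardware clears each doorbell bit whose reg bit is 0 */
	doorbell &= reg;
}

int main(void)
{
	utrl_clear_model((1u << 2) | (1u << 5));	/* clear slots 2 and 5 */
	printf("doorbell = %#x\n", (unsigned int)doorbell);	/* 0x9 */
	return 0;
}
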
 
 /**
@@ -2863,27 +2874,26 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
        return ufshcd_compose_devman_upiu(hba, lrbp);
 }
 
-static int
-ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
+/*
+ * Clear all the requests from the controller for which a bit has been set in
+ * @mask and wait until the controller confirms that these requests have been
+ * cleared.
+ */
+static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask)
 {
-       int err = 0;
        unsigned long flags;
-       u32 mask = 1 << tag;
 
        /* clear outstanding transaction before retry */
        spin_lock_irqsave(hba->host->host_lock, flags);
-       ufshcd_utrl_clear(hba, tag);
+       ufshcd_utrl_clear(hba, mask);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
        /*
         * wait for h/w to clear corresponding bit in door-bell.
         * max. wait is 1 sec.
         */
-       err = ufshcd_wait_for_register(hba,
-                       REG_UTP_TRANSFER_REQ_DOOR_BELL,
-                       mask, ~mask, 1000, 1000);
-
-       return err;
+       return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
+                                       mask, ~mask, 1000, 1000);
 }
 
 static int
@@ -2943,37 +2953,59 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
                struct ufshcd_lrb *lrbp, int max_timeout)
 {
-       int err = 0;
-       unsigned long time_left;
+       unsigned long time_left = msecs_to_jiffies(max_timeout);
        unsigned long flags;
+       bool pending;
+       int err;
 
+retry:
        time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
-                       msecs_to_jiffies(max_timeout));
+                                               time_left);
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       hba->dev_cmd.complete = NULL;
        if (likely(time_left)) {
+               /*
+                * The completion handler called complete() and the caller of
+                * this function still owns the @lrbp tag, so the code below does
+                * not trigger any race conditions.
+                */
+               hba->dev_cmd.complete = NULL;
                err = ufshcd_get_tr_ocs(lrbp);
                if (!err)
                        err = ufshcd_dev_cmd_completion(hba, lrbp);
-       }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-       if (!time_left) {
+       } else {
                err = -ETIMEDOUT;
                dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
                        __func__, lrbp->task_tag);
-               if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
+               if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) {
                        /* successfully cleared the command, retry if needed */
                        err = -EAGAIN;
-               /*
-                * in case of an error, after clearing the doorbell,
-                * we also need to clear the outstanding_request
-                * field in hba
-                */
-               spin_lock_irqsave(&hba->outstanding_lock, flags);
-               __clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
-               spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+                       /*
+                        * Since clearing the command succeeded, we also need to
+                        * clear the task tag bit from the outstanding_reqs
+                        * variable.
+                        */
+                       spin_lock_irqsave(&hba->outstanding_lock, flags);
+                       pending = test_bit(lrbp->task_tag,
+                                          &hba->outstanding_reqs);
+                       if (pending) {
+                               hba->dev_cmd.complete = NULL;
+                               __clear_bit(lrbp->task_tag,
+                                           &hba->outstanding_reqs);
+                       }
+                       spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+                       if (!pending) {
+                               /*
+                                * The completion handler ran while we tried to
+                                * clear the command.
+                                */
+                               time_left = 1;
+                               goto retry;
+                       }
+               } else {
+                       dev_err(hba->dev, "%s: failed to clear tag %d\n",
+                               __func__, lrbp->task_tag);
+               }
        }
 
        return err;
@@ -5728,7 +5760,7 @@ int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
        }
 
        hba->dev_info.wb_enabled = enable;
-       dev_info(hba->dev, "%s Write Booster %s\n",
+       dev_dbg(hba->dev, "%s Write Booster %s\n",
                        __func__, enable ? "enabled" : "disabled");
 
        return ret;
@@ -6958,14 +6990,14 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
 }
 
 /**
- * ufshcd_eh_device_reset_handler - device reset handler registered to
- *                                    scsi layer.
+ * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
  * @cmd: SCSI command pointer
  *
  * Returns SUCCESS/FAILED
  */
 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
+       unsigned long flags, pending_reqs = 0, not_cleared = 0;
        struct Scsi_Host *host;
        struct ufs_hba *hba;
        u32 pos;
@@ -6984,14 +7016,24 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
        }
 
        /* clear the commands that were pending for corresponding LUN */
-       for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
-               if (hba->lrb[pos].lun == lun) {
-                       err = ufshcd_clear_cmd(hba, pos);
-                       if (err)
-                               break;
-                       __ufshcd_transfer_req_compl(hba, 1U << pos);
-               }
+       spin_lock_irqsave(&hba->outstanding_lock, flags);
+       for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
+               if (hba->lrb[pos].lun == lun)
+                       __set_bit(pos, &pending_reqs);
+       hba->outstanding_reqs &= ~pending_reqs;
+       spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+       if (ufshcd_clear_cmds(hba, pending_reqs) < 0) {
+               spin_lock_irqsave(&hba->outstanding_lock, flags);
+               not_cleared = pending_reqs &
+                       ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+               hba->outstanding_reqs |= not_cleared;
+               spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+               dev_err(hba->dev, "%s: failed to clear requests %#lx\n",
+                       __func__, not_cleared);
        }
+       __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared);
 
 out:
        hba->req_abort_count = 0;
@@ -7088,7 +7130,7 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
                goto out;
        }
 
-       err = ufshcd_clear_cmd(hba, tag);
+       err = ufshcd_clear_cmds(hba, 1U << tag);
        if (err)
                dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
                        __func__, tag, err);
@@ -7233,7 +7275,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
        hba->silence_err_logs = false;
 
        /* scale up clocks to max frequency before full reinitialization */
-       ufshcd_set_clk_freq(hba, true);
+       ufshcd_scale_clks(hba, true);
 
        err = ufshcd_hba_enable(hba);
 
index e7332cc..173aea8 100644
@@ -108,9 +108,20 @@ out:
        return ret;
 }
 
+static bool phandle_exists(const struct device_node *np,
+                          const char *phandle_name, int index)
+{
+       struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);
+
+       if (parse_np)
+               of_node_put(parse_np);
+
+       return parse_np != NULL;
+}
+
 #define MAX_PROP_SIZE 32
 static int ufshcd_populate_vreg(struct device *dev, const char *name,
-               struct ufs_vreg **out_vreg)
+                               struct ufs_vreg **out_vreg)
 {
        char prop_name[MAX_PROP_SIZE];
        struct ufs_vreg *vreg = NULL;
@@ -122,7 +133,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
        }
 
        snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
-       if (!of_parse_phandle(np, prop_name, 0)) {
+       if (!phandle_exists(np, prop_name, 0)) {
                dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
                                __func__, prop_name);
                goto out;
index e45c3d6..794e413 100644
@@ -1941,13 +1941,16 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
                }
 
                if (enqd_len + trb_buff_len >= full_len) {
-                       if (need_zero_pkt)
-                               zero_len_trb = !zero_len_trb;
-
-                       field &= ~TRB_CHAIN;
-                       field |= TRB_IOC;
-                       more_trbs_coming = false;
-                       preq->td.last_trb = ring->enqueue;
+                       if (need_zero_pkt && !zero_len_trb) {
+                               zero_len_trb = true;
+                       } else {
+                               zero_len_trb = false;
+                               field &= ~TRB_CHAIN;
+                               field |= TRB_IOC;
+                               more_trbs_coming = false;
+                               need_zero_pkt = false;
+                               preq->td.last_trb = ring->enqueue;
+                       }
                }
 
                /* Only set interrupt on short packet for OUT endpoints. */
@@ -1962,7 +1965,7 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
                length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
                        TRB_INTR_TARGET(0);
 
-               cdnsp_queue_trb(pdev, ring, more_trbs_coming | zero_len_trb,
+               cdnsp_queue_trb(pdev, ring, more_trbs_coming,
                                lower_32_bits(send_addr),
                                upper_32_bits(send_addr),
                                length_field,
index dc6c96e..3b8bf6d 100644
@@ -1048,6 +1048,9 @@ isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
        struct ci_hdrc *ci = req->context;
        unsigned long flags;
 
+       if (req->status < 0)
+               return;
+
        if (ci->setaddr) {
                hw_usb_set_address(ci, ci->address);
                ci->setaddr = false;
index f63a27d..3f107a0 100644
@@ -5190,7 +5190,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                retval = -EINVAL;
-               goto error1;
+               goto error2;
        }
        hcd->rsrc_start = res->start;
        hcd->rsrc_len = resource_size(res);
index e027c04..5734219 100644
@@ -1644,13 +1644,8 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
         * This device property is for kernel internal use only and
         * is expected to be set by the glue code.
         */
-       if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
-               edev = extcon_get_extcon_dev(name);
-               if (!edev)
-                       return ERR_PTR(-EPROBE_DEFER);
-
-               return edev;
-       }
+       if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
+               return extcon_get_extcon_dev(name);
 
        /*
         * Try to get an extcon device from the USB PHY controller's "port"
index fea7aca..173cf35 100644
@@ -195,8 +195,7 @@ static int dwc3_ti_probe(struct platform_device *pdev)
 
        if (i == ARRAY_SIZE(dwc3_ti_rate_table)) {
                dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate);
-               ret = -EINVAL;
-               goto err_clk_disable;
+               return -EINVAL;
        }
 
        data->rate_code = i;
@@ -204,7 +203,7 @@ static int dwc3_ti_probe(struct platform_device *pdev)
        /* Read the syscon property and set the rate code */
        ret = phy_syscon_pll_refclk(data);
        if (ret)
-               goto err_clk_disable;
+               return ret;
 
        /* VBUS divider select */
        data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
@@ -245,8 +244,6 @@ err_pm_disable:
        clk_disable_unprepare(data->usb2_refclk);
        pm_runtime_disable(dev);
        pm_runtime_set_suspended(dev);
-err_clk_disable:
-       clk_put(data->usb2_refclk);
        return ret;
 }
 
@@ -276,7 +273,6 @@ static int dwc3_ti_remove(struct platform_device *pdev)
        pm_runtime_disable(dev);
        pm_runtime_set_suspended(dev);
 
-       clk_put(data->usb2_refclk);
        platform_set_drvdata(pdev, NULL);
        return 0;
 }
index ba51de7..6b01804 100644
@@ -127,6 +127,7 @@ static const struct property_entry dwc3_pci_intel_phy_charger_detect_properties[
        PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
        PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
        PROPERTY_ENTRY_BOOL("linux,phy_charger_detect"),
+       PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
        {}
 };
 
index 00427d1..0d89dfa 100644
@@ -2976,6 +2976,7 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
        struct dwc3 *dwc = dep->dwc;
        u32 mdwidth;
        int size;
+       int maxpacket;
 
        mdwidth = dwc3_mdwidth(dwc);
 
@@ -2988,21 +2989,24 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
        else
                size = DWC31_GTXFIFOSIZ_TXFDEP(size);
 
-       /* FIFO Depth is in MDWDITH bytes. Multiply */
-       size *= mdwidth;
-
        /*
-        * To meet performance requirement, a minimum TxFIFO size of 3x
-        * MaxPacketSize is recommended for endpoints that support burst and a
-        * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't
-        * support burst. Use those numbers and we can calculate the max packet
-        * limit as below.
+        * The maxpacket size is derived from the formulas below, assuming a
+        * mult value of one:
+        * DWC3 revision 280A and prior:
+        * fifo_size = mult * (max_packet / mdwidth) + 1;
+        * maxpacket = mdwidth * (fifo_size - 1);
+        *
+        * DWC3 revision 290A and onwards:
+        * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
+        * maxpacket = mdwidth * ((fifo_size - 1) - 1) - mdwidth;
         */
-       if (dwc->maximum_speed >= USB_SPEED_SUPER)
-               size /= 3;
+       if (DWC3_VER_IS_PRIOR(DWC3, 290A))
+               maxpacket = mdwidth * (size - 1);
        else
-               size /= 2;
+               maxpacket = mdwidth * ((size - 1) - 1) - mdwidth;
 
+       /* Functionally, space for one max packet is sufficient */
+       size = min_t(int, maxpacket, 1024);
        usb_ep_set_maxpacket_limit(&dep->endpoint, size);
 
        dep->endpoint.max_streams = 16;
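
Plugging illustrative numbers into the comment's formulas shows the effect (the mdwidth and FIFO depth here are made up, not read from a GTXFIFOSIZ register):

#include <stdio.h>

int main(void)
{
	int mdwidth = 8, fifo_size = 130;	/* illustrative values */
	int pre_290a = mdwidth * (fifo_size - 1);
	int post_290a = mdwidth * ((fifo_size - 1) - 1) - mdwidth;

	/* the driver then clamps the result to the 1024-byte USB max */
	printf("<=280A: %d, >=290A: %d\n", pre_290a, post_290a);
	return 0;
}
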
@@ -4245,7 +4249,6 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
        }
 
        evt->count = 0;
-       evt->flags &= ~DWC3_EVENT_PENDING;
        ret = IRQ_HANDLED;
 
        /* Unmask interrupt */
@@ -4257,6 +4260,9 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
                dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
        }
 
+       /* Keep the clearing of DWC3_EVENT_PENDING at the end */
+       evt->flags &= ~DWC3_EVENT_PENDING;
+
        return ret;
 }
 
index 4585ee3..e0fa4b1 100644
@@ -122,8 +122,6 @@ struct ffs_ep {
        struct usb_endpoint_descriptor  *descs[3];
 
        u8                              num;
-
-       int                             status; /* P: epfile->mutex */
 };
 
 struct ffs_epfile {
@@ -227,6 +225,9 @@ struct ffs_io_data {
        bool use_sg;
 
        struct ffs_data *ffs;
+
+       int status;
+       struct completion done;
 };
 
 struct ffs_desc_helper {
@@ -707,12 +708,15 @@ static const struct file_operations ffs_ep0_operations = {
 
 static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
 {
+       struct ffs_io_data *io_data = req->context;
+
        ENTER();
-       if (req->context) {
-               struct ffs_ep *ep = _ep->driver_data;
-               ep->status = req->status ? req->status : req->actual;
-               complete(req->context);
-       }
+       if (req->status)
+               io_data->status = req->status;
+       else
+               io_data->status = req->actual;
+
+       complete(&io_data->done);
 }
 
 static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
@@ -1050,7 +1054,6 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                WARN(1, "%s: data_len == -EINVAL\n", __func__);
                ret = -EINVAL;
        } else if (!io_data->aio) {
-               DECLARE_COMPLETION_ONSTACK(done);
                bool interrupted = false;
 
                req = ep->req;
@@ -1066,7 +1069,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 
                io_data->buf = data;
 
-               req->context  = &done;
+               init_completion(&io_data->done);
+               req->context  = io_data;
                req->complete = ffs_epfile_io_complete;
 
                ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
@@ -1075,7 +1079,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 
                spin_unlock_irq(&epfile->ffs->eps_lock);
 
-               if (wait_for_completion_interruptible(&done)) {
+               if (wait_for_completion_interruptible(&io_data->done)) {
+                       spin_lock_irq(&epfile->ffs->eps_lock);
+                       if (epfile->ep != ep) {
+                               ret = -ESHUTDOWN;
+                               goto error_lock;
+                       }
                        /*
                         * To avoid race condition with ffs_epfile_io_complete,
                         * dequeue the request first then check
@@ -1083,17 +1092,18 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                         * condition with req->complete callback.
                         */
                        usb_ep_dequeue(ep->ep, req);
-                       wait_for_completion(&done);
-                       interrupted = ep->status < 0;
+                       spin_unlock_irq(&epfile->ffs->eps_lock);
+                       wait_for_completion(&io_data->done);
+                       interrupted = io_data->status < 0;
                }
 
                if (interrupted)
                        ret = -EINTR;
-               else if (io_data->read && ep->status > 0)
-                       ret = __ffs_epfile_read_data(epfile, data, ep->status,
+               else if (io_data->read && io_data->status > 0)
+                       ret = __ffs_epfile_read_data(epfile, data, io_data->status,
                                                     &io_data->data);
                else
-                       ret = ep->status;
+                       ret = io_data->status;
                goto error_mutex;
        } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
                ret = -ENOMEM;
index 6f5d45e..f51694f 100644
@@ -775,9 +775,13 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
        dev->qmult = qmult;
        snprintf(net->name, sizeof(net->name), "%s%%d", netname);
 
-       if (get_ether_addr(dev_addr, addr))
+       if (get_ether_addr(dev_addr, addr)) {
+               net->addr_assign_type = NET_ADDR_RANDOM;
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "self");
+       } else {
+               net->addr_assign_type = NET_ADDR_SET;
+       }
        eth_hw_addr_set(net, addr);
        if (get_ether_addr(host_addr, dev->host_mac))
                dev_warn(&g->dev,
@@ -844,6 +848,10 @@ struct net_device *gether_setup_name_default(const char *netname)
 
        eth_random_addr(dev->dev_mac);
        pr_warn("using random %s ethernet address\n", "self");
+
+       /* by default we always have a random MAC address */
+       net->addr_assign_type = NET_ADDR_RANDOM;
+
        eth_random_addr(dev->host_mac);
        pr_warn("using random %s ethernet address\n", "host");
 
@@ -871,7 +879,6 @@ int gether_register_netdev(struct net_device *net)
        dev = netdev_priv(net);
        g = dev->gadget;
 
-       net->addr_assign_type = NET_ADDR_RANDOM;
        eth_hw_addr_set(net, dev->dev_mac);
 
        status = register_netdev(net);
@@ -912,6 +919,7 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
        if (get_ether_addr(dev_addr, new_addr))
                return -EINVAL;
        memcpy(dev->dev_mac, new_addr, ETH_ALEN);
+       net->addr_assign_type = NET_ADDR_SET;
        return 0;
 }
 EXPORT_SYMBOL_GPL(gether_set_dev_addr);
index e5a6b6e..4303a32 100644
@@ -2371,6 +2371,7 @@ static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\
                                          const char *page, size_t len) \
 {                                                                      \
        struct f_uvc_opts *opts = to_f_uvc_opts(item);                  \
+       int size = min(sizeof(opts->aname), len + 1);                   \
        int ret = 0;                                                    \
                                                                        \
        mutex_lock(&opts->lock);                                        \
@@ -2379,8 +2380,9 @@ static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\
                goto end;                                               \
        }                                                               \
                                                                        \
-       ret = snprintf(opts->aname, min(sizeof(opts->aname), len),      \
-                       "%s", page);                                    \
+       ret = strscpy(opts->aname, page, size);                         \
+       if (ret == -E2BIG)                                              \
+               ret = size - 1;                                         \
                                                                        \
 end:                                                                   \
        mutex_unlock(&opts->lock);                                      \
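
The switch from snprintf() to strscpy() changes the return convention: strscpy() yields -E2BIG on truncation instead of the would-be length, which the store hook converts into the number of bytes actually kept. A userspace stand-in for that semantics (a simplified model of the kernel helper, not its exact implementation):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* simplified userspace model of the kernel's strscpy() */
static long strscpy(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (len == size) {			/* would truncate */
		if (size) {
			memcpy(dst, src, size - 1);
			dst[size - 1] = '\0';
		}
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char buf[8];
	long ret = strscpy(buf, "configfs string", sizeof(buf));

	if (ret == -E2BIG)
		ret = sizeof(buf) - 1;	/* bytes kept, as in the hunk */
	printf("%ld \"%s\"\n", ret, buf);
	return 0;
}
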
index a9bb455..d42bb33 100644
@@ -424,6 +424,9 @@ static void uvcg_video_pump(struct work_struct *work)
                        uvcg_queue_cancel(queue, 0);
                        break;
                }
+
+               /* Endpoint now owns the request */
+               req = NULL;
                video->req_int_count++;
        }
 
index 2417400..2acece1 100644
@@ -11,6 +11,7 @@
 #include <linux/ctype.h>
 #include <linux/debugfs.h>
 #include <linux/delay.h>
+#include <linux/idr.h>
 #include <linux/kref.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
@@ -36,6 +37,9 @@ MODULE_LICENSE("GPL");
 
 /*----------------------------------------------------------------------*/
 
+static DEFINE_IDA(driver_id_numbers);
+#define DRIVER_DRIVER_NAME_LENGTH_MAX  32
+
 #define RAW_EVENT_QUEUE_SIZE   16
 
 struct raw_event_queue {
@@ -161,6 +165,9 @@ struct raw_dev {
        /* Reference to misc device: */
        struct device                   *dev;
 
+       /* Make driver names unique */
+       int                             driver_id_number;
+
        /* Protected by lock: */
        enum dev_state                  state;
        bool                            gadget_registered;
@@ -189,6 +196,7 @@ static struct raw_dev *dev_new(void)
        spin_lock_init(&dev->lock);
        init_completion(&dev->ep0_done);
        raw_event_queue_init(&dev->queue);
+       dev->driver_id_number = -1;
        return dev;
 }
 
@@ -199,6 +207,9 @@ static void dev_free(struct kref *kref)
 
        kfree(dev->udc_name);
        kfree(dev->driver.udc_name);
+       kfree(dev->driver.driver.name);
+       if (dev->driver_id_number >= 0)
+               ida_free(&driver_id_numbers, dev->driver_id_number);
        if (dev->req) {
                if (dev->ep0_urb_queued)
                        usb_ep_dequeue(dev->gadget->ep0, dev->req);
@@ -419,9 +430,11 @@ out_put:
 static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
 {
        int ret = 0;
+       int driver_id_number;
        struct usb_raw_init arg;
        char *udc_driver_name;
        char *udc_device_name;
+       char *driver_driver_name;
        unsigned long flags;
 
        if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
@@ -440,36 +453,43 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
                return -EINVAL;
        }
 
+       driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL);
+       if (driver_id_number < 0)
+               return driver_id_number;
+
+       driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL);
+       if (!driver_driver_name) {
+               ret = -ENOMEM;
+               goto out_free_driver_id_number;
+       }
+       snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX,
+                               DRIVER_NAME ".%d", driver_id_number);
+
        udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
-       if (!udc_driver_name)
-               return -ENOMEM;
+       if (!udc_driver_name) {
+               ret = -ENOMEM;
+               goto out_free_driver_driver_name;
+       }
        ret = strscpy(udc_driver_name, &arg.driver_name[0],
                                UDC_NAME_LENGTH_MAX);
-       if (ret < 0) {
-               kfree(udc_driver_name);
-               return ret;
-       }
+       if (ret < 0)
+               goto out_free_udc_driver_name;
        ret = 0;
 
        udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
        if (!udc_device_name) {
-               kfree(udc_driver_name);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_free_udc_driver_name;
        }
        ret = strscpy(udc_device_name, &arg.device_name[0],
                                UDC_NAME_LENGTH_MAX);
-       if (ret < 0) {
-               kfree(udc_driver_name);
-               kfree(udc_device_name);
-               return ret;
-       }
+       if (ret < 0)
+               goto out_free_udc_device_name;
        ret = 0;
 
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->state != STATE_DEV_OPENED) {
                dev_dbg(dev->dev, "fail, device is not opened\n");
-               kfree(udc_driver_name);
-               kfree(udc_device_name);
                ret = -EINVAL;
                goto out_unlock;
        }
@@ -484,14 +504,25 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
        dev->driver.suspend = gadget_suspend;
        dev->driver.resume = gadget_resume;
        dev->driver.reset = gadget_reset;
-       dev->driver.driver.name = DRIVER_NAME;
+       dev->driver.driver.name = driver_driver_name;
        dev->driver.udc_name = udc_device_name;
        dev->driver.match_existing_only = 1;
+       dev->driver_id_number = driver_id_number;
 
        dev->state = STATE_DEV_INITIALIZED;
+       spin_unlock_irqrestore(&dev->lock, flags);
+       return ret;
 
 out_unlock:
        spin_unlock_irqrestore(&dev->lock, flags);
+out_free_udc_device_name:
+       kfree(udc_device_name);
+out_free_udc_driver_name:
+       kfree(udc_driver_name);
+out_free_driver_driver_name:
+       kfree(driver_driver_name);
+out_free_driver_id_number:
+       ida_free(&driver_id_numbers, driver_id_number);
        return ret;
 }
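The reworked raw_ioctl_init() above is a textbook goto-unwind ladder: each resource gets a label, and a failure jumps to the label that releases everything acquired before it, in reverse order, replacing the duplicated kfree() calls of the old error paths. A stripped-down sketch of the shape, with plain heap buffers standing in for the IDA id and the two name strings:

    #include <stdlib.h>

    static int init_example(void)
    {
            char *id, *name_a, *name_b;
            int ret;

            id = malloc(32);
            if (!id)
                    return -1;

            name_a = malloc(32);
            if (!name_a) {
                    ret = -1;
                    goto out_free_id;
            }

            name_b = malloc(32);
            if (!name_b) {
                    ret = -1;
                    goto out_free_name_a;
            }

            /* Success: ownership is handed off; nothing is freed here. */
            return 0;

    out_free_name_a:
            free(name_a);
    out_free_id:
            free(id);
            return ret;
    }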
 
index 6117ae8..cea10cd 100644 (file)
@@ -3016,6 +3016,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
        }
 
        udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
+       of_node_put(isp1301_node);
        if (!udc->isp1301_i2c_client) {
                return -EPROBE_DEFER;
        }
index 385be30..896c0d1 100644 (file)
@@ -76,14 +76,9 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!res) {
-               dev_err(&pdev->dev,
-                       "Found HC with no IRQ. Check %s setup!\n",
-                       dev_name(&pdev->dev));
-               return -ENODEV;
-       }
-       irq = res->start;
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
 
        hcd = __usb_create_hcd(&fsl_ehci_hc_driver, pdev->dev.parent,
                               &pdev->dev, dev_name(&pdev->dev), NULL);
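For context, platform_get_irq() is the preferred way to obtain a platform device's interrupt: unlike the removed IORESOURCE_IRQ lookup, it resolves hierarchical interrupt domains, returns -EPROBE_DEFER when the interrupt controller is not ready yet, and logs its own error message. A minimal probe sketch under those assumptions (example_isr and example_probe are illustrative names, not part of this driver):

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static irqreturn_t example_isr(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int example_probe(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);

            if (irq < 0)
                    return irq;     /* already logged; may be -EPROBE_DEFER */

            return devm_request_irq(&pdev->dev, irq, example_isr, 0,
                                    dev_name(&pdev->dev), NULL);
    }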
index 44a7e58..e5df175 100644 (file)
@@ -112,6 +112,9 @@ static struct platform_device *fsl_usb2_device_register(
                        goto error;
        }
 
+       pdev->dev.of_node = ofdev->dev.of_node;
+       pdev->dev.of_node_reused = true;
+
        retval = platform_device_add(pdev);
        if (retval)
                goto error;
index c54f2bc..0fdc014 100644 (file)
@@ -652,7 +652,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
  * It will release and re-acquire the lock while calling ACPI
  * method.
  */
-static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
+void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
                                u16 index, bool on, unsigned long *flags)
        __must_hold(&xhci->lock)
 {
index fac9492..dce6c0e 100644 (file)
@@ -61,6 +61,8 @@
 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI            0x461e
 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI          0x464e
 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI        0x51ed
+#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI           0xa71e
+#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI           0x7ec0
 
 #define PCI_DEVICE_ID_AMD_RENOIR_XHCI                  0x1639
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4                        0x43b9
@@ -269,7 +271,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
             pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI ||
-            pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI))
+            pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI))
                xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
 
        if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
index f0ab631..65858f6 100644 (file)
@@ -611,15 +611,37 @@ static int xhci_init(struct usb_hcd *hcd)
 
 static int xhci_run_finished(struct xhci_hcd *xhci)
 {
+       unsigned long   flags;
+       u32             temp;
+
+       /*
+        * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
+        * Protect the short window before the host is running with a lock.
+        */
+       spin_lock_irqsave(&xhci->lock, flags);
+
+       xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
+       temp = readl(&xhci->op_regs->command);
+       temp |= (CMD_EIE);
+       writel(temp, &xhci->op_regs->command);
+
+       xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
+       temp = readl(&xhci->ir_set->irq_pending);
+       writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
+
        if (xhci_start(xhci)) {
                xhci_halt(xhci);
+               spin_unlock_irqrestore(&xhci->lock, flags);
                return -ENODEV;
        }
+
        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
 
        if (xhci->quirks & XHCI_NEC_HOST)
                xhci_ring_cmd_db(xhci);
 
+       spin_unlock_irqrestore(&xhci->lock, flags);
+
        return 0;
 }
 
@@ -668,19 +690,6 @@ int xhci_run(struct usb_hcd *hcd)
        temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
        writel(temp, &xhci->ir_set->irq_control);
 
-       /* Set the HCD state before we enable the irqs */
-       temp = readl(&xhci->op_regs->command);
-       temp |= (CMD_EIE);
-       xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-                       "// Enable interrupts, cmd = 0x%x.", temp);
-       writel(temp, &xhci->op_regs->command);
-
-       temp = readl(&xhci->ir_set->irq_pending);
-       xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-                       "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
-                       xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
-       writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
-
        if (xhci->quirks & XHCI_NEC_HOST) {
                struct xhci_command *command;
 
@@ -782,6 +791,8 @@ static void xhci_stop(struct usb_hcd *hcd)
 void xhci_shutdown(struct usb_hcd *hcd)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       unsigned long flags;
+       int i;
 
        if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
                usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
@@ -797,12 +808,21 @@ void xhci_shutdown(struct usb_hcd *hcd)
                del_timer_sync(&xhci->shared_hcd->rh_timer);
        }
 
-       spin_lock_irq(&xhci->lock);
+       spin_lock_irqsave(&xhci->lock, flags);
        xhci_halt(xhci);
+
+       /* Power off USB2 ports */
+       for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
+               xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags);
+
+       /* Power off USB3 ports */
+       for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
+               xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags);
+
        /* Workaround for spurious wakeups at shutdown with HSW */
        if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
                xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
-       spin_unlock_irq(&xhci->lock);
+       spin_unlock_irqrestore(&xhci->lock, flags);
 
        xhci_cleanup_msix(xhci);
 
@@ -1107,7 +1127,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 {
        u32                     command, temp = 0;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
-       struct usb_hcd          *secondary_hcd;
        int                     retval = 0;
        bool                    comp_timer_running = false;
        bool                    pending_portevent = false;
@@ -1214,23 +1233,19 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                 * first with the primary HCD, and then with the secondary HCD.
                 * If we don't do the same, the host will never be started.
                 */
-               if (!usb_hcd_is_primary_hcd(hcd))
-                       secondary_hcd = hcd;
-               else
-                       secondary_hcd = xhci->shared_hcd;
-
                xhci_dbg(xhci, "Initialize the xhci_hcd\n");
-               retval = xhci_init(hcd->primary_hcd);
+               retval = xhci_init(hcd);
                if (retval)
                        return retval;
                comp_timer_running = true;
 
                xhci_dbg(xhci, "Start the primary HCD\n");
-               retval = xhci_run(hcd->primary_hcd);
-               if (!retval && secondary_hcd) {
+               retval = xhci_run(hcd);
+               if (!retval && xhci->shared_hcd) {
                        xhci_dbg(xhci, "Start the secondary HCD\n");
-                       retval = xhci_run(secondary_hcd);
+                       retval = xhci_run(xhci->shared_hcd);
                }
+
                hcd->state = HC_STATE_SUSPENDED;
                if (xhci->shared_hcd)
                        xhci->shared_hcd->state = HC_STATE_SUSPENDED;
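A note on the flags plumbing in the xhci_shutdown() hunk above: xhci_set_port_power() may release and re-acquire xhci->lock around an ACPI method call, which is why the caller switches to the irqsave variant and passes its saved flags down by pointer. The contract, in the abstract (a sketch only, not the driver's actual helper):

    #include <linux/spinlock.h>

    /* A helper that may drop the caller's lock needs the caller's saved
     * flags so it can restore them and save them again on re-entry. */
    static void helper_that_may_sleep(spinlock_t *lock, unsigned long *flags)
    {
            spin_unlock_irqrestore(lock, *flags);
            /* ... slow or sleepable work, e.g. an ACPI method call ... */
            spin_lock_irqsave(lock, *flags);
    }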
index 0bd76c9..28aaf03 100644 (file)
@@ -2196,6 +2196,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
 int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
 struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
+void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index,
+                        bool on, unsigned long *flags);
 
 void xhci_hc_died(struct xhci_hcd *xhci);
 
index b440d33..d5a3986 100644 (file)
@@ -1023,6 +1023,9 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
        { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
        { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+       /* Belimo Automation devices */
+       { USB_DEVICE(FTDI_VID, BELIMO_ZTH_PID) },
+       { USB_DEVICE(FTDI_VID, BELIMO_ZIP_PID) },
        /* ICP DAS I-756xU devices */
        { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
        { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
index d1a9564..4e92c16 100644 (file)
 #define CHETCO_SEASMART_ANALOG_PID     0xA5AF /* SeaSmart Analog Adapter */
 
 /*
+ * Belimo Automation
+ */
+#define BELIMO_ZTH_PID                 0x8050
+#define BELIMO_ZIP_PID                 0xC811
+
+/*
  * Unjo AB
  */
 #define UNJO_VID                       0x22B7
index a7b3c15..feba2a8 100644 (file)
@@ -166,6 +166,7 @@ static const struct usb_device_id edgeport_2port_id_table[] = {
        { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
        { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
        { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
+       { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
        { }
 };
 
@@ -204,6 +205,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
        { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
        { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
+       { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
        { }
 };
 
index 52cbc35..9a6f742 100644 (file)
 //
 // Definitions for other product IDs
 #define ION_DEVICE_ID_MT4X56USB                        0x1403  // OEM device
+#define ION_DEVICE_ID_E5805A                   0x1A01  // OEM device (rebranded Edgeport/4)
 
 
 #define        GENERATION_ID_FROM_USB_PRODUCT_ID(ProductId)                            \
index e60425b..de59fa9 100644 (file)
@@ -252,10 +252,12 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EG95                   0x0195
 #define QUECTEL_PRODUCT_BG96                   0x0296
 #define QUECTEL_PRODUCT_EP06                   0x0306
+#define QUECTEL_PRODUCT_EM05G                  0x030a
 #define QUECTEL_PRODUCT_EM12                   0x0512
 #define QUECTEL_PRODUCT_RM500Q                 0x0800
 #define QUECTEL_PRODUCT_EC200S_CN              0x6002
 #define QUECTEL_PRODUCT_EC200T                 0x6026
+#define QUECTEL_PRODUCT_RM500K                 0x7001
 
 #define CMOTECH_VENDOR_ID                      0x16d8
 #define CMOTECH_PRODUCT_6001                   0x6001
@@ -432,6 +434,8 @@ static void option_instat_callback(struct urb *urb);
 #define CINTERION_PRODUCT_CLS8                 0x00b0
 #define CINTERION_PRODUCT_MV31_MBIM            0x00b3
 #define CINTERION_PRODUCT_MV31_RMNET           0x00b7
+#define CINTERION_PRODUCT_MV31_2_MBIM          0x00b8
+#define CINTERION_PRODUCT_MV31_2_RMNET         0x00b9
 #define CINTERION_PRODUCT_MV32_WA              0x00f1
 #define CINTERION_PRODUCT_MV32_WB              0x00f2
 
@@ -1132,6 +1136,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+       { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
+         .driver_info = RSVD(6) | ZLP },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
@@ -1145,6 +1151,7 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = ZLP },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
 
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
@@ -1277,6 +1284,7 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff),    /* Telit LE910Cx (RNDIS) */
          .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x1250, 0xff, 0x00, 0x00) },   /* Telit LE910Cx (rmnet) */
        { USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
        { USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
@@ -1979,6 +1987,10 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(3)},
        { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
          .driver_info = RSVD(0)},
+       { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_MBIM, 0xff),
+         .driver_info = RSVD(3)},
+       { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_RMNET, 0xff),
+         .driver_info = RSVD(0)},
        { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
          .driver_info = RSVD(3)},
        { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
index 3506c47..40b1ab3 100644 (file)
@@ -436,22 +436,27 @@ static int pl2303_detect_type(struct usb_serial *serial)
                break;
        case 0x200:
                switch (bcdDevice) {
-               case 0x100:
+               case 0x100:     /* GC */
                case 0x105:
+                       return TYPE_HXN;
+               case 0x300:     /* GT / TA */
+                       if (pl2303_supports_hx_status(serial))
+                               return TYPE_TA;
+                       fallthrough;
                case 0x305:
+               case 0x400:     /* GL */
                case 0x405:
+                       return TYPE_HXN;
+               case 0x500:     /* GE / TB */
+                       if (pl2303_supports_hx_status(serial))
+                               return TYPE_TB;
+                       fallthrough;
+               case 0x505:
+               case 0x600:     /* GS */
                case 0x605:
-                       /*
-                        * Assume it's an HXN-type if the device doesn't
-                        * support the old read request value.
-                        */
-                       if (!pl2303_supports_hx_status(serial))
-                               return TYPE_HXN;
-                       break;
-               case 0x300:
-                       return TYPE_TA;
-               case 0x500:
-                       return TYPE_TB;
+               case 0x700:     /* GR */
+               case 0x705:
+                       return TYPE_HXN;
                }
                break;
        }
index ee0e520..c472475 100644 (file)
@@ -1718,6 +1718,7 @@ void typec_set_pwr_opmode(struct typec_port *port,
                        partner->usb_pd = 1;
                        sysfs_notify(&partner_dev->kobj, NULL,
                                     "supports_usb_power_delivery");
+                       kobject_uevent(&partner_dev->kobj, KOBJ_CHANGE);
                }
                put_device(partner_dev);
        }
index 557f392..073fd2e 100644 (file)
@@ -56,7 +56,6 @@ config TYPEC_WCOVE
        tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
        depends on ACPI
        depends on MFD_INTEL_PMC_BXT
-       depends on INTEL_SOC_PMIC
        depends on BXT_WC_PMIC_OPREGION
        help
          This driver adds support for USB Type-C on Intel Broxton platforms
index 1b6d46b..e85c1d7 100644 (file)
@@ -1962,6 +1962,8 @@ static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_c
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 
        ndev->event_cbs[idx] = *cb;
+       if (is_ctrl_vq_idx(mvdev, idx))
+               mvdev->cvq.event_cb = *cb;
 }
 
 static void mlx5_cvq_notify(struct vringh *vring)
@@ -2174,7 +2176,6 @@ static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features)
 static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
 {
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
-       struct mlx5_control_vq *cvq = &mvdev->cvq;
        int err;
        int i;
 
@@ -2184,16 +2185,6 @@ static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
                        goto err_vq;
        }
 
-       if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
-               err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
-                                       MLX5_CVQ_MAX_ENT, false,
-                                       (struct vring_desc *)(uintptr_t)cvq->desc_addr,
-                                       (struct vring_avail *)(uintptr_t)cvq->driver_addr,
-                                       (struct vring_used *)(uintptr_t)cvq->device_addr);
-               if (err)
-                       goto err_vq;
-       }
-
        return 0;
 
 err_vq:
@@ -2466,6 +2457,21 @@ static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
        ndev->mvdev.cvq.ready = false;
 }
 
+static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
+{
+       struct mlx5_control_vq *cvq = &mvdev->cvq;
+       int err = 0;
+
+       if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
+               err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
+                                       MLX5_CVQ_MAX_ENT, false,
+                                       (struct vring_desc *)(uintptr_t)cvq->desc_addr,
+                                       (struct vring_avail *)(uintptr_t)cvq->driver_addr,
+                                       (struct vring_used *)(uintptr_t)cvq->device_addr);
+
+       return err;
+}
+
 static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 {
        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -2478,6 +2484,11 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 
        if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+                       err = setup_cvq_vring(mvdev);
+                       if (err) {
+                               mlx5_vdpa_warn(mvdev, "failed to setup control VQ vring\n");
+                               goto err_setup;
+                       }
                        err = setup_driver(mvdev);
                        if (err) {
                                mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
index 776ad74..3bc27de 100644 (file)
@@ -1476,16 +1476,12 @@ static char *vduse_devnode(struct device *dev, umode_t *mode)
        return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev));
 }
 
-static void vduse_mgmtdev_release(struct device *dev)
-{
-}
-
-static struct device vduse_mgmtdev = {
-       .init_name = "vduse",
-       .release = vduse_mgmtdev_release,
+struct vduse_mgmt_dev {
+       struct vdpa_mgmt_dev mgmt_dev;
+       struct device dev;
 };
 
-static struct vdpa_mgmt_dev mgmt_dev;
+static struct vduse_mgmt_dev *vduse_mgmt;
 
 static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
 {
@@ -1510,7 +1506,7 @@ static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
        }
        set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops);
        vdev->vdpa.dma_dev = &vdev->vdpa.dev;
-       vdev->vdpa.mdev = &mgmt_dev;
+       vdev->vdpa.mdev = &vduse_mgmt->mgmt_dev;
 
        return 0;
 }
@@ -1556,34 +1552,52 @@ static struct virtio_device_id id_table[] = {
        { 0 },
 };
 
-static struct vdpa_mgmt_dev mgmt_dev = {
-       .device = &vduse_mgmtdev,
-       .id_table = id_table,
-       .ops = &vdpa_dev_mgmtdev_ops,
-};
+static void vduse_mgmtdev_release(struct device *dev)
+{
+       struct vduse_mgmt_dev *mgmt_dev;
+
+       mgmt_dev = container_of(dev, struct vduse_mgmt_dev, dev);
+       kfree(mgmt_dev);
+}
 
 static int vduse_mgmtdev_init(void)
 {
        int ret;
 
-       ret = device_register(&vduse_mgmtdev);
-       if (ret)
+       vduse_mgmt = kzalloc(sizeof(*vduse_mgmt), GFP_KERNEL);
+       if (!vduse_mgmt)
+               return -ENOMEM;
+
+       ret = dev_set_name(&vduse_mgmt->dev, "vduse");
+       if (ret) {
+               kfree(vduse_mgmt);
                return ret;
+       }
 
-       ret = vdpa_mgmtdev_register(&mgmt_dev);
+       vduse_mgmt->dev.release = vduse_mgmtdev_release;
+
+       ret = device_register(&vduse_mgmt->dev);
        if (ret)
-               goto err;
+               goto dev_reg_err;
 
-       return 0;
-err:
-       device_unregister(&vduse_mgmtdev);
+       vduse_mgmt->mgmt_dev.id_table = id_table;
+       vduse_mgmt->mgmt_dev.ops = &vdpa_dev_mgmtdev_ops;
+       vduse_mgmt->mgmt_dev.device = &vduse_mgmt->dev;
+       ret = vdpa_mgmtdev_register(&vduse_mgmt->mgmt_dev);
+       if (ret)
+               device_unregister(&vduse_mgmt->dev);
+
+       return ret;
+
+dev_reg_err:
+       put_device(&vduse_mgmt->dev);
        return ret;
 }
 
 static void vduse_mgmtdev_exit(void)
 {
-       vdpa_mgmtdev_unregister(&mgmt_dev);
-       device_unregister(&vduse_mgmtdev);
+       vdpa_mgmtdev_unregister(&vduse_mgmt->mgmt_dev);
+       device_unregister(&vduse_mgmt->dev);
 }
 
 static int vduse_init(void)
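The restructuring above follows the standard rule for dynamically allocated struct device instances: the memory must be freed from the device's release() callback via container_of(), and a device_register() failure must be answered with put_device(), never kfree(), so that the release path runs exactly once. A condensed sketch of the pattern (the wrapper type and names are illustrative):

    #include <linux/device.h>
    #include <linux/slab.h>

    struct my_wrapper {
            struct device dev;      /* embedded: freed by release(), not kfree() */
    };

    static void my_release(struct device *dev)
    {
            kfree(container_of(dev, struct my_wrapper, dev));
    }

    static int my_register(void)
    {
            struct my_wrapper *w = kzalloc(sizeof(*w), GFP_KERNEL);
            int ret;

            if (!w)
                    return -ENOMEM;

            ret = dev_set_name(&w->dev, "my-wrapper");
            if (ret) {
                    kfree(w);               /* no reference taken yet */
                    return ret;
            }

            w->dev.release = my_release;
            ret = device_register(&w->dev);
            if (ret)
                    put_device(&w->dev);    /* release() will kfree(w) */

            return ret;
    }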
index 61e71c1..e60b06f 100644 (file)
@@ -549,6 +549,16 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
        if (!iommu_group)
                return ERR_PTR(-EINVAL);
 
+       /*
+        * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
+        * restore cache coherency. It has to be checked here because it is only
+        * valid for cases where we are using iommu groups.
+        */
+       if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
+               iommu_group_put(iommu_group);
+               return ERR_PTR(-EINVAL);
+       }
+
        group = vfio_group_get_from_iommu(iommu_group);
        if (!group)
                group = vfio_create_group(iommu_group, VFIO_IOMMU);
@@ -601,13 +611,6 @@ static int __vfio_register_dev(struct vfio_device *device,
 
 int vfio_register_group_dev(struct vfio_device *device)
 {
-       /*
-        * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
-        * restore cache coherency.
-        */
-       if (!iommu_capable(device->dev->bus, IOMMU_CAP_CACHE_COHERENCY))
-               return -EINVAL;
-
        return __vfio_register_dev(device,
                vfio_group_find_or_alloc(device->dev));
 }
index 5ad2596..23dcbfd 100644 (file)
@@ -1209,7 +1209,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
        vhost_dev_stop(&v->vdev);
        vhost_vdpa_free_domain(v);
        vhost_vdpa_config_put(v);
-       vhost_dev_cleanup(&v->vdev);
+       vhost_vdpa_cleanup(v);
        mutex_unlock(&d->mutex);
 
        atomic_dec(&v->opened);
index fa23bf0..bd4dc97 100644 (file)
@@ -1148,6 +1148,7 @@ int sti_call(const struct sti_struct *sti, unsigned long func,
        return ret;
 }
 
+#if defined(CONFIG_FB_STI)
 /* check if given fb_info is the primary device */
 int fb_is_primary_device(struct fb_info *info)
 {
@@ -1163,6 +1164,7 @@ int fb_is_primary_device(struct fb_info *info)
        return (sti->info == info);
 }
 EXPORT_SYMBOL(fb_is_primary_device);
+#endif
 
 MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer");
 MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines");
index 52f731a..519313b 100644 (file)
@@ -560,8 +560,7 @@ int au1100fb_drv_suspend(struct platform_device *dev, pm_message_t state)
        /* Blank the LCD */
        au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
 
-       if (fbdev->lcdclk)
-               clk_disable(fbdev->lcdclk);
+       clk_disable(fbdev->lcdclk);
 
        memcpy(&fbregs, fbdev->regs, sizeof(struct au1100fb_regs));
 
@@ -577,8 +576,7 @@ int au1100fb_drv_resume(struct platform_device *dev)
 
        memcpy(fbdev->regs, &fbregs, sizeof(struct au1100fb_regs));
 
-       if (fbdev->lcdclk)
-               clk_enable(fbdev->lcdclk);
+       clk_enable(fbdev->lcdclk);
 
        /* Unblank the LCD */
        au1100fb_fb_blank(VESA_NO_BLANKING, &fbdev->info);
index 3d47c34..51e072c 100644 (file)
@@ -2184,12 +2184,6 @@ static struct pci_driver cirrusfb_pci_driver = {
        .id_table       = cirrusfb_pci_table,
        .probe          = cirrusfb_pci_register,
        .remove         = cirrusfb_pci_unregister,
-#ifdef CONFIG_PM
-#if 0
-       .suspend        = cirrusfb_pci_suspend,
-       .resume         = cirrusfb_pci_resume,
-#endif
-#endif
 };
 #endif /* CONFIG_PCI */
 
index c4e9171..1a9aa12 100644 (file)
@@ -2469,6 +2469,11 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
        if (charcount != 256 && charcount != 512)
                return -EINVAL;
 
+       /* font bigger than screen resolution? */
+       if (w > FBCON_SWAP(info->var.rotate, info->var.xres, info->var.yres) ||
+           h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres))
+               return -EINVAL;
+
        /* Make sure drawing engine can handle the font */
        if (!(info->pixmap.blit_x & (1 << (font->width - 1))) ||
            !(info->pixmap.blit_y & (1 << (font->height - 1))))
@@ -2731,6 +2736,34 @@ void fbcon_update_vcs(struct fb_info *info, bool all)
 }
 EXPORT_SYMBOL(fbcon_update_vcs);
 
+/* let fbcon check if it supports a new screen resolution */
+int fbcon_modechange_possible(struct fb_info *info, struct fb_var_screeninfo *var)
+{
+       struct fbcon_ops *ops = info->fbcon_par;
+       struct vc_data *vc;
+       unsigned int i;
+
+       WARN_CONSOLE_UNLOCKED();
+
+       if (!ops)
+               return 0;
+
+       /* prevent setting a screen size which is smaller than font size */
+       for (i = first_fb_vc; i <= last_fb_vc; i++) {
+               vc = vc_cons[i].d;
+               if (!vc || vc->vc_mode != KD_TEXT ||
+                          fbcon_info_from_console(i) != info)
+                       continue;
+
+               if (vc->vc_font.width  > FBCON_SWAP(var->rotate, var->xres, var->yres) ||
+                   vc->vc_font.height > FBCON_SWAP(var->rotate, var->yres, var->xres))
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(fbcon_modechange_possible);
+
 int fbcon_mode_deleted(struct fb_info *info,
                       struct fb_videomode *mode)
 {
index afa2863..7ee6eb2 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/major.h>
 #include <linux/slab.h>
+#include <linux/sysfb.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/vt.h>
@@ -510,7 +511,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate,
 
                while (n && (n * (logo->width + 8) - 8 > xres))
                        --n;
-               image.dx = (xres - n * (logo->width + 8) - 8) / 2;
+               image.dx = (xres - (n * (logo->width + 8) - 8)) / 2;
                image.dy = y ?: (yres - logo->height) / 2;
        } else {
                image.dx = 0;
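The one-character change above fixes an operator-precedence slip in the logo centring math. A row of n logos with an 8-pixel gap occupies n*(width+8)-8 pixels, so with illustrative numbers xres = 1024, width = 80 and n = 3 the row is 3*88-8 = 256 pixels wide: the old expression evaluated (1024 - 264 - 8)/2 = 376, which draws the row 8 pixels left of centre, while the parenthesised form gives (1024 - 256)/2 = 384, which centres it exactly.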
@@ -1016,6 +1017,16 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
        if (ret)
                return ret;
 
+       /* verify that virtual resolution >= physical resolution */
+       if (var->xres_virtual < var->xres ||
+           var->yres_virtual < var->yres) {
+               pr_warn("WARNING: fbcon: Driver '%s' missed to adjust virtual screen size (%ux%u vs. %ux%u)\n",
+                       info->fix.id,
+                       var->xres_virtual, var->yres_virtual,
+                       var->xres, var->yres);
+               return -EINVAL;
+       }
+
        if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_NOW)
                return 0;
 
@@ -1106,7 +1117,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
                        return -EFAULT;
                console_lock();
                lock_fb_info(info);
-               ret = fb_set_var(info, &var);
+               ret = fbcon_modechange_possible(info, &var);
+               if (!ret)
+                       ret = fb_set_var(info, &var);
                if (!ret)
                        fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
                unlock_fb_info(info);
@@ -1752,6 +1765,17 @@ int remove_conflicting_framebuffers(struct apertures_struct *a,
                do_free = true;
        }
 
+       /*
+        * If a driver asked to unregister a platform device registered by
+        * sysfb, then it can be assumed that this is a driver for a display
+        * that is set up by the system firmware and has a generic driver.
+        *
+        * Drivers for devices that don't have a generic driver will never
+        * ask for this, so let's assume that a real driver for the display
+        * was already probed, and prevent sysfb from registering devices later.
+        */
+       sysfb_disable();
+
        mutex_lock(&registration_lock);
        do_remove_conflicting_framebuffers(a, name, primary);
        mutex_unlock(&registration_lock);
index a957996..5647fca 100644 (file)
@@ -472,7 +472,7 @@ static int intelfb_pci_register(struct pci_dev *pdev,
        struct fb_info *info;
        struct intelfb_info *dinfo;
        int i, err, dvo;
-       int aperture_size, stolen_size;
+       int aperture_size, stolen_size = 0;
        struct agp_kern_info gtt_info;
        int agp_memtype;
        const char *s;
@@ -571,7 +571,7 @@ static int intelfb_pci_register(struct pci_dev *pdev,
                return -ENODEV;
        }
 
-       if (intelfbhw_get_memory(pdev, &aperture_size,&stolen_size)) {
+       if (intelfbhw_get_memory(pdev, &aperture_size, &stolen_size)) {
                cleanup(dinfo);
                return -ENODEV;
        }
index 57aff74..2086e06 100644 (file)
@@ -201,13 +201,11 @@ int intelfbhw_get_memory(struct pci_dev *pdev, int *aperture_size,
        case PCI_DEVICE_ID_INTEL_945GME:
        case PCI_DEVICE_ID_INTEL_965G:
        case PCI_DEVICE_ID_INTEL_965GM:
-               /* 915, 945 and 965 chipsets support a 256MB aperture.
-                  Aperture size is determined by inspected the
-                  base address of the aperture. */
-               if (pci_resource_start(pdev, 2) & 0x08000000)
-                       *aperture_size = MB(128);
-               else
-                       *aperture_size = MB(256);
+               /*
+                * 915, 945 and 965 chipsets support 64MB, 128MB or 256MB
+                * aperture. Determine size from PCI resource length.
+                */
+               *aperture_size = pci_resource_len(pdev, 2);
                break;
        default:
                if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
index c90eb8c..66aff6c 100644 (file)
@@ -359,7 +359,7 @@ static void sossi_set_bits_per_cycle(int bpc)
        int bus_pick_count, bus_pick_width;
 
        /*
-        * We set explicitly the the bus_pick_count as well, although
+        * We set explicitly the bus_pick_count as well, although
         * with remapping/reordering disabled it will be calculated by HW
         * as (32 / bus_pick_width).
         */
index 6fbfeb0..170463a 100644 (file)
@@ -143,7 +143,7 @@ int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
        /*
         * In OMAP5+, the HFBITCLK must be divided by 2 before issuing the
         * HDMI_PHYPWRCMD_LDOON command.
-       */
+        */
        if (phy_feat->bist_ctrl)
                REG_FLD_MOD(phy->base, HDMI_TXPHY_BIST_CONTROL, 1, 11, 11);
 
index 043cc8f..c3cd1e1 100644 (file)
@@ -381,7 +381,7 @@ pxa3xx_gcu_write(struct file *file, const char *buff,
        struct pxa3xx_gcu_batch *buffer;
        struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);
 
-       int words = count / 4;
+       size_t words = count / 4;
 
        /* Does not need to be atomic. There's a lock in user space,
         * but anyhow, this is just for statistics. */
index 2c19856..f96ce88 100644 (file)
@@ -237,8 +237,7 @@ static int simplefb_clocks_get(struct simplefb_par *par,
                if (IS_ERR(clock)) {
                        if (PTR_ERR(clock) == -EPROBE_DEFER) {
                                while (--i >= 0) {
-                                       if (par->clks[i])
-                                               clk_put(par->clks[i]);
+                                       clk_put(par->clks[i]);
                                }
                                kfree(par->clks);
                                return -EPROBE_DEFER;
index bcacfb6..d119b1d 100644 (file)
@@ -96,7 +96,7 @@ static const struct fb_fix_screeninfo xxxfb_fix = {
 
     /*
      *         Modern graphical hardware not only supports pipelines but some 
-     *  also support multiple monitors where each display can have its  
+     *  also support multiple monitors where each display can have
      *  its own unique data. In this case each display could be  
      *  represented by a separate framebuffer device thus a separate 
      *  struct fb_info. Now the struct xxx_par represents the graphics
@@ -838,9 +838,9 @@ static void xxxfb_remove(struct pci_dev *dev)
  *
  *      See Documentation/driver-api/pm/devices.rst for more information
  */
-static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg)
+static int xxxfb_suspend(struct device *dev)
 {
-       struct fb_info *info = pci_get_drvdata(dev);
+       struct fb_info *info = dev_get_drvdata(dev);
        struct xxxfb_par *par = info->par;
 
        /* suspend here */
@@ -853,9 +853,9 @@ static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg)
  *
  *      See Documentation/driver-api/pm/devices.rst for more information
  */
-static int xxxfb_resume(struct pci_dev *dev)
+static int xxxfb_resume(struct device *dev)
 {
-       struct fb_info *info = pci_get_drvdata(dev);
+       struct fb_info *info = dev_get_drvdata(dev);
        struct xxxfb_par *par = info->par;
 
        /* resume here */
@@ -873,14 +873,15 @@ static const struct pci_device_id xxxfb_id_table[] = {
        { 0, }
 };
 
+static SIMPLE_DEV_PM_OPS(xxxfb_pm_ops, xxxfb_suspend, xxxfb_resume);
+
 /* For PCI drivers */
 static struct pci_driver xxxfb_driver = {
        .name =         "xxxfb",
        .id_table =     xxxfb_id_table,
        .probe =        xxxfb_probe,
        .remove =       xxxfb_remove,
-       .suspend =      xxxfb_suspend, /* optional but recommended */
-       .resume =       xxxfb_resume,  /* optional but recommended */
+       .driver.pm =    xxxfb_pm_ops, /* optional but recommended */
 };
 
 MODULE_DEVICE_TABLE(pci, xxxfb_id_table);
index 90ce16b..f422f9c 100644 (file)
@@ -632,16 +632,19 @@ static int __init sev_guest_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct snp_guest_dev *snp_dev;
        struct miscdevice *misc;
+       void __iomem *mapping;
        int ret;
 
        if (!dev->platform_data)
                return -ENODEV;
 
        data = (struct sev_guest_platform_data *)dev->platform_data;
-       layout = (__force void *)ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
-       if (!layout)
+       mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
+       if (!mapping)
                return -ENODEV;
 
+       layout = (__force void *)mapping;
+
        ret = -ENOMEM;
        snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
        if (!snp_dev)
@@ -706,7 +709,7 @@ e_free_response:
 e_free_request:
        free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
 e_unmap:
-       iounmap(layout);
+       iounmap(mapping);
        return ret;
 }
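The sev-guest change above is mostly type hygiene: the __iomem cookie returned by ioremap_encrypted() is kept in its own variable so the error path hands the same annotated pointer back to iounmap(), and the single __force cast to a CPU pointer is confined to the one spot that needs it. In outline (secrets_layout is an illustrative type name):

    void __iomem *mapping;
    struct secrets_layout *layout;

    mapping = ioremap_encrypted(gpa, PAGE_SIZE);
    if (!mapping)
            return -ENODEV;

    layout = (__force struct secrets_layout *)mapping;
    /* ... on any later failure ... */
    iounmap(mapping);       /* unmap the cookie, not the cast-away alias */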
 
index a6dc8b5..e1556d2 100644 (file)
@@ -29,6 +29,19 @@ menuconfig VIRTIO_MENU
 
 if VIRTIO_MENU
 
+config VIRTIO_HARDEN_NOTIFICATION
+        bool "Harden virtio notification"
+        help
+          Enable this to harden the device notifications and suppress
+          those that happen at a time when notifications are illegal.
+
+          Experimental: Note that several drivers still have bugs that
+          may cause crashes or hangs when correct handling of
+          notifications is enforced; depending on the subset of
+          drivers and devices you use, this may or may not work.
+
+          If unsure, say N.
+
 config VIRTIO_PCI
        tristate "PCI driver for virtio devices"
        depends on PCI
index 6bace84..7deeed3 100644 (file)
@@ -219,6 +219,7 @@ static int virtio_features_ok(struct virtio_device *dev)
  * */
 void virtio_reset_device(struct virtio_device *dev)
 {
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
        /*
         * The below virtio_synchronize_cbs() guarantees that any
         * interrupt for this line arriving after
@@ -227,6 +228,7 @@ void virtio_reset_device(struct virtio_device *dev)
         */
        virtio_break_device(dev);
        virtio_synchronize_cbs(dev);
+#endif
 
        dev->config->reset(dev);
 }
index c9bec38..083ff1e 100644 (file)
@@ -62,6 +62,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/virtio.h>
@@ -556,6 +557,28 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
        .synchronize_cbs = vm_synchronize_cbs,
 };
 
+#ifdef CONFIG_PM_SLEEP
+static int virtio_mmio_freeze(struct device *dev)
+{
+       struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);
+
+       return virtio_device_freeze(&vm_dev->vdev);
+}
+
+static int virtio_mmio_restore(struct device *dev)
+{
+       struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);
+
+       if (vm_dev->version == 1)
+               writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
+
+       return virtio_device_restore(&vm_dev->vdev);
+}
+
+static const struct dev_pm_ops virtio_mmio_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(virtio_mmio_freeze, virtio_mmio_restore)
+};
+#endif
 
 static void virtio_mmio_release_dev(struct device *_d)
 {
@@ -799,6 +822,9 @@ static struct platform_driver virtio_mmio_driver = {
                .name   = "virtio-mmio",
                .of_match_table = virtio_mmio_match,
                .acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
+#ifdef CONFIG_PM_SLEEP
+               .pm     = &virtio_mmio_pm_ops,
+#endif
        },
 };
 
index b790f30..fa2a944 100644 (file)
@@ -220,8 +220,6 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
 
        check_offsets();
 
-       mdev->pci_dev = pci_dev;
-
        /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
        if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
                return -ENODEV;
index 13a7348..643ca77 100644 (file)
@@ -111,7 +111,12 @@ struct vring_virtqueue {
        /* Number we've added since last sync. */
        unsigned int num_added;
 
-       /* Last used index we've seen. */
+       /* Last used index we've seen.
+        * For the split ring, it just contains the last used index.
+        * For the packed ring:
+        * bits 0 to VRING_PACKED_EVENT_F_WRAP_CTR - 1 hold the last used index;
+        * bit VRING_PACKED_EVENT_F_WRAP_CTR holds the used wrap counter.
+        */
        u16 last_used_idx;
 
        /* Hint for event idx: already triggered no need to disable. */
@@ -154,9 +159,6 @@ struct vring_virtqueue {
                        /* Driver ring wrap counter. */
                        bool avail_wrap_counter;
 
-                       /* Device ring wrap counter. */
-                       bool used_wrap_counter;
-
                        /* Avail used flags. */
                        u16 avail_used_flags;
 
@@ -933,7 +935,7 @@ static struct virtqueue *vring_create_virtqueue_split(
        for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
                queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
                                          &dma_addr,
-                                         GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+                                         GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
                if (queue)
                        break;
                if (!may_reduce_num)
@@ -973,6 +975,15 @@ static struct virtqueue *vring_create_virtqueue_split(
 /*
  * Packed ring specific functions - *_packed().
  */
+static inline bool packed_used_wrap_counter(u16 last_used_idx)
+{
+       return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
+}
+
+static inline u16 packed_last_used(u16 last_used_idx)
+{
+       return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
+}
 
 static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
                                     struct vring_desc_extra *extra)
@@ -1406,8 +1417,14 @@ static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
 
 static inline bool more_used_packed(const struct vring_virtqueue *vq)
 {
-       return is_used_desc_packed(vq, vq->last_used_idx,
-                       vq->packed.used_wrap_counter);
+       u16 last_used;
+       u16 last_used_idx;
+       bool used_wrap_counter;
+
+       last_used_idx = READ_ONCE(vq->last_used_idx);
+       last_used = packed_last_used(last_used_idx);
+       used_wrap_counter = packed_used_wrap_counter(last_used_idx);
+       return is_used_desc_packed(vq, last_used, used_wrap_counter);
 }
 
 static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
@@ -1415,7 +1432,8 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
                                          void **ctx)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
-       u16 last_used, id;
+       u16 last_used, id, last_used_idx;
+       bool used_wrap_counter;
        void *ret;
 
        START_USE(vq);
@@ -1434,7 +1452,9 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
        /* Only get used elements after they have been exposed by host. */
        virtio_rmb(vq->weak_barriers);
 
-       last_used = vq->last_used_idx;
+       last_used_idx = READ_ONCE(vq->last_used_idx);
+       used_wrap_counter = packed_used_wrap_counter(last_used_idx);
+       last_used = packed_last_used(last_used_idx);
        id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
        *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
 
@@ -1451,12 +1471,15 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
        ret = vq->packed.desc_state[id].data;
        detach_buf_packed(vq, id, ctx);
 
-       vq->last_used_idx += vq->packed.desc_state[id].num;
-       if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
-               vq->last_used_idx -= vq->packed.vring.num;
-               vq->packed.used_wrap_counter ^= 1;
+       last_used += vq->packed.desc_state[id].num;
+       if (unlikely(last_used >= vq->packed.vring.num)) {
+               last_used -= vq->packed.vring.num;
+               used_wrap_counter ^= 1;
        }
 
+       last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
+       WRITE_ONCE(vq->last_used_idx, last_used);
+
        /*
         * If we expect an interrupt for the next entry, tell host
         * by writing event index and flush out the write before
@@ -1465,9 +1488,7 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
        if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
                virtio_store_mb(vq->weak_barriers,
                                &vq->packed.vring.driver->off_wrap,
-                               cpu_to_le16(vq->last_used_idx |
-                                       (vq->packed.used_wrap_counter <<
-                                        VRING_PACKED_EVENT_F_WRAP_CTR)));
+                               cpu_to_le16(vq->last_used_idx));
 
        LAST_ADD_TIME_INVALID(vq);
 
@@ -1499,9 +1520,7 @@ static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
 
        if (vq->event) {
                vq->packed.vring.driver->off_wrap =
-                       cpu_to_le16(vq->last_used_idx |
-                               (vq->packed.used_wrap_counter <<
-                                VRING_PACKED_EVENT_F_WRAP_CTR));
+                       cpu_to_le16(vq->last_used_idx);
                /*
                 * We need to update event offset and event wrap
                 * counter first before updating event flags.
@@ -1518,8 +1537,7 @@ static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
        }
 
        END_USE(vq);
-       return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
-                       VRING_PACKED_EVENT_F_WRAP_CTR);
+       return vq->last_used_idx;
 }
 
 static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
@@ -1537,7 +1555,7 @@ static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
 static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
-       u16 used_idx, wrap_counter;
+       u16 used_idx, wrap_counter, last_used_idx;
        u16 bufs;
 
        START_USE(vq);
@@ -1550,9 +1568,10 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
        if (vq->event) {
                /* TODO: tune this threshold */
                bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
-               wrap_counter = vq->packed.used_wrap_counter;
+               last_used_idx = READ_ONCE(vq->last_used_idx);
+               wrap_counter = packed_used_wrap_counter(last_used_idx);
 
-               used_idx = vq->last_used_idx + bufs;
+               used_idx = packed_last_used(last_used_idx) + bufs;
                if (used_idx >= vq->packed.vring.num) {
                        used_idx -= vq->packed.vring.num;
                        wrap_counter ^= 1;
@@ -1582,9 +1601,10 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
         */
        virtio_mb(vq->weak_barriers);
 
-       if (is_used_desc_packed(vq,
-                               vq->last_used_idx,
-                               vq->packed.used_wrap_counter)) {
+       last_used_idx = READ_ONCE(vq->last_used_idx);
+       wrap_counter = packed_used_wrap_counter(last_used_idx);
+       used_idx = packed_last_used(last_used_idx);
+       if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
                END_USE(vq);
                return false;
        }
@@ -1688,8 +1708,12 @@ static struct virtqueue *vring_create_virtqueue_packed(
        vq->we_own_ring = true;
        vq->notify = notify;
        vq->weak_barriers = weak_barriers;
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
        vq->broken = true;
-       vq->last_used_idx = 0;
+#else
+       vq->broken = false;
+#endif
+       vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
        vq->event_triggered = false;
        vq->num_added = 0;
        vq->packed_ring = true;
@@ -1720,7 +1744,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
 
        vq->packed.next_avail_idx = 0;
        vq->packed.avail_wrap_counter = 1;
-       vq->packed.used_wrap_counter = 1;
        vq->packed.event_flags_shadow = 0;
        vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
 
@@ -2135,9 +2158,13 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
        }
 
        if (unlikely(vq->broken)) {
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
                dev_warn_once(&vq->vq.vdev->dev,
                              "virtio vring IRQ raised before DRIVER_OK");
                return IRQ_NONE;
+#else
+               return IRQ_HANDLED;
+#endif
        }
 
        /* Just a hint for performance: so it's ok that this can be racy! */
@@ -2180,7 +2207,11 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
        vq->we_own_ring = false;
        vq->notify = notify;
        vq->weak_barriers = weak_barriers;
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
        vq->broken = true;
+#else
+       vq->broken = false;
+#endif
        vq->last_used_idx = 0;
        vq->event_triggered = false;
        vq->num_added = 0;
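To make the new last_used_idx encoding concrete: VRING_PACKED_EVENT_F_WRAP_CTR is 15, so the used wrap counter occupies bit 15 and the ring index bits 0-14, letting both travel together through a single READ_ONCE()/WRITE_ONCE() and closing the torn-read window the old separate bool left open. Worked through with illustrative values:

    u16 idx  = 42;
    u16 wrap = 1;
    u16 packed = idx | (wrap << VRING_PACKED_EVENT_F_WRAP_CTR);     /* 0x802a */

    packed_last_used(packed);               /* -> 42   (bits 0-14) */
    packed_used_wrap_counter(packed);       /* -> true (bit 15)    */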
index b0b2d7a..2fd85be 100644 (file)
@@ -172,3 +172,4 @@ module_platform_driver(gxp_wdt_driver);
 MODULE_AUTHOR("Nick Hawkins <nick.hawkins@hpe.com>");
 MODULE_AUTHOR("Jean-Marie Verdun <verdun@hpe.com>");
 MODULE_DESCRIPTION("Driver for GXP watchdog timer");
+MODULE_LICENSE("GPL");
index 7b59144..87f1828 100644 (file)
@@ -42,7 +42,7 @@ void xen_setup_features(void)
                if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
                        break;
                for (j = 0; j < 32; j++)
-                       xen_features[i * 32 + j] = !!(fi.submap & 1<<j);
+                       xen_features[i * 32 + j] = !!(fi.submap & 1U << j);
        }
 
        if (xen_pv_domain()) {
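The 1U in the hunk above is the whole fix: for j == 31, the signed expression 1 << j shifts into the sign bit of an int, which is undefined behaviour in C, while the unsigned constant keeps every shift in the 0..31 range well defined. A stand-alone illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int submap = 0x80000000u;      /* only bit 31 set */

            /* 1 << 31 would be signed overflow (UB); 1U << 31 is fine. */
            printf("%d\n", !!(submap & (1U << 31)));        /* prints 1 */
            return 0;
    }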
index 20d7d05..40ef379 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/types.h>
 #include <xen/interface/event_channel.h>
+#include <xen/grant_table.h>
 
 struct gntdev_dmabuf_priv;
 
@@ -56,6 +57,7 @@ struct gntdev_grant_map {
        struct gnttab_unmap_grant_ref *unmap_ops;
        struct gnttab_map_grant_ref   *kmap_ops;
        struct gnttab_unmap_grant_ref *kunmap_ops;
+       bool *being_removed;
        struct page **pages;
        unsigned long pages_vm_start;
 
@@ -73,6 +75,11 @@ struct gntdev_grant_map {
        /* Needed to avoid allocation in gnttab_dma_free_pages(). */
        xen_pfn_t *frames;
 #endif
+
+       /* Number of live grants */
+       atomic_t live_grants;
+       /* Needed to avoid allocation in __unmap_grant_pages */
+       struct gntab_unmap_queue_data unmap_data;
 };
 
 struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
index 59ffea8..84b143e 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/refcount.h>
+#include <linux/workqueue.h>
 
 #include <xen/xen.h>
 #include <xen/grant_table.h>
@@ -60,10 +61,11 @@ module_param(limit, uint, 0644);
 MODULE_PARM_DESC(limit,
        "Maximum number of grants that may be mapped by one mapping request");
 
+/* True in PV mode, false otherwise */
 static int use_ptemod;
 
-static int unmap_grant_pages(struct gntdev_grant_map *map,
-                            int offset, int pages);
+static void unmap_grant_pages(struct gntdev_grant_map *map,
+                             int offset, int pages);
 
 static struct miscdevice gntdev_miscdev;
 
@@ -120,6 +122,7 @@ static void gntdev_free_map(struct gntdev_grant_map *map)
        kvfree(map->unmap_ops);
        kvfree(map->kmap_ops);
        kvfree(map->kunmap_ops);
+       kvfree(map->being_removed);
        kfree(map);
 }
 
@@ -140,10 +143,13 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
        add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]),
                                        GFP_KERNEL);
        add->pages     = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
+       add->being_removed =
+               kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);
        if (NULL == add->grants    ||
            NULL == add->map_ops   ||
            NULL == add->unmap_ops ||
-           NULL == add->pages)
+           NULL == add->pages     ||
+           NULL == add->being_removed)
                goto err;
        if (use_ptemod) {
                add->kmap_ops   = kvmalloc_array(count, sizeof(add->kmap_ops[0]),
@@ -250,9 +256,36 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
        if (!refcount_dec_and_test(&map->users))
                return;
 
-       if (map->pages && !use_ptemod)
+       if (map->pages && !use_ptemod) {
+               /*
+                * Increment the reference count.  This ensures that the
+                * subsequent call to unmap_grant_pages() will not wind up
+                * re-entering itself.  It *can* wind up calling
+                * gntdev_put_map() recursively, but such calls will be with a
+                * reference count greater than 1, so they will return before
+                * this code is reached.  The recursion depth is thus limited to
+                * 1.  Do NOT use refcount_inc() here, as it will detect that
+                * the reference count is zero and WARN().
+                */
+               refcount_set(&map->users, 1);
+
+               /*
+                * Unmap the grants.  This may or may not be asynchronous, so it
+                * is possible that the reference count is 1 on return, but it
+                * could also be greater than 1.
+                */
                unmap_grant_pages(map, 0, map->count);
 
+               /* Check if the memory now needs to be freed */
+               if (!refcount_dec_and_test(&map->users))
+                       return;
+
+               /*
+                * All pages have been returned to the hypervisor, so free the
+                * map.
+                */
+       }
+
        if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
                notify_remote_via_evtchn(map->notify.event);
                evtchn_put(map->notify.event);
@@ -283,6 +316,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
 
 int gntdev_map_grant_pages(struct gntdev_grant_map *map)
 {
+       size_t alloced = 0;
        int i, err = 0;
 
        if (!use_ptemod) {
@@ -331,97 +365,118 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
                        map->count);
 
        for (i = 0; i < map->count; i++) {
-               if (map->map_ops[i].status == GNTST_okay)
+               if (map->map_ops[i].status == GNTST_okay) {
                        map->unmap_ops[i].handle = map->map_ops[i].handle;
-               else if (!err)
+                       if (!use_ptemod)
+                               alloced++;
+               } else if (!err)
                        err = -EINVAL;
 
                if (map->flags & GNTMAP_device_map)
                        map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
 
                if (use_ptemod) {
-                       if (map->kmap_ops[i].status == GNTST_okay)
+                       if (map->kmap_ops[i].status == GNTST_okay) {
+                               if (map->map_ops[i].status == GNTST_okay)
+                                       alloced++;
                                map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
-                       else if (!err)
+                       } else if (!err)
                                err = -EINVAL;
                }
        }
+       atomic_add(alloced, &map->live_grants);
        return err;
 }
 
-static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
-                              int pages)
+static void __unmap_grant_pages_done(int result,
+               struct gntab_unmap_queue_data *data)
 {
-       int i, err = 0;
-       struct gntab_unmap_queue_data unmap_data;
-
-       if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
-               int pgno = (map->notify.addr >> PAGE_SHIFT);
-               if (pgno >= offset && pgno < offset + pages) {
-                       /* No need for kmap, pages are in lowmem */
-                       uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
-                       tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
-                       map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
-               }
-       }
-
-       unmap_data.unmap_ops = map->unmap_ops + offset;
-       unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
-       unmap_data.pages = map->pages + offset;
-       unmap_data.count = pages;
-
-       err = gnttab_unmap_refs_sync(&unmap_data);
-       if (err)
-               return err;
+       unsigned int i;
+       struct gntdev_grant_map *map = data->data;
+       unsigned int offset = data->unmap_ops - map->unmap_ops;
 
-       for (i = 0; i < pages; i++) {
-               if (map->unmap_ops[offset+i].status)
-                       err = -EINVAL;
+       for (i = 0; i < data->count; i++) {
+               WARN_ON(map->unmap_ops[offset + i].status != GNTST_okay &&
+                       map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
                pr_debug("unmap handle=%d st=%d\n",
                        map->unmap_ops[offset+i].handle,
                        map->unmap_ops[offset+i].status);
                map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
                if (use_ptemod) {
-                       if (map->kunmap_ops[offset+i].status)
-                               err = -EINVAL;
+                       WARN_ON(map->kunmap_ops[offset + i].status != GNTST_okay &&
+                               map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
                        pr_debug("kunmap handle=%u st=%d\n",
                                 map->kunmap_ops[offset+i].handle,
                                 map->kunmap_ops[offset+i].status);
                        map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
                }
        }
-       return err;
+       /*
+        * Decrease the live-grant counter.  This must happen after the loop to
+        * prevent premature reuse of the grants by gnttab_mmap().
+        */
+       atomic_sub(data->count, &map->live_grants);
+
+       /* Release reference taken by __unmap_grant_pages */
+       gntdev_put_map(NULL, map);
+}
+
+static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+                              int pages)
+{
+       if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
+               int pgno = (map->notify.addr >> PAGE_SHIFT);
+
+               if (pgno >= offset && pgno < offset + pages) {
+                       /* No need for kmap, pages are in lowmem */
+                       uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
+
+                       tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
+                       map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
+               }
+       }
+
+       map->unmap_data.unmap_ops = map->unmap_ops + offset;
+       map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
+       map->unmap_data.pages = map->pages + offset;
+       map->unmap_data.count = pages;
+       map->unmap_data.done = __unmap_grant_pages_done;
+       map->unmap_data.data = map;
+       refcount_inc(&map->users); /* to keep map alive during async call below */
+
+       gnttab_unmap_refs_async(&map->unmap_data);
 }
 
-static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
-                            int pages)
+static void unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+                             int pages)
 {
-       int range, err = 0;
+       int range;
+
+       if (atomic_read(&map->live_grants) == 0)
+               return; /* Nothing to do */
 
        pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
 
        /* It is possible the requested range will have a "hole" where we
         * already unmapped some of the grants. Only unmap valid ranges.
         */
-       while (pages && !err) {
-               while (pages &&
-                      map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) {
+       while (pages) {
+               while (pages && map->being_removed[offset]) {
                        offset++;
                        pages--;
                }
                range = 0;
                while (range < pages) {
-                       if (map->unmap_ops[offset + range].handle ==
-                           INVALID_GRANT_HANDLE)
+                       if (map->being_removed[offset + range])
                                break;
+                       map->being_removed[offset + range] = true;
                        range++;
                }
-               err = __unmap_grant_pages(map, offset, range);
+               if (range)
+                       __unmap_grant_pages(map, offset, range);
                offset += range;
                pages -= range;
        }
-
-       return err;
 }
 
 /* ------------------------------------------------------------------ */
@@ -473,7 +528,6 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
        struct gntdev_grant_map *map =
                container_of(mn, struct gntdev_grant_map, notifier);
        unsigned long mstart, mend;
-       int err;
 
        if (!mmu_notifier_range_blockable(range))
                return false;
@@ -494,10 +548,9 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
                        map->index, map->count,
                        map->vma->vm_start, map->vma->vm_end,
                        range->start, range->end, mstart, mend);
-       err = unmap_grant_pages(map,
+       unmap_grant_pages(map,
                                (mstart - map->vma->vm_start) >> PAGE_SHIFT,
                                (mend - mstart) >> PAGE_SHIFT);
-       WARN_ON(err);
 
        return true;
 }
@@ -985,6 +1038,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
                goto unlock_out;
        if (use_ptemod && map->vma)
                goto unlock_out;
+       if (atomic_read(&map->live_grants)) {
+               err = -EAGAIN;
+               goto unlock_out;
+       }
        refcount_inc(&map->users);
 
        vma->vm_ops = &gntdev_vmops;
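
The gntdev rework above replaces a synchronous gnttab_unmap_refs_sync() with
gnttab_unmap_refs_async() plus a completion callback, so the map has to stay
alive until __unmap_grant_pages_done() runs: a reference is taken at submit
time and dropped in the callback. (gntdev_put_map() has the extra wrinkle
that it runs after the count already hit zero, hence the refcount_set()
instead of a WARN-prone refcount_inc().) A minimal sketch of the
keep-alive-across-async-completion pattern, with hypothetical names rather
than the driver's actual types:

    #include <linux/refcount.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct async_ctx {
            refcount_t users;
            struct work_struct work;
    };

    static void ctx_put(struct async_ctx *ctx)
    {
            if (refcount_dec_and_test(&ctx->users))
                    kfree(ctx);
    }

    static void async_done(struct work_struct *work)
    {
            struct async_ctx *ctx = container_of(work, struct async_ctx, work);

            /* ... handle the completion ... */
            ctx_put(ctx);   /* drop the reference taken at submit time */
    }

    static struct async_ctx *ctx_alloc(void)
    {
            struct async_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

            if (!ctx)
                    return NULL;
            refcount_set(&ctx->users, 1);   /* the caller's reference */
            INIT_WORK(&ctx->work, async_done);
            return ctx;
    }

    static void submit_async(struct async_ctx *ctx)
    {
            /* keep ctx alive until async_done() has run */
            refcount_inc(&ctx->users);
            schedule_work(&ctx->work);
    }
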
index 79df61f..baf2b15 100644 (file)
@@ -152,7 +152,7 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
        const unsigned char **wnames, *uname;
        int i, n, l, clone, access;
        struct v9fs_session_info *v9ses;
-       struct p9_fid *fid, *old_fid = NULL;
+       struct p9_fid *fid, *old_fid;
 
        v9ses = v9fs_dentry2v9ses(dentry);
        access = v9ses->flags & V9FS_ACCESS_MASK;
@@ -194,13 +194,12 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
                if (IS_ERR(fid))
                        return fid;
 
+               refcount_inc(&fid->count);
                v9fs_fid_add(dentry->d_sb->s_root, fid);
        }
        /* If we are root ourself just return that */
-       if (dentry->d_sb->s_root == dentry) {
-               refcount_inc(&fid->count);
+       if (dentry->d_sb->s_root == dentry)
                return fid;
-       }
        /*
         * Do a multipath walk with attached root.
         * When walking parent we need to make sure we
@@ -212,6 +211,7 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
                fid = ERR_PTR(n);
                goto err_out;
        }
+       old_fid = fid;
        clone = 1;
        i = 0;
        while (i < n) {
@@ -221,19 +221,15 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
                 * walk to ensure none of the path components change
                 */
                fid = p9_client_walk(fid, l, &wnames[i], clone);
+               /* non-cloning walk will return the same fid */
+               if (fid != old_fid) {
+                       p9_client_clunk(old_fid);
+                       old_fid = fid;
+               }
                if (IS_ERR(fid)) {
-                       if (old_fid) {
-                               /*
-                                * If we fail, clunk fid which are mapping
-                                * to path component and not the last component
-                                * of the path.
-                                */
-                               p9_client_clunk(old_fid);
-                       }
                        kfree(wnames);
                        goto err_out;
                }
-               old_fid = fid;
                i += l;
                clone = 0;
        }
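
The fid.c change above leans on the fact that a non-cloning p9_client_walk()
mutates and returns the fid it was given, so the previous fid must be clunked
only when the walk actually produced a new one. A sketch of that shape, with
step() and release() as illustrative stand-ins for p9_client_walk() and
p9_client_clunk():

    #include <linux/err.h>

    struct node;
    struct node *step(struct node *cur);    /* may return cur itself */
    void release(struct node *n);

    struct node *walk_chain(struct node *cur, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    struct node *next = step(cur);

                    if (next != cur) {
                            /* safe: step() handed back a new node */
                            release(cur);
                            cur = next;
                    }
                    if (IS_ERR(cur))
                            return cur;     /* error pointers need no release */
            }
            return cur;
    }
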
index a8f512b..d0833fa 100644 (file)
@@ -58,8 +58,21 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
  */
 static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
+       struct inode *inode = file_inode(file);
+       struct v9fs_inode *v9inode = V9FS_I(inode);
        struct p9_fid *fid = file->private_data;
 
+       BUG_ON(!fid);
+
+       /* we might need to read from a fid that was opened write-only
+        * for read-modify-write of page cache, use the writeback fid
+        * for that */
+       if (rreq->origin == NETFS_READ_FOR_WRITE &&
+                       (fid->mode & O_ACCMODE) == O_WRONLY) {
+               fid = v9inode->writeback_fid;
+               BUG_ON(!fid);
+       }
+
        refcount_inc(&fid->count);
        rreq->netfs_priv = fid;
        return 0;
index 419d2f3..3d82977 100644 (file)
@@ -1251,15 +1251,15 @@ static const char *v9fs_vfs_get_link(struct dentry *dentry,
                return ERR_PTR(-ECHILD);
 
        v9ses = v9fs_dentry2v9ses(dentry);
-       fid = v9fs_fid_lookup(dentry);
+       if (!v9fs_proto_dotu(v9ses))
+               return ERR_PTR(-EBADF);
+
        p9_debug(P9_DEBUG_VFS, "%pd\n", dentry);
+       fid = v9fs_fid_lookup(dentry);
 
        if (IS_ERR(fid))
                return ERR_CAST(fid);
 
-       if (!v9fs_proto_dotu(v9ses))
-               return ERR_PTR(-EBADF);
-
        st = p9_client_stat(fid);
        p9_client_clunk(fid);
        if (IS_ERR(st))
index d17502a..b6eb116 100644 (file)
@@ -274,6 +274,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
        if (IS_ERR(ofid)) {
                err = PTR_ERR(ofid);
                p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+               p9_client_clunk(dfid);
                goto out;
        }
 
@@ -285,6 +286,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
        if (err) {
                p9_debug(P9_DEBUG_VFS, "Failed to get acl values in creat %d\n",
                         err);
+               p9_client_clunk(dfid);
                goto error;
        }
        err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
@@ -292,6 +294,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
        if (err < 0) {
                p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in creat %d\n",
                         err);
+               p9_client_clunk(dfid);
                goto error;
        }
        v9fs_invalidate_inode_attr(dir);
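
The three added p9_client_clunk(dfid) calls above plug leaks of the directory
fid on early-exit paths. An alternative that makes such a leak structurally
impossible is to funnel every exit through one cleanup label; this is a sketch
only, with get_dir_fid(), clone_fid() and do_create() as hypothetical helpers,
not this function's actual structure (which uses separate out/error labels):

    static int open_create_sketch(struct inode *dir, struct dentry *dentry)
    {
            struct p9_fid *dfid, *ofid;
            int err = 0;

            dfid = get_dir_fid(dir, dentry);        /* hypothetical */
            if (IS_ERR(dfid))
                    return PTR_ERR(dfid);

            ofid = clone_fid(dfid);                 /* hypothetical */
            if (IS_ERR(ofid)) {
                    err = PTR_ERR(ofid);
                    goto out_clunk;
            }

            err = do_create(ofid, dentry);          /* hypothetical */
            /* on success ofid would be installed into the struct file */

    out_clunk:
            /* single exit: the directory fid is clunked exactly once */
            p9_client_clunk(dfid);
            return err;
    }
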
index 42118a4..d1cfb23 100644 (file)
@@ -375,7 +375,7 @@ static int afs_begin_cache_operation(struct netfs_io_request *rreq)
 }
 
 static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
-                                struct folio *folio, void **_fsdata)
+                                struct folio **foliop, void **_fsdata)
 {
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
 
index 89630ac..64dab70 100644 (file)
@@ -745,7 +745,8 @@ int afs_getattr(struct user_namespace *mnt_userns, const struct path *path,
 
        _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
 
-       if (!(query_flags & AT_STATX_DONT_SYNC) &&
+       if (vnode->volume &&
+           !(query_flags & AT_STATX_DONT_SYNC) &&
            !test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
                key = afs_request_key(vnode->volume->cell);
                if (IS_ERR(key))
index 66899b6..b5b8835 100644 (file)
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -22,7 +22,7 @@
  * chown_ok - verify permissions to chown inode
  * @mnt_userns:        user namespace of the mount @inode was found from
  * @inode:     inode to check permissions on
- * @uid:       uid to chown @inode to
+ * @ia_vfsuid: uid to chown @inode to
  *
  * If the inode has been found through an idmapped mount the user namespace of
  * the vfsmount must be passed through @mnt_userns. This function will then
  * performed on the raw inode simply pass init_user_ns.
  */
 static bool chown_ok(struct user_namespace *mnt_userns,
-                    const struct inode *inode,
-                    kuid_t uid)
+                    const struct inode *inode, vfsuid_t ia_vfsuid)
 {
-       kuid_t kuid = i_uid_into_mnt(mnt_userns, inode);
-       if (uid_eq(current_fsuid(), kuid) && uid_eq(uid, inode->i_uid))
+       vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_userns, inode);
+       if (vfsuid_eq_kuid(vfsuid, current_fsuid()) &&
+           vfsuid_eq(ia_vfsuid, vfsuid))
                return true;
        if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN))
                return true;
-       if (uid_eq(kuid, INVALID_UID) &&
+       if (!vfsuid_valid(vfsuid) &&
            ns_capable(inode->i_sb->s_user_ns, CAP_CHOWN))
                return true;
        return false;
@@ -49,7 +49,7 @@ static bool chown_ok(struct user_namespace *mnt_userns,
  * chgrp_ok - verify permissions to chgrp inode
  * @mnt_userns:        user namespace of the mount @inode was found from
  * @inode:     inode to check permissions on
- * @gid:       gid to chown @inode to
+ * @ia_vfsgid: gid to chown @inode to
  *
  * If the inode has been found through an idmapped mount the user namespace of
  * the vfsmount must be passed through @mnt_userns. This function will then
@@ -58,15 +58,19 @@ static bool chown_ok(struct user_namespace *mnt_userns,
  * performed on the raw inode simply pass init_user_ns.
  */
 static bool chgrp_ok(struct user_namespace *mnt_userns,
-                    const struct inode *inode, kgid_t gid)
+                    const struct inode *inode, vfsgid_t ia_vfsgid)
 {
-       kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
-       if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)) &&
-           (in_group_p(gid) || gid_eq(gid, inode->i_gid)))
-               return true;
+       vfsgid_t vfsgid = i_gid_into_vfsgid(mnt_userns, inode);
+       vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_userns, inode);
+       if (vfsuid_eq_kuid(vfsuid, current_fsuid())) {
+               if (vfsgid_eq(ia_vfsgid, vfsgid))
+                       return true;
+               if (vfsgid_in_group_p(ia_vfsgid))
+                       return true;
+       }
        if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN))
                return true;
-       if (gid_eq(kgid, INVALID_GID) &&
+       if (!vfsgid_valid(vfsgid) &&
            ns_capable(inode->i_sb->s_user_ns, CAP_CHOWN))
                return true;
        return false;
@@ -114,21 +118,30 @@ int setattr_prepare(struct user_namespace *mnt_userns, struct dentry *dentry,
                goto kill_priv;
 
        /* Make sure a caller can chown. */
-       if ((ia_valid & ATTR_UID) && !chown_ok(mnt_userns, inode, attr->ia_uid))
+       if ((ia_valid & ATTR_UID) &&
+           !chown_ok(mnt_userns, inode, attr->ia_vfsuid))
                return -EPERM;
 
        /* Make sure caller can chgrp. */
-       if ((ia_valid & ATTR_GID) && !chgrp_ok(mnt_userns, inode, attr->ia_gid))
+       if ((ia_valid & ATTR_GID) &&
+           !chgrp_ok(mnt_userns, inode, attr->ia_vfsgid))
                return -EPERM;
 
        /* Make sure a caller can chmod. */
        if (ia_valid & ATTR_MODE) {
+               vfsgid_t vfsgid;
+
                if (!inode_owner_or_capable(mnt_userns, inode))
                        return -EPERM;
+
+               if (ia_valid & ATTR_GID)
+                       vfsgid = attr->ia_vfsgid;
+               else
+                       vfsgid = i_gid_into_vfsgid(mnt_userns, inode);
+
                /* Also check the setgid bit! */
-               if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
-                                i_gid_into_mnt(mnt_userns, inode)) &&
-                    !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
+               if (!vfsgid_in_group_p(vfsgid) &&
+                   !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
                        attr->ia_mode &= ~S_ISGID;
        }
 
@@ -205,9 +218,7 @@ EXPORT_SYMBOL(inode_newsize_ok);
  * setattr_copy must be called with i_mutex held.
  *
  * setattr_copy updates the inode's metadata with that specified
- * in attr on idmapped mounts. If file ownership is changed setattr_copy
- * doesn't map ia_uid and ia_gid. It will asssume the caller has already
- * provided the intended values. Necessary permission checks to determine
+ * in attr on idmapped mounts. Necessary permission checks to determine
  * whether or not the S_ISGID property needs to be removed are performed with
  * the correct idmapped mount permission helpers.
  * Noticeably missing is inode size update, which is more complex
@@ -228,10 +239,8 @@ void setattr_copy(struct user_namespace *mnt_userns, struct inode *inode,
 {
        unsigned int ia_valid = attr->ia_valid;
 
-       if (ia_valid & ATTR_UID)
-               inode->i_uid = attr->ia_uid;
-       if (ia_valid & ATTR_GID)
-               inode->i_gid = attr->ia_gid;
+       i_uid_update(mnt_userns, attr, inode);
+       i_gid_update(mnt_userns, attr, inode);
        if (ia_valid & ATTR_ATIME)
                inode->i_atime = attr->ia_atime;
        if (ia_valid & ATTR_MTIME)
@@ -240,8 +249,8 @@ void setattr_copy(struct user_namespace *mnt_userns, struct inode *inode,
                inode->i_ctime = attr->ia_ctime;
        if (ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;
-               kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
-               if (!in_group_p(kgid) &&
+               vfsgid_t vfsgid = i_gid_into_vfsgid(mnt_userns, inode);
+               if (!vfsgid_in_group_p(vfsgid) &&
                    !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
                        mode &= ~S_ISGID;
                inode->i_mode = mode;
@@ -292,9 +301,6 @@ EXPORT_SYMBOL(may_setattr);
  * retry.  Because breaking a delegation may take a long time, the
  * caller should drop the i_mutex before doing so.
  *
- * If file ownership is changed notify_change() doesn't map ia_uid and
- * ia_gid. It will asssume the caller has already provided the intended values.
- *
  * Alternatively, a caller may pass NULL for delegated_inode.  This may
  * be appropriate for callers that expect the underlying filesystem not
  * to be NFS exported.  Also, passing NULL is fine for callers holding
@@ -383,23 +389,25 @@ int notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
         * namespace of the superblock.
         */
        if (ia_valid & ATTR_UID &&
-           !kuid_has_mapping(inode->i_sb->s_user_ns, attr->ia_uid))
+           !vfsuid_has_fsmapping(mnt_userns, inode->i_sb->s_user_ns,
+                                 attr->ia_vfsuid))
                return -EOVERFLOW;
        if (ia_valid & ATTR_GID &&
-           !kgid_has_mapping(inode->i_sb->s_user_ns, attr->ia_gid))
+           !vfsgid_has_fsmapping(mnt_userns, inode->i_sb->s_user_ns,
+                                 attr->ia_vfsgid))
                return -EOVERFLOW;
 
        /* Don't allow modifications of files with invalid uids or
         * gids unless those uids & gids are being made valid.
         */
        if (!(ia_valid & ATTR_UID) &&
-           !uid_valid(i_uid_into_mnt(mnt_userns, inode)))
+           !vfsuid_valid(i_uid_into_vfsuid(mnt_userns, inode)))
                return -EOVERFLOW;
        if (!(ia_valid & ATTR_GID) &&
-           !gid_valid(i_gid_into_mnt(mnt_userns, inode)))
+           !vfsgid_valid(i_gid_into_vfsgid(mnt_userns, inode)))
                return -EOVERFLOW;
 
-       error = security_inode_setattr(dentry, attr);
+       error = security_inode_setattr(mnt_userns, dentry, attr);
        if (error)
                return error;
        error = try_break_deleg(inode, delegated_inode);
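
The attr.c conversion above moves the permission checks from raw kuid_t/kgid_t
comparisons to the vfsuid_t/vfsgid_t helpers, so the mount's idmapping is
applied exactly once, at the comparison boundary. A minimal sketch of the
new-style owner test, using only helpers that appear in the diff (assumed to
be available via <linux/mnt_idmapping.h>):

    static bool owner_matches(struct user_namespace *mnt_userns,
                              const struct inode *inode, vfsuid_t ia_vfsuid)
    {
            /* translate the inode's owner into the mount's view once */
            vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_userns, inode);

            /* the caller owns the inode and is "changing" it to the same
             * owner, i.e. a no-op chown */
            return vfsuid_eq_kuid(vfsuid, current_fsuid()) &&
                   vfsuid_eq(ia_vfsuid, vfsuid);
    }
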
index 3ac668a..35e0e86 100644 (file)
@@ -104,6 +104,7 @@ struct btrfs_block_group {
        unsigned int relocating_repair:1;
        unsigned int chunk_item_inserted:1;
        unsigned int zone_is_active:1;
+       unsigned int zoned_data_reloc_ongoing:1;
 
        int disk_cache_state;
 
index 0e49b1a..9c21e21 100644 (file)
@@ -675,9 +675,8 @@ struct btrfs_fs_info {
        rwlock_t global_root_lock;
        struct rb_root global_root_tree;
 
-       /* The xarray that holds all the FS roots */
-       spinlock_t fs_roots_lock;
-       struct xarray fs_roots;
+       spinlock_t fs_roots_radix_lock;
+       struct radix_tree_root fs_roots_radix;
 
        /* block group cache stuff */
        rwlock_t block_group_cache_lock;
@@ -995,10 +994,10 @@ struct btrfs_fs_info {
 
        struct btrfs_delayed_root *delayed_root;
 
-       /* Extent buffer xarray */
+       /* Extent buffer radix tree */
        spinlock_t buffer_lock;
        /* Entries are eb->start / sectorsize */
-       struct xarray extent_buffers;
+       struct radix_tree_root buffer_radix;
 
        /* next backup root to be overwritten */
        int backup_root_index;
@@ -1119,8 +1118,7 @@ enum {
         */
        BTRFS_ROOT_SHAREABLE,
        BTRFS_ROOT_TRACK_DIRTY,
-       /* The root is tracked in fs_info::fs_roots */
-       BTRFS_ROOT_REGISTERED,
+       BTRFS_ROOT_IN_RADIX,
        BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
        BTRFS_ROOT_DEFRAG_RUNNING,
        BTRFS_ROOT_FORCE_COW,
@@ -1224,10 +1222,10 @@ struct btrfs_root {
        struct rb_root inode_tree;
 
        /*
-        * Xarray that keeps track of delayed nodes of every inode, protected
-        * by inode_lock
+        * radix tree that keeps track of delayed nodes of every inode,
+        * protected by inode_lock
         */
-       struct xarray delayed_nodes;
+       struct radix_tree_root delayed_nodes_tree;
        /*
         * right now this just gets used so that a root has its own devid
         * for stat.  It may be used for more later
@@ -1330,6 +1328,8 @@ struct btrfs_replace_extent_info {
         * existing extent into a file range.
         */
        bool is_new_extent;
+       /* Indicate if we should update the inode's mtime and ctime. */
+       bool update_times;
        /* Meaningful only if is_new_extent is true. */
        int qgroup_reserved;
        /*
index 66779ab..748bf6b 100644 (file)
@@ -78,7 +78,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
        }
 
        spin_lock(&root->inode_lock);
-       node = xa_load(&root->delayed_nodes, ino);
+       node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
 
        if (node) {
                if (btrfs_inode->delayed_node) {
@@ -90,9 +90,9 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
 
                /*
                 * It's possible that we're racing into the middle of removing
-                * this node from the xarray.  In this case, the refcount
+                * this node from the radix tree.  In this case, the refcount
                 * was zero and it should never go back to one.  Just return
-                * NULL like it was never in the xarray at all; our release
+                * NULL like it was never in the radix at all; our release
                 * function is in the process of removing it.
                 *
                 * Some implementations of refcount_inc refuse to bump the
@@ -100,7 +100,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
                 * here, refcount_inc() may decide to just WARN_ONCE() instead
                 * of actually bumping the refcount.
                 *
-                * If this node is properly in the xarray, we want to bump the
+                * If this node is properly in the radix, we want to bump the
                 * refcount twice, once for the inode and once for this get
                 * operation.
                 */
@@ -128,30 +128,36 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
        u64 ino = btrfs_ino(btrfs_inode);
        int ret;
 
-       do {
-               node = btrfs_get_delayed_node(btrfs_inode);
-               if (node)
-                       return node;
+again:
+       node = btrfs_get_delayed_node(btrfs_inode);
+       if (node)
+               return node;
 
-               node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
-               if (!node)
-                       return ERR_PTR(-ENOMEM);
-               btrfs_init_delayed_node(node, root, ino);
+       node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
+       if (!node)
+               return ERR_PTR(-ENOMEM);
+       btrfs_init_delayed_node(node, root, ino);
 
-               /* Cached in the inode and can be accessed */
-               refcount_set(&node->refs, 2);
+       /* cached in the btrfs inode and can be accessed */
+       refcount_set(&node->refs, 2);
 
-               spin_lock(&root->inode_lock);
-               ret = xa_insert(&root->delayed_nodes, ino, node, GFP_NOFS);
-               if (ret) {
-                       spin_unlock(&root->inode_lock);
-                       kmem_cache_free(delayed_node_cache, node);
-                       if (ret != -EBUSY)
-                               return ERR_PTR(ret);
-               }
-       } while (ret);
+       ret = radix_tree_preload(GFP_NOFS);
+       if (ret) {
+               kmem_cache_free(delayed_node_cache, node);
+               return ERR_PTR(ret);
+       }
+
+       spin_lock(&root->inode_lock);
+       ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
+       if (ret == -EEXIST) {
+               spin_unlock(&root->inode_lock);
+               kmem_cache_free(delayed_node_cache, node);
+               radix_tree_preload_end();
+               goto again;
+       }
        btrfs_inode->delayed_node = node;
        spin_unlock(&root->inode_lock);
+       radix_tree_preload_end();
 
        return node;
 }
@@ -270,7 +276,8 @@ static void __btrfs_release_delayed_node(
                 * back up.  We can delete it now.
                 */
                ASSERT(refcount_read(&delayed_node->refs) == 0);
-               xa_erase(&root->delayed_nodes, delayed_node->inode_id);
+               radix_tree_delete(&root->delayed_nodes_tree,
+                                 delayed_node->inode_id);
                spin_unlock(&root->inode_lock);
                kmem_cache_free(delayed_node_cache, delayed_node);
        }
@@ -1863,35 +1870,34 @@ void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
 
 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
 {
-       unsigned long index = 0;
-       struct btrfs_delayed_node *delayed_node;
+       u64 inode_id = 0;
        struct btrfs_delayed_node *delayed_nodes[8];
+       int i, n;
 
        while (1) {
-               int n = 0;
-
                spin_lock(&root->inode_lock);
-               if (xa_empty(&root->delayed_nodes)) {
+               n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
+                                          (void **)delayed_nodes, inode_id,
+                                          ARRAY_SIZE(delayed_nodes));
+               if (!n) {
                        spin_unlock(&root->inode_lock);
-                       return;
+                       break;
                }
 
-               xa_for_each_start(&root->delayed_nodes, index, delayed_node, index) {
+               inode_id = delayed_nodes[n - 1]->inode_id + 1;
+               for (i = 0; i < n; i++) {
                        /*
                         * Don't increase refs in case the node is dead and
                         * about to be removed from the tree in the loop below
                         */
-                       if (refcount_inc_not_zero(&delayed_node->refs)) {
-                               delayed_nodes[n] = delayed_node;
-                               n++;
-                       }
-                       if (n >= ARRAY_SIZE(delayed_nodes))
-                               break;
+                       if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
+                               delayed_nodes[i] = NULL;
                }
-               index++;
                spin_unlock(&root->inode_lock);
 
-               for (int i = 0; i < n; i++) {
+               for (i = 0; i < n; i++) {
+                       if (!delayed_nodes[i])
+                               continue;
                        __btrfs_kill_delayed_node(delayed_nodes[i]);
                        btrfs_release_delayed_node(delayed_nodes[i]);
                }
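
The revert above restores the classic radix tree preload/insert/-EEXIST dance:
radix_tree_preload() may sleep, so it runs before the spinlock is taken, and a
losing racer frees its node and retries the lookup. The skeleton, independent
of the btrfs specifics (lookup_existing(), alloc_item() and free_item() are
stand-ins):

    static DEFINE_SPINLOCK(tree_lock);
    static RADIX_TREE(tree, GFP_ATOMIC);    /* modified under tree_lock */

    void *get_or_insert(unsigned long key)
    {
            void *item;
            int ret;

    again:
            item = lookup_existing(key);    /* stand-in: takes a reference */
            if (item)
                    return item;

            item = alloc_item(key);         /* stand-in */
            if (!item)
                    return ERR_PTR(-ENOMEM);

            /* may sleep, so preload before taking the spinlock */
            ret = radix_tree_preload(GFP_NOFS);
            if (ret) {
                    free_item(item);
                    return ERR_PTR(ret);
            }

            spin_lock(&tree_lock);
            ret = radix_tree_insert(&tree, key, item);
            spin_unlock(&tree_lock);
            radix_tree_preload_end();

            if (ret == -EEXIST) {           /* another task won the race */
                    free_item(item);
                    goto again;
            }
            return item;
    }
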
index 89e94ea..de440eb 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/fs.h>
 #include <linux/blkdev.h>
+#include <linux/radix-tree.h>
 #include <linux/writeback.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
@@ -485,7 +486,7 @@ static int csum_dirty_subpage_buffers(struct btrfs_fs_info *fs_info,
                uptodate = btrfs_subpage_test_uptodate(fs_info, page, cur,
                                                       fs_info->nodesize);
 
-               /* A dirty eb shouldn't disappear from extent_buffers */
+               /* A dirty eb shouldn't disappear from buffer_radix */
                if (WARN_ON(!eb))
                        return -EUCLEAN;
 
@@ -1158,7 +1159,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
        root->nr_delalloc_inodes = 0;
        root->nr_ordered_extents = 0;
        root->inode_tree = RB_ROOT;
-       xa_init_flags(&root->delayed_nodes, GFP_ATOMIC);
+       INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
 
        btrfs_init_root_block_rsv(root);
 
@@ -1210,9 +1211,9 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
        btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
 #ifdef CONFIG_BTRFS_DEBUG
        INIT_LIST_HEAD(&root->leak_list);
-       spin_lock(&fs_info->fs_roots_lock);
+       spin_lock(&fs_info->fs_roots_radix_lock);
        list_add_tail(&root->leak_list, &fs_info->allocated_roots);
-       spin_unlock(&fs_info->fs_roots_lock);
+       spin_unlock(&fs_info->fs_roots_radix_lock);
 #endif
 }
 
@@ -1659,11 +1660,12 @@ static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
 {
        struct btrfs_root *root;
 
-       spin_lock(&fs_info->fs_roots_lock);
-       root = xa_load(&fs_info->fs_roots, (unsigned long)root_id);
+       spin_lock(&fs_info->fs_roots_radix_lock);
+       root = radix_tree_lookup(&fs_info->fs_roots_radix,
+                                (unsigned long)root_id);
        if (root)
                root = btrfs_grab_root(root);
-       spin_unlock(&fs_info->fs_roots_lock);
+       spin_unlock(&fs_info->fs_roots_radix_lock);
        return root;
 }
 
@@ -1705,14 +1707,20 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
 {
        int ret;
 
-       spin_lock(&fs_info->fs_roots_lock);
-       ret = xa_insert(&fs_info->fs_roots, (unsigned long)root->root_key.objectid,
-                       root, GFP_NOFS);
+       ret = radix_tree_preload(GFP_NOFS);
+       if (ret)
+               return ret;
+
+       spin_lock(&fs_info->fs_roots_radix_lock);
+       ret = radix_tree_insert(&fs_info->fs_roots_radix,
+                               (unsigned long)root->root_key.objectid,
+                               root);
        if (ret == 0) {
                btrfs_grab_root(root);
-               set_bit(BTRFS_ROOT_REGISTERED, &root->state);
+               set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
        }
-       spin_unlock(&fs_info->fs_roots_lock);
+       spin_unlock(&fs_info->fs_roots_radix_lock);
+       radix_tree_preload_end();
 
        return ret;
 }
@@ -2342,9 +2350,9 @@ void btrfs_put_root(struct btrfs_root *root)
                btrfs_drew_lock_destroy(&root->snapshot_lock);
                free_root_extent_buffers(root);
 #ifdef CONFIG_BTRFS_DEBUG
-               spin_lock(&root->fs_info->fs_roots_lock);
+               spin_lock(&root->fs_info->fs_roots_radix_lock);
                list_del_init(&root->leak_list);
-               spin_unlock(&root->fs_info->fs_roots_lock);
+               spin_unlock(&root->fs_info->fs_roots_radix_lock);
 #endif
                kfree(root);
        }
@@ -2352,21 +2360,28 @@ void btrfs_put_root(struct btrfs_root *root)
 
 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
 {
-       struct btrfs_root *root;
-       unsigned long index = 0;
+       int ret;
+       struct btrfs_root *gang[8];
+       int i;
 
        while (!list_empty(&fs_info->dead_roots)) {
-               root = list_entry(fs_info->dead_roots.next,
-                                 struct btrfs_root, root_list);
-               list_del(&root->root_list);
+               gang[0] = list_entry(fs_info->dead_roots.next,
+                                    struct btrfs_root, root_list);
+               list_del(&gang[0]->root_list);
 
-               if (test_bit(BTRFS_ROOT_REGISTERED, &root->state))
-                       btrfs_drop_and_free_fs_root(fs_info, root);
-               btrfs_put_root(root);
+               if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
+                       btrfs_drop_and_free_fs_root(fs_info, gang[0]);
+               btrfs_put_root(gang[0]);
        }
 
-       xa_for_each(&fs_info->fs_roots, index, root) {
-               btrfs_drop_and_free_fs_root(fs_info, root);
+       while (1) {
+               ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+                                            (void **)gang, 0,
+                                            ARRAY_SIZE(gang));
+               if (!ret)
+                       break;
+               for (i = 0; i < ret; i++)
+                       btrfs_drop_and_free_fs_root(fs_info, gang[i]);
        }
 }
 
@@ -3134,8 +3149,8 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
 
 void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 {
-       xa_init_flags(&fs_info->fs_roots, GFP_ATOMIC);
-       xa_init_flags(&fs_info->extent_buffers, GFP_ATOMIC);
+       INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
+       INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
        INIT_LIST_HEAD(&fs_info->trans_list);
        INIT_LIST_HEAD(&fs_info->dead_roots);
        INIT_LIST_HEAD(&fs_info->delayed_iputs);
@@ -3143,7 +3158,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
        INIT_LIST_HEAD(&fs_info->caching_block_groups);
        spin_lock_init(&fs_info->delalloc_root_lock);
        spin_lock_init(&fs_info->trans_lock);
-       spin_lock_init(&fs_info->fs_roots_lock);
+       spin_lock_init(&fs_info->fs_roots_radix_lock);
        spin_lock_init(&fs_info->delayed_iput_lock);
        spin_lock_init(&fs_info->defrag_inodes_lock);
        spin_lock_init(&fs_info->super_lock);
@@ -3374,7 +3389,7 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
        /*
         * btrfs_find_orphan_roots() is responsible for finding all the dead
         * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
-        * them into the fs_info->fs_roots. This must be done before
+        * them into the fs_info->fs_roots_radix tree. This must be done before
         * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
         * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
         * item before the root's tree is deleted - this means that if we unmount
@@ -4499,11 +4514,12 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
 {
        bool drop_ref = false;
 
-       spin_lock(&fs_info->fs_roots_lock);
-       xa_erase(&fs_info->fs_roots, (unsigned long)root->root_key.objectid);
-       if (test_and_clear_bit(BTRFS_ROOT_REGISTERED, &root->state))
+       spin_lock(&fs_info->fs_roots_radix_lock);
+       radix_tree_delete(&fs_info->fs_roots_radix,
+                         (unsigned long)root->root_key.objectid);
+       if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
                drop_ref = true;
-       spin_unlock(&fs_info->fs_roots_lock);
+       spin_unlock(&fs_info->fs_roots_radix_lock);
 
        if (BTRFS_FS_ERROR(fs_info)) {
                ASSERT(root->log_root == NULL);
@@ -4519,48 +4535,50 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
 
 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
 {
-       struct btrfs_root *roots[8];
-       unsigned long index = 0;
-       int i;
+       u64 root_objectid = 0;
+       struct btrfs_root *gang[8];
+       int i = 0;
        int err = 0;
-       int grabbed;
+       unsigned int ret = 0;
 
        while (1) {
-               struct btrfs_root *root;
-
-               spin_lock(&fs_info->fs_roots_lock);
-               if (!xa_find(&fs_info->fs_roots, &index, ULONG_MAX, XA_PRESENT)) {
-                       spin_unlock(&fs_info->fs_roots_lock);
-                       return err;
+               spin_lock(&fs_info->fs_roots_radix_lock);
+               ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+                                            (void **)gang, root_objectid,
+                                            ARRAY_SIZE(gang));
+               if (!ret) {
+                       spin_unlock(&fs_info->fs_roots_radix_lock);
+                       break;
                }
+               root_objectid = gang[ret - 1]->root_key.objectid + 1;
 
-               grabbed = 0;
-               xa_for_each_start(&fs_info->fs_roots, index, root, index) {
-                       /* Avoid grabbing roots in dead_roots */
-                       if (btrfs_root_refs(&root->root_item) > 0)
-                               roots[grabbed++] = btrfs_grab_root(root);
-                       if (grabbed >= ARRAY_SIZE(roots))
-                               break;
+               for (i = 0; i < ret; i++) {
+                       /* Avoid grabbing roots in dead_roots */
+                       if (btrfs_root_refs(&gang[i]->root_item) == 0) {
+                               gang[i] = NULL;
+                               continue;
+                       }
+                       /* grab all the search results for later use */
+                       gang[i] = btrfs_grab_root(gang[i]);
                }
-               spin_unlock(&fs_info->fs_roots_lock);
+               spin_unlock(&fs_info->fs_roots_radix_lock);
 
-               for (i = 0; i < grabbed; i++) {
-                       if (!roots[i])
+               for (i = 0; i < ret; i++) {
+                       if (!gang[i])
                                continue;
-                       index = roots[i]->root_key.objectid;
-                       err = btrfs_orphan_cleanup(roots[i]);
+                       root_objectid = gang[i]->root_key.objectid;
+                       err = btrfs_orphan_cleanup(gang[i]);
                        if (err)
-                               goto out;
-                       btrfs_put_root(roots[i]);
+                               break;
+                       btrfs_put_root(gang[i]);
                }
-               index++;
+               root_objectid++;
        }
 
-out:
-       /* Release the roots that remain uncleaned due to error */
-       for (; i < grabbed; i++) {
-               if (roots[i])
-                       btrfs_put_root(roots[i]);
+       /* release the uncleaned roots due to error */
+       for (; i < ret; i++) {
+               if (gang[i])
+                       btrfs_put_root(gang[i]);
        }
        return err;
 }
@@ -4632,6 +4650,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
        int ret;
 
        set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
+
+       /*
+        * We may have the reclaim task running and relocating a data block group,
+        * in which case it may create delayed iputs. So stop it before we park
+        * the cleaner kthread, otherwise we can get new delayed iputs after
+        * parking the cleaner, and that can make the async reclaim task hang
+        * if it's waiting for delayed iputs to complete, since the cleaner is
+        * parked and cannot run delayed iputs - this will make us hang when
+        * trying to stop the async reclaim task.
+        */
+       cancel_work_sync(&fs_info->reclaim_bgs_work);
        /*
         * We don't want the cleaner to start new transactions, add more delayed
         * iputs, etc. while we're closing. We can't use kthread_stop() yet
@@ -4672,8 +4701,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
        cancel_work_sync(&fs_info->async_data_reclaim_work);
        cancel_work_sync(&fs_info->preempt_reclaim_work);
 
-       cancel_work_sync(&fs_info->reclaim_bgs_work);
-
        /* Cancel or finish ongoing discard work */
        btrfs_discard_cleanup(fs_info);
 
@@ -4870,28 +4897,31 @@ static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
 
 static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
 {
-       unsigned long index = 0;
-       int grabbed = 0;
-       struct btrfs_root *roots[8];
+       struct btrfs_root *gang[8];
+       u64 root_objectid = 0;
+       int ret;
+
+       spin_lock(&fs_info->fs_roots_radix_lock);
+       while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+                                            (void **)gang, root_objectid,
+                                            ARRAY_SIZE(gang))) != 0) {
+               int i;
 
-       spin_lock(&fs_info->fs_roots_lock);
-       while ((grabbed = xa_extract(&fs_info->fs_roots, (void **)roots, index,
-                                    ULONG_MAX, 8, XA_PRESENT))) {
-               for (int i = 0; i < grabbed; i++)
-                       roots[i] = btrfs_grab_root(roots[i]);
-               spin_unlock(&fs_info->fs_roots_lock);
+               for (i = 0; i < ret; i++)
+                       gang[i] = btrfs_grab_root(gang[i]);
+               spin_unlock(&fs_info->fs_roots_radix_lock);
 
-               for (int i = 0; i < grabbed; i++) {
-                       if (!roots[i])
+               for (i = 0; i < ret; i++) {
+                       if (!gang[i])
                                continue;
-                       index = roots[i]->root_key.objectid;
-                       btrfs_free_log(NULL, roots[i]);
-                       btrfs_put_root(roots[i]);
+                       root_objectid = gang[i]->root_key.objectid;
+                       btrfs_free_log(NULL, gang[i]);
+                       btrfs_put_root(gang[i]);
                }
-               index++;
-               spin_lock(&fs_info->fs_roots_lock);
+               root_objectid++;
+               spin_lock(&fs_info->fs_roots_radix_lock);
        }
-       spin_unlock(&fs_info->fs_roots_lock);
+       spin_unlock(&fs_info->fs_roots_radix_lock);
        btrfs_free_log_root_tree(NULL, fs_info);
 }
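
Every loop above that walks fs_roots_radix re-derives its cursor by hand,
since radix_tree_gang_lookup() keeps no iterator state: the next batch starts
just past the key of the last element returned. The generic shape (key_of()
and process() are stand-ins):

    void for_each_item(struct radix_tree_root *tree)
    {
            void *gang[8];
            unsigned long cursor = 0;
            unsigned int ret, i;

            while ((ret = radix_tree_gang_lookup(tree, gang, cursor,
                                                 ARRAY_SIZE(gang))) != 0) {
                    /* resume just past the last key we saw */
                    cursor = key_of(gang[ret - 1]) + 1;
                    for (i = 0; i < ret; i++)
                            process(gang[i]);
            }
    }
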
 
index 0867c5c..a3afc15 100644 (file)
@@ -3832,7 +3832,7 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
               block_group->start == fs_info->data_reloc_bg ||
               fs_info->data_reloc_bg == 0);
 
-       if (block_group->ro) {
+       if (block_group->ro || block_group->zoned_data_reloc_ongoing) {
                ret = 1;
                goto out;
        }
@@ -3894,8 +3894,24 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
 out:
        if (ret && ffe_ctl->for_treelog)
                fs_info->treelog_bg = 0;
-       if (ret && ffe_ctl->for_data_reloc)
+       if (ret && ffe_ctl->for_data_reloc &&
+           fs_info->data_reloc_bg == block_group->start) {
+               /*
+                * Do not allow further allocations from this block group.
+                * Compared to increasing the ->ro, setting the
+                * Compared to increasing the ->ro, setting the
+                * ->zoned_data_reloc_ongoing flag still allows nocow
+                * writers to come in. See btrfs_inc_nocow_writers().
+                *
+                * We need to disable further allocations to avoid allocating a
+                * regular (non-relocation data) extent. With a mix of relocation
+                * extents and regular extents, we can dispatch WRITE commands
+                * (for relocation extents) and ZONE APPEND commands (for
+                * regular extents) at the same time to the same zone, which
+                * would easily break the write pointer.
+               block_group->zoned_data_reloc_ongoing = 1;
                fs_info->data_reloc_bg = 0;
+       }
        spin_unlock(&fs_info->relocation_bg_lock);
        spin_unlock(&fs_info->treelog_bg_lock);
        spin_unlock(&block_group->lock);
@@ -5813,7 +5829,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
        btrfs_qgroup_convert_reserved_meta(root, INT_MAX);
        btrfs_qgroup_free_meta_all_pertrans(root);
 
-       if (test_bit(BTRFS_ROOT_REGISTERED, &root->state))
+       if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state))
                btrfs_add_dropped_root(trans, root);
        else
                btrfs_put_root(root);
index 8f6b544..f03ab5d 100644 (file)
@@ -2966,7 +2966,7 @@ static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
 }
 
 /*
  * Find extent buffer for a given bytenr.
  *
  * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
  * in endio context.
@@ -2985,9 +2985,11 @@ static struct extent_buffer *find_extent_buffer_readpage(
                return (struct extent_buffer *)page->private;
        }
 
-       /* For subpage case, we need to lookup extent buffer xarray */
-       eb = xa_load(&fs_info->extent_buffers,
-                    bytenr >> fs_info->sectorsize_bits);
+       /* For subpage case, we need to lookup buffer radix tree */
+       rcu_read_lock();
+       eb = radix_tree_lookup(&fs_info->buffer_radix,
+                              bytenr >> fs_info->sectorsize_bits);
+       rcu_read_unlock();
        ASSERT(eb);
        return eb;
 }
@@ -4435,8 +4437,8 @@ static struct extent_buffer *find_extent_buffer_nolock(
        struct extent_buffer *eb;
 
        rcu_read_lock();
-       eb = xa_load(&fs_info->extent_buffers,
-                    start >> fs_info->sectorsize_bits);
+       eb = radix_tree_lookup(&fs_info->buffer_radix,
+                              start >> fs_info->sectorsize_bits);
        if (eb && atomic_inc_not_zero(&eb->refs)) {
                rcu_read_unlock();
                return eb;
@@ -5241,13 +5243,14 @@ int extent_writepages(struct address_space *mapping,
         */
        btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
        ret = extent_write_cache_pages(mapping, wbc, &epd);
-       btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
        ASSERT(ret <= 0);
        if (ret < 0) {
+               btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
                end_write_bio(&epd, ret);
                return ret;
        }
        flush_write_bio(&epd);
+       btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
        return ret;
 }
 
@@ -6128,22 +6131,24 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
        if (!eb)
                return ERR_PTR(-ENOMEM);
        eb->fs_info = fs_info;
-
-       do {
-               ret = xa_insert(&fs_info->extent_buffers,
-                               start >> fs_info->sectorsize_bits,
-                               eb, GFP_NOFS);
-               if (ret == -ENOMEM) {
-                       exists = ERR_PTR(ret);
+again:
+       ret = radix_tree_preload(GFP_NOFS);
+       if (ret) {
+               exists = ERR_PTR(ret);
+               goto free_eb;
+       }
+       spin_lock(&fs_info->buffer_lock);
+       ret = radix_tree_insert(&fs_info->buffer_radix,
+                               start >> fs_info->sectorsize_bits, eb);
+       spin_unlock(&fs_info->buffer_lock);
+       radix_tree_preload_end();
+       if (ret == -EEXIST) {
+               exists = find_extent_buffer(fs_info, start);
+               if (exists)
                        goto free_eb;
-               }
-               if (ret == -EBUSY) {
-                       exists = find_extent_buffer(fs_info, start);
-                       if (exists)
-                               goto free_eb;
-               }
-       } while (ret);
-
+               else
+                       goto again;
+       }
        check_buffer_tree_ref(eb);
        set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
 
@@ -6318,22 +6323,25 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
        }
        if (uptodate)
                set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
-
-       do {
-               ret = xa_insert(&fs_info->extent_buffers,
-                               start >> fs_info->sectorsize_bits,
-                               eb, GFP_NOFS);
-               if (ret == -ENOMEM) {
-                       exists = ERR_PTR(ret);
+again:
+       ret = radix_tree_preload(GFP_NOFS);
+       if (ret) {
+               exists = ERR_PTR(ret);
+               goto free_eb;
+       }
+
+       spin_lock(&fs_info->buffer_lock);
+       ret = radix_tree_insert(&fs_info->buffer_radix,
+                               start >> fs_info->sectorsize_bits, eb);
+       spin_unlock(&fs_info->buffer_lock);
+       radix_tree_preload_end();
+       if (ret == -EEXIST) {
+               exists = find_extent_buffer(fs_info, start);
+               if (exists)
                        goto free_eb;
-               }
-               if (ret == -EBUSY) {
-                       exists = find_extent_buffer(fs_info, start);
-                       if (exists)
-                               goto free_eb;
-               }
-       } while (ret);
-
+               else
+                       goto again;
+       }
        /* add one reference for the tree */
        check_buffer_tree_ref(eb);
        set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
@@ -6378,8 +6386,10 @@ static int release_extent_buffer(struct extent_buffer *eb)
 
                        spin_unlock(&eb->refs_lock);
 
-                       xa_erase(&fs_info->extent_buffers,
-                                eb->start >> fs_info->sectorsize_bits);
+                       spin_lock(&fs_info->buffer_lock);
+                       radix_tree_delete(&fs_info->buffer_radix,
+                                         eb->start >> fs_info->sectorsize_bits);
+                       spin_unlock(&fs_info->buffer_lock);
                } else {
                        spin_unlock(&eb->refs_lock);
                }
@@ -7324,25 +7334,42 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
        }
 }
 
+#define GANG_LOOKUP_SIZE       16
 static struct extent_buffer *get_next_extent_buffer(
                struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
 {
-       struct extent_buffer *eb;
-       unsigned long index;
+       struct extent_buffer *gang[GANG_LOOKUP_SIZE];
+       struct extent_buffer *found = NULL;
        u64 page_start = page_offset(page);
+       u64 cur = page_start;
 
        ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
        lockdep_assert_held(&fs_info->buffer_lock);
 
-       xa_for_each_start(&fs_info->extent_buffers, index, eb,
-                         page_start >> fs_info->sectorsize_bits) {
-               if (in_range(eb->start, page_start, PAGE_SIZE))
-                       return eb;
-               else if (eb->start >= page_start + PAGE_SIZE)
-                       /* Already beyond page end */
-                       return NULL;
+       while (cur < page_start + PAGE_SIZE) {
+               int ret;
+               int i;
+
+               ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
+                               (void **)gang, cur >> fs_info->sectorsize_bits,
+                               min_t(unsigned int, GANG_LOOKUP_SIZE,
+                                     PAGE_SIZE / fs_info->nodesize));
+               if (ret == 0)
+                       goto out;
+               for (i = 0; i < ret; i++) {
+                       /* Already beyond page end */
+                       if (gang[i]->start >= page_start + PAGE_SIZE)
+                               goto out;
+                       /* Found one */
+                       if (gang[i]->start >= bytenr) {
+                               found = gang[i];
+                               goto out;
+                       }
+               }
+               cur = gang[ret - 1]->start + gang[ret - 1]->len;
        }
-       return NULL;
+out:
+       return found;
 }
 
 static int try_release_subpage_extent_buffer(struct page *page)
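
The lookup side of buffer_radix stays lockless: RCU protects the tree walk
and atomic_inc_not_zero() refuses buffers whose refcount already reached
zero, while insertion and deletion are serialized by buffer_lock. The pattern
in isolation (struct obj is illustrative; this also assumes objects are freed
only after an RCU grace period):

    struct obj {
            atomic_t refs;
            /* ... */
    };

    struct obj *lookup_get(struct radix_tree_root *tree, unsigned long key)
    {
            struct obj *obj;

            rcu_read_lock();
            obj = radix_tree_lookup(tree, key);
            /* a zero refcount means the object is on its way out; treat
             * it as absent rather than resurrecting it */
            if (obj && atomic_inc_not_zero(&obj->refs)) {
                    rcu_read_unlock();
                    return obj;     /* caller now holds a reference */
            }
            rcu_read_unlock();
            return NULL;
    }
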
index 1fd827b..9dfde1a 100644 (file)
@@ -2323,25 +2323,62 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
         */
        btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
 
-       if (ret != BTRFS_NO_LOG_SYNC) {
+       if (ret == BTRFS_NO_LOG_SYNC) {
+               ret = btrfs_end_transaction(trans);
+               goto out;
+       }
+
+       /* We successfully logged the inode, attempt to sync the log. */
+       if (!ret) {
+               ret = btrfs_sync_log(trans, root, &ctx);
                if (!ret) {
-                       ret = btrfs_sync_log(trans, root, &ctx);
-                       if (!ret) {
-                               ret = btrfs_end_transaction(trans);
-                               goto out;
-                       }
-               }
-               if (!full_sync) {
-                       ret = btrfs_wait_ordered_range(inode, start, len);
-                       if (ret) {
-                               btrfs_end_transaction(trans);
-                               goto out;
-                       }
+                       ret = btrfs_end_transaction(trans);
+                       goto out;
                }
-               ret = btrfs_commit_transaction(trans);
-       } else {
+       }
+
+       /*
+        * At this point we need to commit the transaction because we had
+        * btrfs_need_log_full_commit() or some other error.
+        *
+        * If we didn't do a full sync we have to stop the trans handle, wait on
+        * the ordered extents, start it again and commit the transaction.  If
+        * we attempt to wait on the ordered extents here we could deadlock with
+        * something like fallocate() that is holding the extent lock trying to
+        * start a transaction while some other thread is trying to commit the
+        * transaction while we (fsync) are currently holding the transaction
+        * open.
+        */
+       if (!full_sync) {
                ret = btrfs_end_transaction(trans);
+               if (ret)
+                       goto out;
+               ret = btrfs_wait_ordered_range(inode, start, len);
+               if (ret)
+                       goto out;
+
+               /*
+                * This is safe to use here because we're only interested in
+                * making sure the transaction that had the ordered extents is
+                * committed.  We aren't waiting on anything past this point,
+                * we're purely getting the transaction and committing it.
+                */
+               trans = btrfs_attach_transaction_barrier(root);
+               if (IS_ERR(trans)) {
+                       ret = PTR_ERR(trans);
+
+                       /*
+                        * We committed the transaction and there's no currently
+                        * running transaction, this means everything we care
+                        * about made it to disk and we are done.
+                        */
+                       if (ret == -ENOENT)
+                               ret = 0;
+                       goto out;
+               }
        }
+
+       ret = btrfs_commit_transaction(trans);
 out:
        ASSERT(list_empty(&ctx.list));
        err = file_check_and_advance_wb_err(file);
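The restructured fsync tail leans on the semantics of btrfs_attach_transaction_barrier(): after ending our handle and waiting for ordered extents, attach to whatever transaction is running now, and treat -ENOENT as success since it means the transaction that covered the ordered extents has already committed. A toy model of that decision, with stand-in names rather than the real btrfs API:

#include <errno.h>
#include <stdio.h>

static int current_transaction = 0;	/* 0 means nothing is running */

/* Stand-in for btrfs_attach_transaction_barrier(). */
static int attach_transaction(void)
{
	return current_transaction ? current_transaction : -ENOENT;
}

static int fsync_tail(void)
{
	int trans = attach_transaction();

	if (trans == -ENOENT)
		return 0;	/* already committed, everything is on disk */
	if (trans < 0)
		return trans;
	printf("committing transaction %d\n", trans);
	return 0;
}

int main(void)
{
	printf("no txn running -> %d\n", fsync_tail());	/* 0 */
	current_transaction = 42;
	printf("txn running -> %d\n", fsync_tail());	/* commits 42, then 0 */
	return 0;
}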
@@ -2719,7 +2756,8 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
 
        ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
                                      min_size, false);
-       BUG_ON(ret);
+       if (WARN_ON(ret))
+               goto out_trans;
        trans->block_rsv = rsv;
 
        cur_offset = start;
@@ -2803,6 +2841,25 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
                        extent_info->file_offset += replace_len;
                }
 
+               /*
+                * We are releasing our handle on the transaction, balance the
+                * dirty pages of the btree inode and flush delayed items, and
+                * then get a new transaction handle, which may now point to a
+                * new transaction in case someone else may have committed the
+                * transaction we used to replace/drop file extent items. So
+                * bump the inode's iversion and update mtime and ctime except
+                * if we are called from a dedupe context. This is because a
+                * power failure/crash may happen after the transaction is
+                * committed and before we finish replacing/dropping all the
+                * file extent items we need.
+                */
+               inode_inc_iversion(&inode->vfs_inode);
+
+               if (!extent_info || extent_info->update_times) {
+                       inode->vfs_inode.i_mtime = current_time(&inode->vfs_inode);
+                       inode->vfs_inode.i_ctime = inode->vfs_inode.i_mtime;
+               }
+
                ret = btrfs_update_inode(trans, root, inode);
                if (ret)
                        break;
@@ -2819,7 +2876,8 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
 
                ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
                                              rsv, min_size, false);
-               BUG_ON(ret);    /* shouldn't happen */
+               if (WARN_ON(ret))
+                       break;
                trans->block_rsv = rsv;
 
                cur_offset = drop_args.drop_end;
index 81737ef..d50448b 100644 (file)
@@ -3195,6 +3195,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
                                                ordered_extent->file_offset,
                                                ordered_extent->file_offset +
                                                logical_len);
+               btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
+                                                 ordered_extent->disk_num_bytes);
        } else {
                BUG_ON(root == fs_info->tree_root);
                ret = insert_ordered_extent_file_extent(trans, ordered_extent);
@@ -3576,7 +3578,6 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
        u64 last_objectid = 0;
        int ret = 0, nr_unlink = 0;
 
-       /* Bail out if the cleanup is already running. */
        if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
                return 0;
 
@@ -3659,17 +3660,17 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                         *
                         * btrfs_find_orphan_roots() ran before us, which has
                         * found all deleted roots and loaded them into
-                        * fs_info->fs_roots. So here we can find if an
+                        * fs_info->fs_roots_radix. So here we can find if an
                         * orphan item corresponds to a deleted root by looking
-                        * up the root from that xarray.
+                        * up the root from that radix tree.
                         */
 
-                       spin_lock(&fs_info->fs_roots_lock);
-                       dead_root = xa_load(&fs_info->fs_roots,
-                                           (unsigned long)found_key.objectid);
+                       spin_lock(&fs_info->fs_roots_radix_lock);
+                       dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
+                                                        (unsigned long)found_key.objectid);
                        if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
                                is_dead_root = 1;
-                       spin_unlock(&fs_info->fs_roots_lock);
+                       spin_unlock(&fs_info->fs_roots_radix_lock);
 
                        if (is_dead_root) {
                                /* prevent this orphan from being found again */
@@ -3909,7 +3910,7 @@ cache_index:
         * cache.
         *
         * This is required for both inode re-read from disk and delayed inode
-        * in the delayed_nodes xarray.
+        * in delayed_nodes_tree.
         */
        if (BTRFS_I(inode)->last_trans == fs_info->generation)
                set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
@@ -7679,7 +7680,19 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
        if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
            em->block_start == EXTENT_MAP_INLINE) {
                free_extent_map(em);
-               ret = -ENOTBLK;
+               /*
+                * If we are in a NOWAIT context, return -EAGAIN in order to
+                * fall back to buffered IO. This is not only because we can
+                * block with buffered IO (no support for NOWAIT semantics at
+                * the moment) but also to avoid returning short reads to user
+                * space - this happens if we were able to read some data from
+                * previous non-compressed extents and then, when we fall back
+                * to buffered IO at btrfs_file_read_iter() by calling
+                * filemap_read(), we fail to fault in pages for the read
+                * buffer, in which case filemap_read() returns a short read
+                * (the number of bytes previously read is > 0, so it does not
+                * return -EFAULT).
+                */
+               ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
                goto unlock_err;
        }
 
@@ -9897,6 +9910,7 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
        extent_info.file_offset = file_offset;
        extent_info.extent_buf = (char *)&stack_fi;
        extent_info.is_new_extent = true;
+       extent_info.update_times = true;
        extent_info.qgroup_reserved = qgroup_released;
        extent_info.insertions = 0;
 
index 313d9d6..33461b4 100644 (file)
@@ -45,7 +45,6 @@ void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting ne
                start_ns = ktime_get_ns();
 
        down_read_nested(&eb->lock, nest);
-       eb->lock_owner = current->pid;
        trace_btrfs_tree_read_lock(eb, start_ns);
 }
 
@@ -62,7 +61,6 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
 int btrfs_try_tree_read_lock(struct extent_buffer *eb)
 {
        if (down_read_trylock(&eb->lock)) {
-               eb->lock_owner = current->pid;
                trace_btrfs_try_tree_read_lock(eb);
                return 1;
        }
@@ -90,7 +88,6 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 void btrfs_tree_read_unlock(struct extent_buffer *eb)
 {
        trace_btrfs_tree_read_unlock(eb);
-       eb->lock_owner = 0;
        up_read(&eb->lock);
 }
 
index c39f8b3..a3549d5 100644 (file)
@@ -344,6 +344,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
        int ret;
        const u64 len = olen_aligned;
        u64 last_dest_end = destoff;
+       u64 prev_extent_end = off;
 
        ret = -ENOMEM;
        buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
@@ -363,7 +364,6 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
        key.offset = off;
 
        while (1) {
-               u64 next_key_min_offset = key.offset + 1;
                struct btrfs_file_extent_item *extent;
                u64 extent_gen;
                int type;
@@ -431,14 +431,21 @@ process_slot:
                 * The first search might have left us at an extent item that
                 * ends before our target range's start; this can happen if we
                 * have holes and the NO_HOLES feature enabled.
+                *
+                * Subsequent searches may leave us on a file range we have
+                * processed before - this happens due to a race with ordered
+                * extent completion for a file range that is outside our source
+                * range, but that range was part of a file extent item that
+                * also covered a leading part of our source range.
                 */
-               if (key.offset + datal <= off) {
+               if (key.offset + datal <= prev_extent_end) {
                        path->slots[0]++;
                        goto process_slot;
                } else if (key.offset >= off + len) {
                        break;
                }
-               next_key_min_offset = key.offset + datal;
+
+               prev_extent_end = key.offset + datal;
                size = btrfs_item_size(leaf, slot);
                read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
                                   size);
@@ -489,6 +496,7 @@ process_slot:
                        clone_info.file_offset = new_key.offset;
                        clone_info.extent_buf = buf;
                        clone_info.is_new_extent = false;
+                       clone_info.update_times = !no_time_update;
                        ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
                                        drop_start, new_key.offset + datal - 1,
                                        &clone_info, &trans);
@@ -550,7 +558,7 @@ process_slot:
                        break;
 
                btrfs_release_path(path);
-               key.offset = next_key_min_offset;
+               key.offset = prev_extent_end;
 
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
index fa56890..c7dea63 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/mount.h>
 #include <linux/xattr.h>
 #include <linux/posix_acl_xattr.h>
+#include <linux/radix-tree.h>
 #include <linux/vmalloc.h>
 #include <linux/string.h>
 #include <linux/compat.h>
@@ -127,7 +128,7 @@ struct send_ctx {
        struct list_head new_refs;
        struct list_head deleted_refs;
 
-       struct xarray name_cache;
+       struct radix_tree_root name_cache;
        struct list_head name_cache_list;
        int name_cache_size;
 
@@ -268,13 +269,14 @@ struct orphan_dir_info {
 struct name_cache_entry {
        struct list_head list;
        /*
-        * On 32bit kernels, xarray has only 32bit indices, but we need to
-        * handle 64bit inums. We use the lower 32bit of the 64bit inum to store
-        * it in the tree. If more than one inum would fall into the same entry,
-        * we use inum_aliases to store the additional entries. inum_aliases is
-        * also used to store entries with the same inum but different generations.
+        * radix_tree has only 32bit entries but we need to handle 64bit inums.
+        * We use the lower 32bit of the 64bit inum to store it in the tree. If
+        * more than one inum would fall into the same entry, we use radix_list
+        * to store the additional entries. radix_list is also used to store
+        * entries that have the same inum but different generations.
         */
-       struct list_head inum_aliases;
+       struct list_head radix_list;
        u64 ino;
        u64 gen;
        u64 parent_ino;
@@ -2024,9 +2026,9 @@ out:
 }
 
 /*
- * Insert a name cache entry. On 32bit kernels the xarray index is 32bit,
+ * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
  * so we need to do some special handling in case we have clashes. This function
- * takes care of this with the help of name_cache_entry::inum_aliases.
+ * takes care of this with the help of name_cache_entry::radix_list.
  * In case of error, nce is kfreed.
  */
 static int name_cache_insert(struct send_ctx *sctx,
@@ -2035,7 +2037,8 @@ static int name_cache_insert(struct send_ctx *sctx,
        int ret = 0;
        struct list_head *nce_head;
 
-       nce_head = xa_load(&sctx->name_cache, (unsigned long)nce->ino);
+       nce_head = radix_tree_lookup(&sctx->name_cache,
+                       (unsigned long)nce->ino);
        if (!nce_head) {
                nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
                if (!nce_head) {
@@ -2044,14 +2047,14 @@ static int name_cache_insert(struct send_ctx *sctx,
                }
                INIT_LIST_HEAD(nce_head);
 
-               ret = xa_insert(&sctx->name_cache, nce->ino, nce_head, GFP_KERNEL);
+               ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
                if (ret < 0) {
                        kfree(nce_head);
                        kfree(nce);
                        return ret;
                }
        }
-       list_add_tail(&nce->inum_aliases, nce_head);
+       list_add_tail(&nce->radix_list, nce_head);
        list_add_tail(&nce->list, &sctx->name_cache_list);
        sctx->name_cache_size++;
 
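The struct comment describes the collision scheme that name_cache_insert() implements: index by the low 32 bits of the inum, and chain genuine 64-bit collisions as well as same-inum/different-generation entries on a per-slot list, comparing the full key only at lookup time. A self-contained userspace sketch of the idea, where a modulo table stands in for the radix tree and all names are illustrative:

#include <stdint.h>
#include <stdio.h>

struct nce {
	uint64_t ino, gen;
	struct nce *next;	/* stands in for radix_list */
};

#define NSLOTS 1024		/* stands in for the radix tree */
static struct nce *slots[NSLOTS];

static void cache_insert(struct nce *e)
{
	uint32_t idx = (uint32_t)e->ino % NSLOTS;	/* low 32 bits only */

	e->next = slots[idx];
	slots[idx] = e;
}

static struct nce *cache_search(uint64_t ino, uint64_t gen)
{
	uint32_t idx = (uint32_t)ino % NSLOTS;

	for (struct nce *e = slots[idx]; e; e = e->next)
		if (e->ino == ino && e->gen == gen)
			return e;	/* compare the full key, not the index */
	return NULL;
}

int main(void)
{
	struct nce a = { .ino = 5, .gen = 1 };
	struct nce b = { .ino = 5 + (1ULL << 32), .gen = 1 };	/* same low 32 bits */

	cache_insert(&a);
	cache_insert(&b);
	printf("%d\n", cache_search(b.ino, 1) == &b);	/* prints 1 */
	return 0;
}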
@@ -2063,14 +2066,15 @@ static void name_cache_delete(struct send_ctx *sctx,
 {
        struct list_head *nce_head;
 
-       nce_head = xa_load(&sctx->name_cache, (unsigned long)nce->ino);
+       nce_head = radix_tree_lookup(&sctx->name_cache,
+                       (unsigned long)nce->ino);
        if (!nce_head) {
                btrfs_err(sctx->send_root->fs_info,
              "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
                        nce->ino, sctx->name_cache_size);
        }
 
-       list_del(&nce->inum_aliases);
+       list_del(&nce->radix_list);
        list_del(&nce->list);
        sctx->name_cache_size--;
 
@@ -2078,7 +2082,7 @@ static void name_cache_delete(struct send_ctx *sctx,
         * We may not get to the final release of nce_head if the lookup fails
         */
        if (nce_head && list_empty(nce_head)) {
-               xa_erase(&sctx->name_cache, (unsigned long)nce->ino);
+               radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
                kfree(nce_head);
        }
 }
@@ -2089,11 +2093,11 @@ static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
        struct list_head *nce_head;
        struct name_cache_entry *cur;
 
-       nce_head = xa_load(&sctx->name_cache, (unsigned long)ino);
+       nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
        if (!nce_head)
                return NULL;
 
-       list_for_each_entry(cur, nce_head, inum_aliases) {
+       list_for_each_entry(cur, nce_head, radix_list) {
                if (cur->ino == ino && cur->gen == gen)
                        return cur;
        }
@@ -7518,7 +7522,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
 
        INIT_LIST_HEAD(&sctx->new_refs);
        INIT_LIST_HEAD(&sctx->deleted_refs);
-       xa_init_flags(&sctx->name_cache, GFP_KERNEL);
+       INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
        INIT_LIST_HEAD(&sctx->name_cache_list);
 
        sctx->flags = arg->flags;
index b1fdc6a..6627dd7 100644 (file)
@@ -763,6 +763,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                                compress_force = false;
                                no_compress++;
                        } else {
+                               btrfs_err(info, "unrecognized compression value %s",
+                                         args[0].from);
                                ret = -EINVAL;
                                goto out;
                        }
@@ -821,8 +823,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                case Opt_thread_pool:
                        ret = match_int(&args[0], &intarg);
                        if (ret) {
+                               btrfs_err(info, "unrecognized thread_pool value %s",
+                                         args[0].from);
                                goto out;
                        } else if (intarg == 0) {
+                               btrfs_err(info, "invalid value 0 for thread_pool");
                                ret = -EINVAL;
                                goto out;
                        }
@@ -883,8 +888,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                        break;
                case Opt_ratio:
                        ret = match_int(&args[0], &intarg);
-                       if (ret)
+                       if (ret) {
+                               btrfs_err(info, "unrecognized metadata_ratio value %s",
+                                         args[0].from);
                                goto out;
+                       }
                        info->metadata_ratio = intarg;
                        btrfs_info(info, "metadata ratio %u",
                                   info->metadata_ratio);
@@ -901,6 +909,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                                btrfs_set_and_info(info, DISCARD_ASYNC,
                                                   "turning on async discard");
                        } else {
+                               btrfs_err(info, "unrecognized discard mode value %s",
+                                         args[0].from);
                                ret = -EINVAL;
                                goto out;
                        }
@@ -933,6 +943,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                                btrfs_set_and_info(info, FREE_SPACE_TREE,
                                                   "enabling free space tree");
                        } else {
+                               btrfs_err(info, "unrecognized space_cache value %s",
+                                         args[0].from);
                                ret = -EINVAL;
                                goto out;
                        }
@@ -1014,8 +1026,12 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                        break;
                case Opt_check_integrity_print_mask:
                        ret = match_int(&args[0], &intarg);
-                       if (ret)
+                       if (ret) {
+                               btrfs_err(info,
+                               "unrecognized check_integrity_print_mask value %s",
+                                       args[0].from);
                                goto out;
+                       }
                        info->check_integrity_print_mask = intarg;
                        btrfs_info(info, "check_integrity_print_mask 0x%x",
                                   info->check_integrity_print_mask);
@@ -1030,13 +1046,15 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                        goto out;
 #endif
                case Opt_fatal_errors:
-                       if (strcmp(args[0].from, "panic") == 0)
+                       if (strcmp(args[0].from, "panic") == 0) {
                                btrfs_set_opt(info->mount_opt,
                                              PANIC_ON_FATAL_ERROR);
-                       else if (strcmp(args[0].from, "bug") == 0)
+                       } else if (strcmp(args[0].from, "bug") == 0) {
                                btrfs_clear_opt(info->mount_opt,
                                              PANIC_ON_FATAL_ERROR);
-                       else {
+                       } else {
+                               btrfs_err(info, "unrecognized fatal_errors value %s",
+                                         args[0].from);
                                ret = -EINVAL;
                                goto out;
                        }
@@ -1044,8 +1062,12 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                case Opt_commit_interval:
                        intarg = 0;
                        ret = match_int(&args[0], &intarg);
-                       if (ret)
+                       if (ret) {
+                               btrfs_err(info, "unrecognized commit_interval value %s",
+                                         args[0].from);
+                               ret = -EINVAL;
                                goto out;
+                       }
                        if (intarg == 0) {
                                btrfs_info(info,
                                           "using default commit interval %us",
@@ -1059,8 +1081,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                        break;
                case Opt_rescue:
                        ret = parse_rescue_options(info, args[0].from);
-                       if (ret < 0)
+                       if (ret < 0) {
+                               btrfs_err(info, "unrecognized rescue value %s",
+                                         args[0].from);
                                goto out;
+                       }
                        break;
 #ifdef CONFIG_BTRFS_DEBUG
                case Opt_fragment_all:
@@ -1985,6 +2010,14 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
        if (ret)
                goto restore;
 
+       /* V1 cache is not supported for subpage mount. */
+       if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
+               btrfs_warn(fs_info,
+       "v1 space cache is not supported for page size %lu with sectorsize %u",
+                          PAGE_SIZE, fs_info->sectorsize);
+               ret = -EINVAL;
+               goto restore;
+       }
        btrfs_remount_begin(fs_info, old_opts, *flags);
        btrfs_resize_thread_pool(fs_info,
                fs_info->thread_pool_size, old_thread_pool_size);
index 1591bfa..d8e56ed 100644 (file)
@@ -150,8 +150,8 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
 
 void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
 {
-       unsigned long index;
-       struct extent_buffer *eb;
+       struct radix_tree_iter iter;
+       void **slot;
        struct btrfs_device *dev, *tmp;
 
        if (!fs_info)
@@ -163,9 +163,25 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
 
        test_mnt->mnt_sb->s_fs_info = NULL;
 
-       xa_for_each(&fs_info->extent_buffers, index, eb) {
+       spin_lock(&fs_info->buffer_lock);
+       radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
+               struct extent_buffer *eb;
+
+               eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock);
+               if (!eb)
+                       continue;
+               /* Shouldn't happen but that kind of thinking creates CVEs */
+               if (radix_tree_exception(eb)) {
+                       if (radix_tree_deref_retry(eb))
+                               slot = radix_tree_iter_retry(&iter);
+                       continue;
+               }
+               slot = radix_tree_iter_resume(slot, &iter);
+               spin_unlock(&fs_info->buffer_lock);
                free_extent_buffer_stale(eb);
+               spin_lock(&fs_info->buffer_lock);
        }
+       spin_unlock(&fs_info->buffer_lock);
 
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
        list_for_each_entry_safe(dev, tmp, &fs_info->fs_devices->devices,
@@ -186,7 +202,7 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
        if (!root)
                return;
        /* Will be freed by btrfs_free_fs_roots */
-       if (WARN_ON(test_bit(BTRFS_ROOT_REGISTERED, &root->state)))
+       if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))
                return;
        btrfs_global_root_delete(root);
        btrfs_put_root(root);
index 06c0a95..875b801 100644 (file)
@@ -23,7 +23,7 @@
 #include "space-info.h"
 #include "zoned.h"
 
-#define BTRFS_ROOT_TRANS_TAG                           XA_MARK_0
+#define BTRFS_ROOT_TRANS_TAG 0
 
 /*
  * Transaction states and transitions
@@ -437,15 +437,15 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
                 */
                smp_wmb();
 
-               spin_lock(&fs_info->fs_roots_lock);
+               spin_lock(&fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid && !force) {
-                       spin_unlock(&fs_info->fs_roots_lock);
+                       spin_unlock(&fs_info->fs_roots_radix_lock);
                        return 0;
                }
-               xa_set_mark(&fs_info->fs_roots,
-                           (unsigned long)root->root_key.objectid,
-                           BTRFS_ROOT_TRANS_TAG);
-               spin_unlock(&fs_info->fs_roots_lock);
+               radix_tree_tag_set(&fs_info->fs_roots_radix,
+                                  (unsigned long)root->root_key.objectid,
+                                  BTRFS_ROOT_TRANS_TAG);
+               spin_unlock(&fs_info->fs_roots_radix_lock);
                root->last_trans = trans->transid;
 
                /* this is pretty tricky.  We don't want to
@@ -487,9 +487,11 @@ void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
        spin_unlock(&cur_trans->dropped_roots_lock);
 
        /* Make sure we don't try to update the root at commit time */
-       xa_clear_mark(&fs_info->fs_roots,
-                     (unsigned long)root->root_key.objectid,
-                     BTRFS_ROOT_TRANS_TAG);
+       spin_lock(&fs_info->fs_roots_radix_lock);
+       radix_tree_tag_clear(&fs_info->fs_roots_radix,
+                            (unsigned long)root->root_key.objectid,
+                            BTRFS_ROOT_TRANS_TAG);
+       spin_unlock(&fs_info->fs_roots_radix_lock);
 }
 
 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
@@ -1402,8 +1404,9 @@ void btrfs_add_dead_root(struct btrfs_root *root)
 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
-       struct btrfs_root *root;
-       unsigned long index;
+       struct btrfs_root *gang[8];
+       int i;
+       int ret;
 
        /*
         * At this point no one can be using this transaction to modify any tree
@@ -1411,46 +1414,57 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
         */
        ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING);
 
-       spin_lock(&fs_info->fs_roots_lock);
-       xa_for_each_marked(&fs_info->fs_roots, index, root, BTRFS_ROOT_TRANS_TAG) {
-               int ret;
-
-               /*
-                * At this point we can neither have tasks logging inodes
-                * from a root nor trying to commit a log tree.
-                */
-               ASSERT(atomic_read(&root->log_writers) == 0);
-               ASSERT(atomic_read(&root->log_commit[0]) == 0);
-               ASSERT(atomic_read(&root->log_commit[1]) == 0);
-
-               xa_clear_mark(&fs_info->fs_roots,
-                             (unsigned long)root->root_key.objectid,
-                             BTRFS_ROOT_TRANS_TAG);
-               spin_unlock(&fs_info->fs_roots_lock);
-
-               btrfs_free_log(trans, root);
-               ret = btrfs_update_reloc_root(trans, root);
-               if (ret)
-                       return ret;
-
-               /* See comments in should_cow_block() */
-               clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
-               smp_mb__after_atomic();
+       spin_lock(&fs_info->fs_roots_radix_lock);
+       while (1) {
+               ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
+                                                (void **)gang, 0,
+                                                ARRAY_SIZE(gang),
+                                                BTRFS_ROOT_TRANS_TAG);
+               if (ret == 0)
+                       break;
+               for (i = 0; i < ret; i++) {
+                       struct btrfs_root *root = gang[i];
+                       int ret2;
+
+                       /*
+                        * At this point we can neither have tasks logging inodes
+                        * from a root nor trying to commit a log tree.
+                        */
+                       ASSERT(atomic_read(&root->log_writers) == 0);
+                       ASSERT(atomic_read(&root->log_commit[0]) == 0);
+                       ASSERT(atomic_read(&root->log_commit[1]) == 0);
+
+                       radix_tree_tag_clear(&fs_info->fs_roots_radix,
+                                       (unsigned long)root->root_key.objectid,
+                                       BTRFS_ROOT_TRANS_TAG);
+                       spin_unlock(&fs_info->fs_roots_radix_lock);
+
+                       btrfs_free_log(trans, root);
+                       ret2 = btrfs_update_reloc_root(trans, root);
+                       if (ret2)
+                               return ret2;
+
+                       /* see comments in should_cow_block() */
+                       clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
+                       smp_mb__after_atomic();
+
+                       if (root->commit_root != root->node) {
+                               list_add_tail(&root->dirty_list,
+                                       &trans->transaction->switch_commits);
+                               btrfs_set_root_node(&root->root_item,
+                                                   root->node);
+                       }
 
-               if (root->commit_root != root->node) {
-                       list_add_tail(&root->dirty_list,
-                                     &trans->transaction->switch_commits);
-                       btrfs_set_root_node(&root->root_item, root->node);
+                       ret2 = btrfs_update_root(trans, fs_info->tree_root,
+                                               &root->root_key,
+                                               &root->root_item);
+                       if (ret2)
+                               return ret2;
+                       spin_lock(&fs_info->fs_roots_radix_lock);
+                       btrfs_qgroup_free_meta_all_pertrans(root);
                }
-
-               ret = btrfs_update_root(trans, fs_info->tree_root,
-                                       &root->root_key, &root->root_item);
-               if (ret)
-                       return ret;
-               spin_lock(&fs_info->fs_roots_lock);
-               btrfs_qgroup_free_meta_all_pertrans(root);
        }
-       spin_unlock(&fs_info->fs_roots_lock);
+       spin_unlock(&fs_info->fs_roots_radix_lock);
        return 0;
 }
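commit_fs_roots() now drains tagged roots with a restart-from-zero gang lookup: each batched root is untagged while the lock is still held, the lock is dropped for the per-root work, and the outer loop simply starts over at index 0 until no tagged entries remain. Untagging before dropping the lock is what guarantees forward progress. A compact sketch of that drain pattern, using a flat array instead of a tagged radix tree:

#include <stdio.h>

#define NITEMS 10
#define BATCH 3

static int tagged[NITEMS];

/* Stand-in for radix_tree_gang_lookup_tag(): collect up to max tagged indices. */
static int gang_lookup_tag(int *out, int max)
{
	int n = 0;

	for (int i = 0; i < NITEMS && n < max; i++)
		if (tagged[i])
			out[n++] = i;
	return n;
}

int main(void)
{
	int gang[BATCH];
	int n;

	for (int i = 0; i < NITEMS; i += 2)
		tagged[i] = 1;	/* tag a few entries */

	while ((n = gang_lookup_tag(gang, BATCH)) != 0) {
		for (int i = 0; i < n; i++) {
			tagged[gang[i]] = 0;	/* untag before processing */
			printf("processing %d\n", gang[i]);
		}
	}
	return 0;
}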
 
index 11237a9..d99026d 100644 (file)
@@ -1735,12 +1735,14 @@ static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
        ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
                               &mapped_length, &bioc);
        if (ret || !bioc || mapped_length < PAGE_SIZE) {
-               btrfs_put_bioc(bioc);
-               return -EIO;
+               ret = -EIO;
+               goto out_put_bioc;
        }
 
-       if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
-               return -EINVAL;
+       if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+               ret = -EINVAL;
+               goto out_put_bioc;
+       }
 
        nofs_flag = memalloc_nofs_save();
        nmirrors = (int)bioc->num_stripes;
@@ -1759,7 +1761,8 @@ static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
                break;
        }
        memalloc_nofs_restore(nofs_flag);
-
+out_put_bioc:
+       btrfs_put_bioc(bioc);
        return ret;
 }
 
@@ -1885,7 +1888,6 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
 {
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct map_lookup *map;
-       bool need_zone_finish;
        int ret = 0;
        int i;
 
@@ -1942,12 +1944,6 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
                }
        }
 
-       /*
-        * The block group is not fully allocated, so not fully written yet. We
-        * need to send ZONE_FINISH command to free up an active zone.
-        */
-       need_zone_finish = !btrfs_zoned_bg_is_full(block_group);
-
        block_group->zone_is_active = 0;
        block_group->alloc_offset = block_group->zone_capacity;
        block_group->free_space_ctl->free_space = 0;
@@ -1963,15 +1959,13 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
                if (device->zone_info->max_active_zones == 0)
                        continue;
 
-               if (need_zone_finish) {
-                       ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
-                                              physical >> SECTOR_SHIFT,
-                                              device->zone_info->zone_size >> SECTOR_SHIFT,
-                                              GFP_NOFS);
+               ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
+                                      physical >> SECTOR_SHIFT,
+                                      device->zone_info->zone_size >> SECTOR_SHIFT,
+                                      GFP_NOFS);
 
-                       if (ret)
-                               return ret;
-               }
+               if (ret)
+                       return ret;
 
                btrfs_dev_clear_active_zone(device, physical);
        }
@@ -2139,3 +2133,30 @@ bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
        factor = div64_u64(used * 100, total);
        return factor >= fs_info->bg_reclaim_threshold;
 }
+
+void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
+                                      u64 length)
+{
+       struct btrfs_block_group *block_group;
+
+       if (!btrfs_is_zoned(fs_info))
+               return;
+
+       block_group = btrfs_lookup_block_group(fs_info, logical);
+       /* This should only be called on a former data relocation block group. */
+       ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));
+
+       spin_lock(&block_group->lock);
+       if (!block_group->zoned_data_reloc_ongoing)
+               goto out;
+
+       /* All relocation extents are written. */
+       if (block_group->start + block_group->alloc_offset == logical + length) {
+               /* Now, release this block group for further allocations. */
+               block_group->zoned_data_reloc_ongoing = 0;
+       }
+
+out:
+       spin_unlock(&block_group->lock);
+       btrfs_put_block_group(block_group);
+}
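The release test above is pure write-pointer arithmetic: on a zoned filesystem the block group fills sequentially, so the relocation source can be released exactly when the end of the just-finished ordered extent coincides with start + alloc_offset. A worked example with plain numbers (illustrative values, no btrfs types):

#include <stdio.h>

int main(void)
{
	unsigned long long bg_start = 1ULL << 30;	/* block group start */
	unsigned long long alloc_offset = 8ULL << 20;	/* written so far */
	unsigned long long logical = bg_start + (4ULL << 20);
	unsigned long long length = 4ULL << 20;		/* this ordered extent */

	/* The extent ends at the write pointer: nothing left to write. */
	if (bg_start + alloc_offset == logical + length)
		puts("all relocation extents written: release block group");
	return 0;
}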
index bb1a189..6b2eec9 100644 (file)
@@ -77,6 +77,8 @@ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
 void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg);
 void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info);
 bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info);
+void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
+                                      u64 length);
 #else /* CONFIG_BLK_DEV_ZONED */
 static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
                                     struct blk_zone *zone)
@@ -243,6 +245,9 @@ static inline bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
 {
        return false;
 }
+
+static inline void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info,
+                                                    u64 logical, u64 length) { }
 #endif
 
 static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
index a41ae6e..1fee702 100644 (file)
@@ -21,7 +21,8 @@ static int cachefiles_ondemand_fd_release(struct inode *inode,
         * anon_fd.
         */
        xas_for_each(&xas, req, ULONG_MAX) {
-               if (req->msg.opcode == CACHEFILES_OP_READ) {
+               if (req->msg.object_id == object_id &&
+                   req->msg.opcode == CACHEFILES_OP_READ) {
                        req->error = -EIO;
                        complete(&req->done);
                        xas_store(&xas, NULL);
index 6dee888..d6e5916 100644 (file)
@@ -63,7 +63,7 @@
         (CONGESTION_ON_THRESH(congestion_kb) >> 2))
 
 static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
-                                       struct folio *folio, void **_fsdata);
+                                       struct folio **foliop, void **_fsdata);
 
 static inline struct ceph_snap_context *page_snap_context(struct page *page)
 {
@@ -1288,18 +1288,19 @@ ceph_find_incompatible(struct page *page)
 }
 
 static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
-                                       struct folio *folio, void **_fsdata)
+                                       struct folio **foliop, void **_fsdata)
 {
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc;
 
-       snapc = ceph_find_incompatible(folio_page(folio, 0));
+       snapc = ceph_find_incompatible(folio_page(*foliop, 0));
        if (snapc) {
                int r;
 
-               folio_unlock(folio);
-               folio_put(folio);
+               folio_unlock(*foliop);
+               folio_put(*foliop);
+               *foliop = NULL;
                if (IS_ERR(snapc))
                        return PTR_ERR(snapc);
 
index 38c9303..ac8fd5e 100644 (file)
@@ -4377,6 +4377,7 @@ static void flush_dirty_session_caps(struct ceph_mds_session *s)
                ihold(inode);
                dout("flush_dirty_caps %llx.%llx\n", ceph_vinop(inode));
                spin_unlock(&mdsc->cap_dirty_lock);
+               ceph_wait_on_async_create(inode);
                ceph_check_caps(ci, CHECK_CAPS_FLUSH, NULL);
                iput(inode);
                spin_lock(&mdsc->cap_dirty_lock);
index 1dd995e..2cfbac8 100644 (file)
@@ -162,6 +162,8 @@ cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
                seq_printf(m, "\t\tIPv4: %pI4\n", &ipv4->sin_addr);
        else if (iface->sockaddr.ss_family == AF_INET6)
                seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr);
+       if (!iface->is_active)
+               seq_puts(m, "\t\t[for-cleanup]\n");
 }
 
 static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
@@ -221,6 +223,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
        struct TCP_Server_Info *server;
        struct cifs_ses *ses;
        struct cifs_tcon *tcon;
+       struct cifs_server_iface *iface;
        int c, i, j;
 
        seq_puts(m,
@@ -456,11 +459,10 @@ skip_rdma:
                        if (ses->iface_count)
                                seq_printf(m, "\n\n\tServer interfaces: %zu",
                                           ses->iface_count);
-                       for (j = 0; j < ses->iface_count; j++) {
-                               struct cifs_server_iface *iface;
-
-                               iface = &ses->iface_list[j];
-                               seq_printf(m, "\n\t%d)", j+1);
+                       j = 0;
+                       list_for_each_entry(iface, &ses->iface_list,
+                                                iface_head) {
+                               seq_printf(m, "\n\t%d)", ++j);
                                cifs_dump_iface(m, iface);
                                if (is_ses_using_iface(ses, iface))
                                        seq_puts(m, "\t\t[CONNECTED]\n");
index e773716..a643c84 100644 (file)
@@ -80,6 +80,9 @@
 #define SMB_DNS_RESOLVE_INTERVAL_MIN     120
 #define SMB_DNS_RESOLVE_INTERVAL_DEFAULT 600
 
+/* interval, in seconds, at which multichannel re-queries the server's interfaces */
+#define SMB_INTERFACE_POLL_INTERVAL    600
+
 /* maximum number of PDUs in one compound */
 #define MAX_COMPOUND 5
 
@@ -933,15 +936,67 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
 #endif
 
 struct cifs_server_iface {
+       struct list_head iface_head;
+       struct kref refcount;
        size_t speed;
        unsigned int rdma_capable : 1;
        unsigned int rss_capable : 1;
+       unsigned int is_active : 1; /* unset if non-existent */
        struct sockaddr_storage sockaddr;
 };
 
+/* release iface when last ref is dropped */
+static inline void
+release_iface(struct kref *ref)
+{
+       struct cifs_server_iface *iface = container_of(ref,
+                                                      struct cifs_server_iface,
+                                                      refcount);
+       list_del_init(&iface->iface_head);
+       kfree(iface);
+}
+
+/*
+ * Compare two interfaces a and b.
+ * Return 0 if everything matches.
+ * Return 1 if a has a higher link speed, or is rdma capable, or is rss
+ * capable, or compares greater on sockaddr as a final tiebreaker.
+ * Return -1 otherwise.
+ */
+static inline int
+iface_cmp(struct cifs_server_iface *a, struct cifs_server_iface *b)
+{
+       int cmp_ret = 0;
+
+       WARN_ON(!a || !b);
+       if (a->speed == b->speed) {
+               if (a->rdma_capable == b->rdma_capable) {
+                       if (a->rss_capable == b->rss_capable) {
+                               cmp_ret = memcmp(&a->sockaddr, &b->sockaddr,
+                                                sizeof(a->sockaddr));
+                               if (!cmp_ret)
+                                       return 0;
+                               else if (cmp_ret > 0)
+                                       return 1;
+                               else
+                                       return -1;
+                       } else if (a->rss_capable > b->rss_capable)
+                               return 1;
+                       else
+                               return -1;
+               } else if (a->rdma_capable > b->rdma_capable)
+                       return 1;
+               else
+                       return -1;
+       } else if (a->speed > b->speed)
+               return 1;
+       else
+               return -1;
+}
+
 struct cifs_chan {
        unsigned int in_reconnect : 1; /* if session setup in progress for this channel */
        struct TCP_Server_Info *server;
+       struct cifs_server_iface *iface; /* interface in use */
        __u8 signkey[SMB3_SIGN_KEY_SIZE];
 };
 
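iface_cmp() exists so that ses->iface_list can be kept sorted best-first; channel setup then walks from the head and stops at the first usable entry. A reduced sketch of a comparator of this shape driving sorted insertion into a singly linked list, with simplified fields rather than the real cifs structures:

#include <stdio.h>

struct iface {
	unsigned long speed;
	int rdma_capable, rss_capable;
	struct iface *next;
};

/* Tiered comparator: speed first, then rdma, then rss. */
static int iface_cmp(const struct iface *a, const struct iface *b)
{
	if (a->speed != b->speed)
		return a->speed > b->speed ? 1 : -1;
	if (a->rdma_capable != b->rdma_capable)
		return a->rdma_capable > b->rdma_capable ? 1 : -1;
	if (a->rss_capable != b->rss_capable)
		return a->rss_capable > b->rss_capable ? 1 : -1;
	return 0;
}

/* Insert so the list stays sorted best-first; consumers then simply
 * walk from the head to pick the fastest usable interface. */
static void sorted_insert(struct iface **head, struct iface *n)
{
	while (*head && iface_cmp(n, *head) < 0)
		head = &(*head)->next;
	n->next = *head;
	*head = n;
}

int main(void)
{
	struct iface a = { .speed = 1000 }, b = { .speed = 10000 }, *head = NULL;

	sorted_insert(&head, &a);
	sorted_insert(&head, &b);
	printf("%lu\n", head->speed);	/* prints 10000: fastest first */
	return 0;
}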
@@ -993,7 +1048,7 @@ struct cifs_ses {
         */
        spinlock_t iface_lock;
        /* ========= begin: protected by iface_lock ======== */
-       struct cifs_server_iface *iface_list;
+       struct list_head iface_list;
        size_t iface_count;
        unsigned long iface_last_update; /* jiffies */
        /* ========= end: protected by iface_lock ======== */
@@ -1203,6 +1258,7 @@ struct cifs_tcon {
 #ifdef CONFIG_CIFS_DFS_UPCALL
        struct list_head ulist; /* cache update list */
 #endif
+       struct delayed_work     query_interfaces; /* query interfaces workqueue job */
 };
 
 /*
index 3b7366e..d59aebe 100644 (file)
@@ -636,6 +636,13 @@ cifs_chan_clear_need_reconnect(struct cifs_ses *ses,
 bool
 cifs_chan_needs_reconnect(struct cifs_ses *ses,
                          struct TCP_Server_Info *server);
+bool
+cifs_chan_is_iface_active(struct cifs_ses *ses,
+                         struct TCP_Server_Info *server);
+int
+cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server);
+int
+SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon);
 
 void extract_unc_hostname(const char *unc, const char **h, size_t *len);
 int copy_path_name(char *dst, const char *src);
index 1849e34..386bb52 100644 (file)
@@ -145,6 +145,25 @@ requeue_resolve:
        return rc;
 }
 
+static void smb2_query_server_interfaces(struct work_struct *work)
+{
+       int rc;
+       struct cifs_tcon *tcon = container_of(work,
+                                       struct cifs_tcon,
+                                       query_interfaces.work);
+
+       /*
+        * query server network interfaces, in case they change
+        */
+       rc = SMB3_request_interfaces(0, tcon);
+       if (rc) {
+               cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
+                               __func__, rc);
+       }
+
+       queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+                          (SMB_INTERFACE_POLL_INTERVAL * HZ));
+}
 
 static void cifs_resolve_server(struct work_struct *work)
 {
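smb2_query_server_interfaces() is a self-rearming delayed work item: run one query, log (but otherwise tolerate) failures, and unconditionally requeue for SMB_INTERFACE_POLL_INTERVAL seconds later, until cifs_put_tcon() cancels it. A userspace analogue of that run-then-rearm shape, using a pthread as a stand-in for the workqueue:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define POLL_INTERVAL 600	/* seconds, mirroring SMB_INTERFACE_POLL_INTERVAL */

static int query_interfaces(void)
{
	return 0;	/* stand-in for SMB3_request_interfaces() */
}

/* One worker = one delayed work item: run, log failures, re-arm, repeat. */
static void *poll_worker(void *arg)
{
	(void)arg;
	for (;;) {
		int rc = query_interfaces();

		if (rc)
			fprintf(stderr, "interface query failed: %d\n", rc);
		sleep(POLL_INTERVAL);	/* plays the queue_delayed_work() role */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, poll_worker, NULL);
	pthread_join(t, NULL);	/* in the kernel, cancel_delayed_work_sync() stops it */
	return 0;
}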
@@ -217,7 +236,7 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
                                      bool mark_smb_session)
 {
        struct TCP_Server_Info *pserver;
-       struct cifs_ses *ses;
+       struct cifs_ses *ses, *nses;
        struct cifs_tcon *tcon;
 
        /*
@@ -231,7 +250,20 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
 
 
        spin_lock(&cifs_tcp_ses_lock);
-       list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+       list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
+               /* check if iface is still active */
+               if (!cifs_chan_is_iface_active(ses, server)) {
+                       /*
+                        * HACK: drop the lock before calling
+                        * cifs_chan_update_iface to avoid deadlock
+                        */
+                       ses->ses_count++;
+                       spin_unlock(&cifs_tcp_ses_lock);
+                       cifs_chan_update_iface(ses, server);
+                       spin_lock(&cifs_tcp_ses_lock);
+                       ses->ses_count--;
+               }
+
                spin_lock(&ses->chan_lock);
                if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server))
                        goto next_session;
@@ -1886,7 +1918,6 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
        list_del_init(&ses->smb_ses_list);
        spin_unlock(&cifs_tcp_ses_lock);
 
-       spin_lock(&ses->chan_lock);
        chan_count = ses->chan_count;
 
        /* close any extra channels */
@@ -1894,13 +1925,14 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
                int i;
 
                for (i = 1; i < chan_count; i++) {
-                       spin_unlock(&ses->chan_lock);
+                       if (ses->chans[i].iface) {
+                               kref_put(&ses->chans[i].iface->refcount, release_iface);
+                               ses->chans[i].iface = NULL;
+                       }
                        cifs_put_tcp_session(ses->chans[i].server, 0);
-                       spin_lock(&ses->chan_lock);
                        ses->chans[i].server = NULL;
                }
        }
-       spin_unlock(&ses->chan_lock);
 
        sesInfoFree(ses);
        cifs_put_tcp_session(server, 0);
@@ -2270,6 +2302,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
        list_del_init(&tcon->tcon_list);
        spin_unlock(&cifs_tcp_ses_lock);
 
+       /* cancel polling of interfaces */
+       cancel_delayed_work_sync(&tcon->query_interfaces);
+
        if (tcon->use_witness) {
                int rc;
 
@@ -2507,6 +2542,12 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
        tcon->local_lease = ctx->local_lease;
        INIT_LIST_HEAD(&tcon->pending_opens);
 
+       /* schedule query interfaces poll */
+       INIT_DELAYED_WORK(&tcon->query_interfaces,
+                         smb2_query_server_interfaces);
+       queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+                          (SMB_INTERFACE_POLL_INTERVAL * HZ));
+
        spin_lock(&cifs_tcp_ses_lock);
        list_add(&tcon->tcon_list, &ses->tcon_list);
        spin_unlock(&cifs_tcp_ses_lock);
@@ -3982,10 +4023,16 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
                   struct nls_table *nls_info)
 {
        int rc = -ENOSYS;
+       struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
+       struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
        bool is_binding = false;
 
-
        spin_lock(&cifs_tcp_ses_lock);
+       if (server->dstaddr.ss_family == AF_INET6)
+               scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
+       else
+               scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
+
        if (ses->ses_status != SES_GOOD &&
            ses->ses_status != SES_NEW &&
            ses->ses_status != SES_NEED_RECON) {
index c69e124..0e84e6f 100644 (file)
@@ -75,6 +75,7 @@ sesInfoAlloc(void)
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                mutex_init(&ret_buf->session_mutex);
                spin_lock_init(&ret_buf->iface_lock);
+               INIT_LIST_HEAD(&ret_buf->iface_list);
                spin_lock_init(&ret_buf->chan_lock);
        }
        return ret_buf;
@@ -83,6 +84,8 @@ sesInfoAlloc(void)
 void
 sesInfoFree(struct cifs_ses *buf_to_free)
 {
+       struct cifs_server_iface *iface = NULL, *niface = NULL;
+
        if (buf_to_free == NULL) {
                cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
                return;
@@ -96,7 +99,11 @@ sesInfoFree(struct cifs_ses *buf_to_free)
        kfree(buf_to_free->user_name);
        kfree(buf_to_free->domainName);
        kfree_sensitive(buf_to_free->auth_key.response);
-       kfree(buf_to_free->iface_list);
+       spin_lock(&buf_to_free->iface_lock);
+       list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
+                                iface_head)
+               kref_put(&iface->refcount, release_iface);
+       spin_unlock(&buf_to_free->iface_lock);
        kfree_sensitive(buf_to_free);
 }
 
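sesInfoFree() now tears down the interface list with kref_put() rather than one kfree(), since channels may still hold references to individual entries: the list owns one reference, each user takes its own before leaving iface_lock, and release_iface() runs exactly once, on the final put, unlinking and freeing the entry. A userspace sketch of that lifetime rule using C11 atomics; this is an illustration, not the kernel kref API:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct iface {
	atomic_int refcount;
	/* ... address, speed, capability flags ... */
};

/* Runs exactly once, on the final put; the kernel version also does
 * list_del_init(&iface->iface_head) before freeing. */
static void release_iface(struct iface *p)
{
	free(p);
}

static void iface_get(struct iface *p)
{
	atomic_fetch_add(&p->refcount, 1);
}

static void iface_put(struct iface *p)
{
	if (atomic_fetch_sub(&p->refcount, 1) == 1)
		release_iface(p);
}

int main(void)
{
	struct iface *p = calloc(1, sizeof(*p));

	atomic_init(&p->refcount, 1);	/* the list's reference */
	iface_get(p);			/* a channel pins the entry */
	iface_put(p);			/* the channel is done with it */
	iface_put(p);			/* the list drops it: freed here */
	puts("released once");
	return 0;
}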
index 0bece97..02c8b29 100644 (file)
@@ -58,7 +58,7 @@ bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface)
 
        spin_lock(&ses->chan_lock);
        for (i = 0; i < ses->chan_count; i++) {
-               if (is_server_using_iface(ses->chans[i].server, iface)) {
+               if (ses->chans[i].iface == iface) {
                        spin_unlock(&ses->chan_lock);
                        return true;
                }
@@ -81,6 +81,9 @@ cifs_ses_get_chan_index(struct cifs_ses *ses,
        }
 
        /* If we didn't find the channel, it is likely a bug */
+       if (server)
+               cifs_dbg(VFS, "unable to get chan index for server: 0x%llx",
+                        server->conn_id);
        WARN_ON(1);
        return 0;
 }
@@ -143,16 +146,24 @@ cifs_chan_needs_reconnect(struct cifs_ses *ses,
        return CIFS_CHAN_NEEDS_RECONNECT(ses, chan_index);
 }
 
+bool
+cifs_chan_is_iface_active(struct cifs_ses *ses,
+                         struct TCP_Server_Info *server)
+{
+       unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+
+       return ses->chans[chan_index].iface &&
+               ses->chans[chan_index].iface->is_active;
+}
+
 /* returns number of channels added */
 int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
 {
        int old_chan_count, new_chan_count;
        int left;
-       int i = 0;
        int rc = 0;
        int tries = 0;
-       struct cifs_server_iface *ifaces = NULL;
-       size_t iface_count;
+       struct cifs_server_iface *iface = NULL, *niface = NULL;
 
        spin_lock(&ses->chan_lock);
 
@@ -182,32 +193,16 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
        spin_unlock(&ses->chan_lock);
 
        /*
-        * Make a copy of the iface list at the time and use that
-        * instead so as to not hold the iface spinlock for opening
-        * channels
-        */
-       spin_lock(&ses->iface_lock);
-       iface_count = ses->iface_count;
-       if (iface_count <= 0) {
-               spin_unlock(&ses->iface_lock);
-               cifs_dbg(VFS, "no iface list available to open channels\n");
-               return 0;
-       }
-       ifaces = kmemdup(ses->iface_list, iface_count*sizeof(*ifaces),
-                        GFP_ATOMIC);
-       if (!ifaces) {
-               spin_unlock(&ses->iface_lock);
-               return 0;
-       }
-       spin_unlock(&ses->iface_lock);
-
-       /*
         * Keep connecting to same, fastest, iface for all channels as
         * long as its RSS. Try next fastest one if not RSS or channel
         * creation fails.
         */
+       spin_lock(&ses->iface_lock);
+       iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
+                                iface_head);
+       spin_unlock(&ses->iface_lock);
+
        while (left > 0) {
-               struct cifs_server_iface *iface;
 
                tries++;
                if (tries > 3*ses->chan_max) {
@@ -216,31 +211,128 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
                        break;
                }
 
-               iface = &ifaces[i];
-               if (is_ses_using_iface(ses, iface) && !iface->rss_capable) {
-                       i = (i+1) % iface_count;
-                       continue;
+               spin_lock(&ses->iface_lock);
+               if (!ses->iface_count) {
+                       spin_unlock(&ses->iface_lock);
+                       break;
                }
 
-               rc = cifs_ses_add_channel(cifs_sb, ses, iface);
-               if (rc) {
-                       cifs_dbg(FYI, "failed to open extra channel on iface#%d rc=%d\n",
-                                i, rc);
-                       i = (i+1) % iface_count;
-                       continue;
+               list_for_each_entry_safe_from(iface, niface, &ses->iface_list,
+                                   iface_head) {
+                       /* skip ifaces that are unusable */
+                       if (!iface->is_active ||
+                           (is_ses_using_iface(ses, iface) &&
+                            !iface->rss_capable)) {
+                               continue;
+                       }
+
+                       /* take ref before unlock */
+                       kref_get(&iface->refcount);
+
+                       spin_unlock(&ses->iface_lock);
+                       rc = cifs_ses_add_channel(cifs_sb, ses, iface);
+                       spin_lock(&ses->iface_lock);
+
+                       if (rc) {
+                               cifs_dbg(VFS, "failed to open extra channel on iface:%pIS rc=%d\n",
+                                        &iface->sockaddr,
+                                        rc);
+                               kref_put(&iface->refcount, release_iface);
+                               continue;
+                       }
+
+                       cifs_dbg(FYI, "successfully opened new channel on iface:%pIS\n",
+                                &iface->sockaddr);
+                       break;
                }
+               spin_unlock(&ses->iface_lock);
 
-               cifs_dbg(FYI, "successfully opened new channel on iface#%d\n",
-                        i);
                left--;
                new_chan_count++;
        }
 
-       kfree(ifaces);
        return new_chan_count - old_chan_count;
 }
 
 /*
+ * Update the iface for the channel if necessary.
+ * Returns 0 when the iface was updated, 1 when the existing iface is still
+ * active or no usable replacement was found.
+ * Takes chan_lock and iface_lock internally, so it must be called without
+ * either lock held.
+ */
+int
+cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+{
+       unsigned int chan_index;
+       struct cifs_server_iface *iface = NULL;
+       struct cifs_server_iface *old_iface = NULL;
+       int rc = 0;
+
+       spin_lock(&ses->chan_lock);
+       chan_index = cifs_ses_get_chan_index(ses, server);
+       if (!chan_index) {
+               spin_unlock(&ses->chan_lock);
+               return 0;
+       }
+
+       if (ses->chans[chan_index].iface) {
+               old_iface = ses->chans[chan_index].iface;
+               if (old_iface->is_active) {
+                       spin_unlock(&ses->chan_lock);
+                       return 1;
+               }
+       }
+       spin_unlock(&ses->chan_lock);
+
+       spin_lock(&ses->iface_lock);
+       /* then look for a new one */
+       list_for_each_entry(iface, &ses->iface_list, iface_head) {
+               if (!iface->is_active ||
+                   (is_ses_using_iface(ses, iface) &&
+                    !iface->rss_capable)) {
+                       continue;
+               }
+               kref_get(&iface->refcount);
+               break;
+       }
+
+       if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
+               rc = 1;
+               iface = NULL;
+               cifs_dbg(FYI, "unable to find a suitable iface\n");
+       }
+
+       /* now drop the ref to the current iface */
+       if (old_iface && iface) {
+               kref_put(&old_iface->refcount, release_iface);
+               cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n",
+                        &old_iface->sockaddr,
+                        &iface->sockaddr);
+       } else if (old_iface) {
+               kref_put(&old_iface->refcount, release_iface);
+               cifs_dbg(FYI, "releasing ref to iface: %pIS\n",
+                        &old_iface->sockaddr);
+       } else {
+               WARN_ON(!iface);
+               cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr);
+       }
+       spin_unlock(&ses->iface_lock);
+
+       spin_lock(&ses->chan_lock);
+       chan_index = cifs_ses_get_chan_index(ses, server);
+       ses->chans[chan_index].iface = iface;
+
+       /* No iface was found. If this is a secondary channel, drop the connection */
+       if (!iface && CIFS_SERVER_IS_CHAN(server))
+               ses->chans[chan_index].server = NULL;
+
+       spin_unlock(&ses->chan_lock);
+
+       if (!iface && CIFS_SERVER_IS_CHAN(server))
+               cifs_put_tcp_session(server, false);
+
+       return rc;
+}
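
The list_entry_is_head() test above relies on a property of list_for_each_entry(): if the loop finishes without hitting break, the cursor holds the container of the list head itself, not a valid entry. Reduced to a standalone sketch:

	/* Sketch: detect "no match" after a full list traversal. */
	struct item *pos;

	list_for_each_entry(pos, &head, link) {
		if (is_match(pos))
			break;		/* pos is a valid, matching entry */
	}
	if (list_entry_is_head(pos, &head, link))
		pos = NULL;		/* ran off the end: nothing found */
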
+
+/*
  * If server is a channel of ses, return the corresponding enclosing
  * cifs_chan otherwise return NULL.
  */
@@ -352,6 +444,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
                spin_unlock(&ses->chan_lock);
                goto out;
        }
+       chan->iface = iface;
        ses->chan_count++;
        atomic_set(&ses->chan_seq, 0);
 
@@ -381,6 +474,14 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
 
 out:
        if (rc && chan->server) {
+               /*
+                * cancel these delayed works so that they cannot race
+                * with us while we remove this channel
+                */
+               cancel_delayed_work_sync(&chan->server->echo);
+               cancel_delayed_work_sync(&chan->server->resolve);
+               cancel_delayed_work_sync(&chan->server->reconnect);
+
                spin_lock(&ses->chan_lock);
                /* we rely on all bits beyond chan_count to be clear */
                cifs_chan_clear_need_reconnect(ses, chan->server);
@@ -391,10 +492,9 @@ out:
                 */
                WARN_ON(ses->chan_count < 1);
                spin_unlock(&ses->chan_lock);
-       }
 
-       if (rc && chan->server)
                cifs_put_tcp_session(chan->server, 0);
+       }
 
        return rc;
 }
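
The error path now quiesces the channel server's delayed works before putting the TCP session, since a still-running work item could dereference the dying channel. The same lifecycle in a generic, hypothetical form (struct conn and conn_teardown are illustrative, not cifs code):

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct conn {
		struct delayed_work echo;
		/* ... */
	};

	static void conn_teardown(struct conn *c)
	{
		/* waits for a running instance and cancels a pending one;
		 * freeing 'c' is safe afterwards, provided nothing else
		 * can re-queue the work */
		cancel_delayed_work_sync(&c->echo);
		kfree(c);
	}
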
index 8543caf..8802995 100644 (file)
@@ -512,73 +512,41 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
 static int
 parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
                        size_t buf_len,
-                       struct cifs_server_iface **iface_list,
-                       size_t *iface_count)
+                       struct cifs_ses *ses)
 {
        struct network_interface_info_ioctl_rsp *p;
        struct sockaddr_in *addr4;
        struct sockaddr_in6 *addr6;
        struct iface_info_ipv4 *p4;
        struct iface_info_ipv6 *p6;
-       struct cifs_server_iface *info;
+       struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL;
+       struct cifs_server_iface tmp_iface;
        ssize_t bytes_left;
        size_t next = 0;
        int nb_iface = 0;
-       int rc = 0;
-
-       *iface_list = NULL;
-       *iface_count = 0;
-
-       /*
-        * Fist pass: count and sanity check
-        */
+       int rc = 0, ret = 0;
 
        bytes_left = buf_len;
        p = buf;
-       while (bytes_left >= sizeof(*p)) {
-               nb_iface++;
-               next = le32_to_cpu(p->Next);
-               if (!next) {
-                       bytes_left -= sizeof(*p);
-                       break;
-               }
-               p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
-               bytes_left -= next;
-       }
-
-       if (!nb_iface) {
-               cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
-               rc = -EINVAL;
-               goto out;
-       }
-
-       /* Azure rounds the buffer size up 8, to a 16 byte boundary */
-       if ((bytes_left > 8) || p->Next)
-               cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
-
 
+       spin_lock(&ses->iface_lock);
        /*
-        * Second pass: extract info to internal structure
+        * Go through iface_list and do kref_put to remove
+        * any unused ifaces. Ifaces still in use will be freed
+        * when the last user calls kref_put on them.
         */
-
-       *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
-       if (!*iface_list) {
-               rc = -ENOMEM;
-               goto out;
+       list_for_each_entry_safe(iface, niface, &ses->iface_list,
+                                iface_head) {
+               iface->is_active = 0;
+               kref_put(&iface->refcount, release_iface);
        }
+       spin_unlock(&ses->iface_lock);
 
-       info = *iface_list;
-       bytes_left = buf_len;
-       p = buf;
        while (bytes_left >= sizeof(*p)) {
-               info->speed = le64_to_cpu(p->LinkSpeed);
-               info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
-               info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
-
-               cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
-               cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
-               cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
-                        le32_to_cpu(p->Capability));
+               memset(&tmp_iface, 0, sizeof(tmp_iface));
+               tmp_iface.speed = le64_to_cpu(p->LinkSpeed);
+               tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
+               tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
 
                switch (p->Family) {
                /*
@@ -587,7 +555,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
                 * conversion explicit in case either one changes.
                 */
                case INTERNETWORK:
-                       addr4 = (struct sockaddr_in *)&info->sockaddr;
+                       addr4 = (struct sockaddr_in *)&tmp_iface.sockaddr;
                        p4 = (struct iface_info_ipv4 *)p->Buffer;
                        addr4->sin_family = AF_INET;
                        memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
@@ -599,7 +567,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
                                 &addr4->sin_addr);
                        break;
                case INTERNETWORKV6:
-                       addr6 = (struct sockaddr_in6 *)&info->sockaddr;
+                       addr6 = (struct sockaddr_in6 *)&tmp_iface.sockaddr;
                        p6 = (struct iface_info_ipv6 *)p->Buffer;
                        addr6->sin6_family = AF_INET6;
                        memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
@@ -619,46 +587,96 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
                        goto next_iface;
                }
 
-               (*iface_count)++;
-               info++;
+               /*
+                * The iface_list is assumed to be sorted by speed.
+                * Check if the new interface exists in that list.
+                * NEVER modify an existing iface; it could be in use.
+                * Add a new one instead.
+                */
+               spin_lock(&ses->iface_lock);
+               iface = niface = NULL;
+               list_for_each_entry_safe(iface, niface, &ses->iface_list,
+                                        iface_head) {
+                       ret = iface_cmp(iface, &tmp_iface);
+                       if (!ret) {
+                               /* just get a ref so that it doesn't get picked/freed */
+                               iface->is_active = 1;
+                               kref_get(&iface->refcount);
+                               spin_unlock(&ses->iface_lock);
+                               goto next_iface;
+                       } else if (ret < 0) {
+                               /* all remaining ifaces are slower */
+                               kref_get(&iface->refcount);
+                               break;
+                       }
+               }
+               spin_unlock(&ses->iface_lock);
+
+               /* no match. insert the entry in the list */
+               info = kmalloc(sizeof(struct cifs_server_iface),
+                              GFP_KERNEL);
+               if (!info) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+               memcpy(info, &tmp_iface, sizeof(tmp_iface));
+
+               /* add this new entry to the list */
+               kref_init(&info->refcount);
+               info->is_active = 1;
+
+               cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, ses->iface_count);
+               cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
+               cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
+                        le32_to_cpu(p->Capability));
+
+               spin_lock(&ses->iface_lock);
+               if (!list_entry_is_head(iface, &ses->iface_list, iface_head)) {
+                       list_add_tail(&info->iface_head, &iface->iface_head);
+                       kref_put(&iface->refcount, release_iface);
+               } else
+                       list_add_tail(&info->iface_head, &ses->iface_list);
+               spin_unlock(&ses->iface_lock);
+
+               ses->iface_count++;
+               ses->iface_last_update = jiffies;
 next_iface:
+               nb_iface++;
                next = le32_to_cpu(p->Next);
-               if (!next)
+               if (!next) {
+                       bytes_left -= sizeof(*p);
                        break;
+               }
                p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
                bytes_left -= next;
        }
 
-       if (!*iface_count) {
+       if (!nb_iface) {
+               cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
                rc = -EINVAL;
                goto out;
        }
 
-out:
-       if (rc) {
-               kfree(*iface_list);
-               *iface_count = 0;
-               *iface_list = NULL;
-       }
-       return rc;
-}
+       /* Azure rounds the buffer size up by 8, to a 16-byte boundary */
+       if ((bytes_left > 8) || p->Next)
+               cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
 
-static int compare_iface(const void *ia, const void *ib)
-{
-       const struct cifs_server_iface *a = (struct cifs_server_iface *)ia;
-       const struct cifs_server_iface *b = (struct cifs_server_iface *)ib;
 
-       return a->speed == b->speed ? 0 : (a->speed > b->speed ? -1 : 1);
+       if (!ses->iface_count) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+out:
+       return rc;
 }
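
parse_server_interfaces() keeps ses->iface_list sorted fastest-first by inserting each new entry before the first slower one, or appending if every existing entry is faster. The insertion idiom, shown standalone with a minimal illustrative struct:

	/* Sketch: insert into a list kept sorted by descending speed. */
	struct iface {
		struct list_head head;
		u64 speed;
	};

	static void insert_sorted(struct list_head *lst, struct iface *nu)
	{
		struct iface *cur;

		list_for_each_entry(cur, lst, head) {
			if (cur->speed < nu->speed) {
				/* list_add_tail() on a member node links
				 * 'nu' immediately before 'cur' */
				list_add_tail(&nu->head, &cur->head);
				return;
			}
		}
		list_add_tail(&nu->head, lst);	/* slowest so far: append */
	}
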
 
-static int
+int
 SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
 {
        int rc;
        unsigned int ret_data_len = 0;
        struct network_interface_info_ioctl_rsp *out_buf = NULL;
-       struct cifs_server_iface *iface_list;
-       size_t iface_count;
        struct cifs_ses *ses = tcon->ses;
 
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
@@ -674,21 +692,10 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
                goto out;
        }
 
-       rc = parse_server_interfaces(out_buf, ret_data_len,
-                                    &iface_list, &iface_count);
+       rc = parse_server_interfaces(out_buf, ret_data_len, ses);
        if (rc)
                goto out;
 
-       /* sort interfaces from fastest to slowest */
-       sort(iface_list, iface_count, sizeof(*iface_list), compare_iface, NULL);
-
-       spin_lock(&ses->iface_lock);
-       kfree(ses->iface_list);
-       ses->iface_list = iface_list;
-       ses->iface_count = iface_count;
-       ses->iface_last_update = jiffies;
-       spin_unlock(&ses->iface_lock);
-
 out:
        kfree(out_buf);
        return rc;
index eaf975f..c705de3 100644 (file)
@@ -543,6 +543,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
                      struct TCP_Server_Info *server, unsigned int *total_len)
 {
        char *pneg_ctxt;
+       char *hostname = NULL;
        unsigned int ctxt_len, neg_context_count;
 
        if (*total_len > 200) {
@@ -570,16 +571,25 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
        *total_len += ctxt_len;
        pneg_ctxt += ctxt_len;
 
-       ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
-                                       server->hostname);
-       *total_len += ctxt_len;
-       pneg_ctxt += ctxt_len;
+       /*
+        * secondary channels don't have the hostname field populated;
+        * use the hostname field from the primary channel instead
+        */
+       hostname = CIFS_SERVER_IS_CHAN(server) ?
+               server->primary_server->hostname : server->hostname;
+       if (hostname && (hostname[0] != 0)) {
+               ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
+                                             hostname);
+               *total_len += ctxt_len;
+               pneg_ctxt += ctxt_len;
+               neg_context_count = 3;
+       } else
+               neg_context_count = 2;
 
        build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
        *total_len += sizeof(struct smb2_posix_neg_context);
        pneg_ctxt += sizeof(struct smb2_posix_neg_context);
-
-       neg_context_count = 4;
+       neg_context_count++;
 
        if (server->compress_algorithm) {
                build_compression_ctxt((struct smb2_compression_capabilities_context *)
@@ -5154,6 +5164,8 @@ SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        data = &info;
        size = sizeof(struct smb2_file_eof_info);
 
+       trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, le64_to_cpu(*eof));
+
        return send_set_info(xid, tcon, persistent_fid, volatile_fid,
                        pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
                        0, 1, &data, &size);
index 2be5e0c..6b88dc2 100644 (file)
@@ -121,6 +121,44 @@ DEFINE_SMB3_RW_DONE_EVENT(query_dir_done);
 DEFINE_SMB3_RW_DONE_EVENT(zero_done);
 DEFINE_SMB3_RW_DONE_EVENT(falloc_done);
 
+/* For logging successful set EOF (truncate) */
+DECLARE_EVENT_CLASS(smb3_eof_class,
+       TP_PROTO(unsigned int xid,
+               __u64   fid,
+               __u32   tid,
+               __u64   sesid,
+               __u64   offset),
+       TP_ARGS(xid, fid, tid, sesid, offset),
+       TP_STRUCT__entry(
+               __field(unsigned int, xid)
+               __field(__u64, fid)
+               __field(__u32, tid)
+               __field(__u64, sesid)
+               __field(__u64, offset)
+       ),
+       TP_fast_assign(
+               __entry->xid = xid;
+               __entry->fid = fid;
+               __entry->tid = tid;
+               __entry->sesid = sesid;
+               __entry->offset = offset;
+       ),
+       TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx",
+               __entry->xid, __entry->sesid, __entry->tid, __entry->fid,
+               __entry->offset)
+)
+
+#define DEFINE_SMB3_EOF_EVENT(name)         \
+DEFINE_EVENT(smb3_eof_class, smb3_##name,   \
+       TP_PROTO(unsigned int xid,              \
+               __u64   fid,                    \
+               __u32   tid,                    \
+               __u64   sesid,                  \
+               __u64   offset),                \
+       TP_ARGS(xid, fid, tid, sesid, offset))
+
+DEFINE_SMB3_EOF_EVENT(set_eof);
+
 /*
  * For handle based calls other than read and write, and get/set info
  */
index ee92634..1105ce3 100644 (file)
@@ -9,6 +9,15 @@ menuconfig DLM
        A general purpose distributed lock manager for kernel or userspace
        applications.
 
+config DLM_DEPRECATED_API
+       bool "DLM deprecated API"
+       depends on DLM
+       help
+         Enables deprecated DLM timeout features that will be removed
+         in later Linux kernel releases.
+
+         If you are unsure, say N.
+
 config DLM_DEBUG
        bool "DLM debugging"
        depends on DLM
index 3545fda..71dab73 100644 (file)
@@ -9,7 +9,6 @@ dlm-y :=                        ast.o \
                                member.o \
                                memory.o \
                                midcomms.o \
-                               netlink.o \
                                lowcomms.o \
                                plock.o \
                                rcom.o \
@@ -18,5 +17,6 @@ dlm-y :=                      ast.o \
                                requestqueue.o \
                                user.o \
                                util.o 
+dlm-$(CONFIG_DLM_DEPRECATED_API) +=    netlink.o
 dlm-$(CONFIG_DLM_DEBUG) +=     debug_fs.o
 
index bfac462..19ef136 100644 (file)
@@ -255,13 +255,13 @@ void dlm_callback_work(struct work_struct *work)
                if (callbacks[i].flags & DLM_CB_SKIP) {
                        continue;
                } else if (callbacks[i].flags & DLM_CB_BAST) {
-                       bastfn(lkb->lkb_astparam, callbacks[i].mode);
                        trace_dlm_bast(ls, lkb, callbacks[i].mode);
+                       bastfn(lkb->lkb_astparam, callbacks[i].mode);
                } else if (callbacks[i].flags & DLM_CB_CAST) {
                        lkb->lkb_lksb->sb_status = callbacks[i].sb_status;
                        lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags;
+                       trace_dlm_ast(ls, lkb);
                        castfn(lkb->lkb_astparam);
-                       trace_dlm_ast(ls, lkb, lkb->lkb_lksb);
                }
        }
 
index 42eee27..ac8b621 100644 (file)
@@ -75,8 +75,9 @@ struct dlm_cluster {
        unsigned int cl_log_info;
        unsigned int cl_protocol;
        unsigned int cl_mark;
+#ifdef CONFIG_DLM_DEPRECATED_API
        unsigned int cl_timewarn_cs;
-       unsigned int cl_waitwarn_us;
+#endif
        unsigned int cl_new_rsb_count;
        unsigned int cl_recover_callbacks;
        char cl_cluster_name[DLM_LOCKSPACE_LEN];
@@ -102,8 +103,9 @@ enum {
        CLUSTER_ATTR_LOG_INFO,
        CLUSTER_ATTR_PROTOCOL,
        CLUSTER_ATTR_MARK,
+#ifdef CONFIG_DLM_DEPRECATED_API
        CLUSTER_ATTR_TIMEWARN_CS,
-       CLUSTER_ATTR_WAITWARN_US,
+#endif
        CLUSTER_ATTR_NEW_RSB_COUNT,
        CLUSTER_ATTR_RECOVER_CALLBACKS,
        CLUSTER_ATTR_CLUSTER_NAME,
@@ -224,8 +226,9 @@ CLUSTER_ATTR(log_debug, NULL);
 CLUSTER_ATTR(log_info, NULL);
 CLUSTER_ATTR(protocol, dlm_check_protocol_and_dlm_running);
 CLUSTER_ATTR(mark, NULL);
+#ifdef CONFIG_DLM_DEPRECATED_API
 CLUSTER_ATTR(timewarn_cs, dlm_check_zero);
-CLUSTER_ATTR(waitwarn_us, NULL);
+#endif
 CLUSTER_ATTR(new_rsb_count, NULL);
 CLUSTER_ATTR(recover_callbacks, NULL);
 
@@ -240,8 +243,9 @@ static struct configfs_attribute *cluster_attrs[] = {
        [CLUSTER_ATTR_LOG_INFO] = &cluster_attr_log_info,
        [CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol,
        [CLUSTER_ATTR_MARK] = &cluster_attr_mark,
+#ifdef CONFIG_DLM_DEPRECATED_API
        [CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs,
-       [CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us,
+#endif
        [CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count,
        [CLUSTER_ATTR_RECOVER_CALLBACKS] = &cluster_attr_recover_callbacks,
        [CLUSTER_ATTR_CLUSTER_NAME] = &cluster_attr_cluster_name,
@@ -432,8 +436,9 @@ static struct config_group *make_cluster(struct config_group *g,
        cl->cl_log_debug = dlm_config.ci_log_debug;
        cl->cl_log_info = dlm_config.ci_log_info;
        cl->cl_protocol = dlm_config.ci_protocol;
+#ifdef CONFIG_DLM_DEPRECATED_API
        cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs;
-       cl->cl_waitwarn_us = dlm_config.ci_waitwarn_us;
+#endif
        cl->cl_new_rsb_count = dlm_config.ci_new_rsb_count;
        cl->cl_recover_callbacks = dlm_config.ci_recover_callbacks;
        memcpy(cl->cl_cluster_name, dlm_config.ci_cluster_name,
@@ -954,8 +959,9 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
 #define DEFAULT_LOG_INFO           1
 #define DEFAULT_PROTOCOL           DLM_PROTO_TCP
 #define DEFAULT_MARK               0
+#ifdef CONFIG_DLM_DEPRECATED_API
 #define DEFAULT_TIMEWARN_CS      500 /* 5 sec = 500 centiseconds */
-#define DEFAULT_WAITWARN_US       0
+#endif
 #define DEFAULT_NEW_RSB_COUNT    128
 #define DEFAULT_RECOVER_CALLBACKS  0
 #define DEFAULT_CLUSTER_NAME      ""
@@ -971,8 +977,9 @@ struct dlm_config_info dlm_config = {
        .ci_log_info = DEFAULT_LOG_INFO,
        .ci_protocol = DEFAULT_PROTOCOL,
        .ci_mark = DEFAULT_MARK,
+#ifdef CONFIG_DLM_DEPRECATED_API
        .ci_timewarn_cs = DEFAULT_TIMEWARN_CS,
-       .ci_waitwarn_us = DEFAULT_WAITWARN_US,
+#endif
        .ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT,
        .ci_recover_callbacks = DEFAULT_RECOVER_CALLBACKS,
        .ci_cluster_name = DEFAULT_CLUSTER_NAME
index df92b0a..55c5f2c 100644 (file)
@@ -37,8 +37,9 @@ struct dlm_config_info {
        int ci_log_info;
        int ci_protocol;
        int ci_mark;
+#ifdef CONFIG_DLM_DEPRECATED_API
        int ci_timewarn_cs;
-       int ci_waitwarn_us;
+#endif
        int ci_new_rsb_count;
        int ci_recover_callbacks;
        char ci_cluster_name[DLM_LOCKSPACE_LEN];
index 776c3ed..8aca808 100644 (file)
@@ -145,7 +145,9 @@ struct dlm_args {
        void                    (*bastfn) (void *astparam, int mode);
        int                     mode;
        struct dlm_lksb         *lksb;
+#ifdef CONFIG_DLM_DEPRECATED_API
        unsigned long           timeout;
+#endif
 };
 
 
@@ -203,10 +205,20 @@ struct dlm_args {
 #define DLM_IFL_OVERLAP_UNLOCK  0x00080000
 #define DLM_IFL_OVERLAP_CANCEL  0x00100000
 #define DLM_IFL_ENDOFLIFE      0x00200000
+#ifdef CONFIG_DLM_DEPRECATED_API
 #define DLM_IFL_WATCH_TIMEWARN 0x00400000
 #define DLM_IFL_TIMEOUT_CANCEL 0x00800000
+#endif
 #define DLM_IFL_DEADLOCK_CANCEL        0x01000000
 #define DLM_IFL_STUB_MS                0x02000000 /* magic number for m_flags */
+/* The least significant 2 bytes hold the flags that are part of the
+ * message format; they are transmitted in full, but the receive side
+ * only ever sets those 2 LSB bytes.
+ *
+ * Even the wireshark dlm dissector only evaluates the lower bytes, and
+ * since the higher bytes are never parsed on the receiver side we assume
+ * they are for internal use or reserved.
+ */
 #define DLM_IFL_USER           0x00000001
 #define DLM_IFL_ORPHAN         0x00000002
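
Given the comment above, a receiver that keeps only the wire-visible low 16 flag bits might look like this one-line fragment (msg and its little-endian flags field are hypothetical):

	u32 flags = le32_to_cpu(msg->flags) & 0x0000FFFFu;	/* low 2 bytes only */
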
 
@@ -249,10 +261,12 @@ struct dlm_lkb {
        struct list_head        lkb_rsb_lookup; /* waiting for rsb lookup */
        struct list_head        lkb_wait_reply; /* waiting for remote reply */
        struct list_head        lkb_ownqueue;   /* list of locks for a process */
-       struct list_head        lkb_time_list;
        ktime_t                 lkb_timestamp;
-       ktime_t                 lkb_wait_time;
+
+#ifdef CONFIG_DLM_DEPRECATED_API
+       struct list_head        lkb_time_list;
        unsigned long           lkb_timeout_cs;
+#endif
 
        struct mutex            lkb_cb_mutex;
        struct work_struct      lkb_cb_work;
@@ -568,8 +582,10 @@ struct dlm_ls {
        struct mutex            ls_orphans_mutex;
        struct list_head        ls_orphans;
 
+#ifdef CONFIG_DLM_DEPRECATED_API
        struct mutex            ls_timeout_mutex;
        struct list_head        ls_timeout;
+#endif
 
        spinlock_t              ls_new_rsb_spin;
        int                     ls_new_rsb_count;
@@ -606,8 +622,8 @@ struct dlm_ls {
 
        wait_queue_head_t       ls_uevent_wait; /* user part of join/leave */
        int                     ls_uevent_result;
-       struct completion       ls_members_done;
-       int                     ls_members_result;
+       struct completion       ls_recovery_done;
+       int                     ls_recovery_result;
 
        struct miscdevice       ls_device;
 
@@ -688,7 +704,9 @@ struct dlm_ls {
 #define LSFL_RCOM_READY                5
 #define LSFL_RCOM_WAIT         6
 #define LSFL_UEVENT_WAIT       7
+#ifdef CONFIG_DLM_DEPRECATED_API
 #define LSFL_TIMEWARN          8
+#endif
 #define LSFL_CB_DELAY          9
 #define LSFL_NODIR             10
 
@@ -741,9 +759,15 @@ static inline int dlm_no_directory(struct dlm_ls *ls)
        return test_bit(LSFL_NODIR, &ls->ls_flags);
 }
 
+#ifdef CONFIG_DLM_DEPRECATED_API
 int dlm_netlink_init(void);
 void dlm_netlink_exit(void);
 void dlm_timeout_warn(struct dlm_lkb *lkb);
+#else
+static inline int dlm_netlink_init(void) { return 0; }
+static inline void dlm_netlink_exit(void) { }
+static inline void dlm_timeout_warn(struct dlm_lkb *lkb) { }
+#endif
 int dlm_plock_init(void);
 void dlm_plock_exit(void);
 
index 226822f..dac7eb7 100644 (file)
@@ -296,12 +296,14 @@ static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
 
        DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
 
+#ifdef CONFIG_DLM_DEPRECATED_API
        /* if the operation was a cancel, then return -DLM_ECANCEL, if a
           timeout caused the cancel then return -ETIMEDOUT */
        if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
                lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
                rv = -ETIMEDOUT;
        }
+#endif
 
        if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
                lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
@@ -1210,7 +1212,9 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
        kref_init(&lkb->lkb_ref);
        INIT_LIST_HEAD(&lkb->lkb_ownqueue);
        INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
+#ifdef CONFIG_DLM_DEPRECATED_API
        INIT_LIST_HEAD(&lkb->lkb_time_list);
+#endif
        INIT_LIST_HEAD(&lkb->lkb_cb_list);
        mutex_init(&lkb->lkb_cb_mutex);
        INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
@@ -1306,6 +1310,13 @@ static inline void hold_lkb(struct dlm_lkb *lkb)
        kref_get(&lkb->lkb_ref);
 }
 
+static void unhold_lkb_assert(struct kref *kref)
+{
+       struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
+
+       DLM_ASSERT(false, dlm_print_lkb(lkb););
+}
+
 /* This is called when we need to remove a reference and are certain
    it's not the last ref.  e.g. del_lkb is always called between a
    find_lkb/put_lkb and is always the inverse of a previous add_lkb.
@@ -1313,9 +1324,7 @@ static inline void hold_lkb(struct dlm_lkb *lkb)
 
 static inline void unhold_lkb(struct dlm_lkb *lkb)
 {
-       int rv;
-       rv = kref_put(&lkb->lkb_ref, kill_lkb);
-       DLM_ASSERT(!rv, dlm_print_lkb(lkb););
+       kref_put(&lkb->lkb_ref, unhold_lkb_assert);
 }
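
unhold_lkb() now encodes the invariant "this put must never drop the last reference" in the release callback itself, rather than checking kref_put()'s return value. The general shape of that trick, as a sketch:

	/* Sketch: a release callback that must never run; reaching it
	 * means the caller unexpectedly dropped the final reference. */
	static void ref_put_assert(struct kref *kref)
	{
		WARN(1, "unexpected final reference drop\n");
	}

	/* caller guarantees at least one other reference exists */
	kref_put(&obj->ref, ref_put_assert);
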
 
 static void lkb_add_ordered(struct list_head *new, struct list_head *head,
@@ -1402,75 +1411,6 @@ static int msg_reply_type(int mstype)
        return -1;
 }
 
-static int nodeid_warned(int nodeid, int num_nodes, int *warned)
-{
-       int i;
-
-       for (i = 0; i < num_nodes; i++) {
-               if (!warned[i]) {
-                       warned[i] = nodeid;
-                       return 0;
-               }
-               if (warned[i] == nodeid)
-                       return 1;
-       }
-       return 0;
-}
-
-void dlm_scan_waiters(struct dlm_ls *ls)
-{
-       struct dlm_lkb *lkb;
-       s64 us;
-       s64 debug_maxus = 0;
-       u32 debug_scanned = 0;
-       u32 debug_expired = 0;
-       int num_nodes = 0;
-       int *warned = NULL;
-
-       if (!dlm_config.ci_waitwarn_us)
-               return;
-
-       mutex_lock(&ls->ls_waiters_mutex);
-
-       list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
-               if (!lkb->lkb_wait_time)
-                       continue;
-
-               debug_scanned++;
-
-               us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));
-
-               if (us < dlm_config.ci_waitwarn_us)
-                       continue;
-
-               lkb->lkb_wait_time = 0;
-
-               debug_expired++;
-               if (us > debug_maxus)
-                       debug_maxus = us;
-
-               if (!num_nodes) {
-                       num_nodes = ls->ls_num_nodes;
-                       warned = kcalloc(num_nodes, sizeof(int), GFP_KERNEL);
-               }
-               if (!warned)
-                       continue;
-               if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
-                       continue;
-
-               log_error(ls, "waitwarn %x %lld %d us check connection to "
-                         "node %d", lkb->lkb_id, (long long)us,
-                         dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
-       }
-       mutex_unlock(&ls->ls_waiters_mutex);
-       kfree(warned);
-
-       if (debug_expired)
-               log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
-                         debug_scanned, debug_expired,
-                         dlm_config.ci_waitwarn_us, (long long)debug_maxus);
-}
-
 /* add/remove lkb from global waiters list of lkb's waiting for
    a reply from a remote node */
 
@@ -1514,7 +1454,6 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
 
        lkb->lkb_wait_count++;
        lkb->lkb_wait_type = mstype;
-       lkb->lkb_wait_time = ktime_get();
        lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
        hold_lkb(lkb);
        list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
@@ -1842,6 +1781,7 @@ void dlm_scan_rsbs(struct dlm_ls *ls)
        }
 }
 
+#ifdef CONFIG_DLM_DEPRECATED_API
 static void add_timeout(struct dlm_lkb *lkb)
 {
        struct dlm_ls *ls = lkb->lkb_resource->res_ls;
@@ -1962,17 +1902,11 @@ void dlm_adjust_timeouts(struct dlm_ls *ls)
        list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
                lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
        mutex_unlock(&ls->ls_timeout_mutex);
-
-       if (!dlm_config.ci_waitwarn_us)
-               return;
-
-       mutex_lock(&ls->ls_waiters_mutex);
-       list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
-               if (ktime_to_us(lkb->lkb_wait_time))
-                       lkb->lkb_wait_time = ktime_get();
-       }
-       mutex_unlock(&ls->ls_waiters_mutex);
 }
+#else
+static void add_timeout(struct dlm_lkb *lkb) { }
+static void del_timeout(struct dlm_lkb *lkb) { }
+#endif
 
 /* lkb is master or local copy */
 
@@ -2837,12 +2771,20 @@ static void confirm_master(struct dlm_rsb *r, int error)
        }
 }
 
+#ifdef CONFIG_DLM_DEPRECATED_API
 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
                         int namelen, unsigned long timeout_cs,
                         void (*ast) (void *astparam),
                         void *astparam,
                         void (*bast) (void *astparam, int mode),
                         struct dlm_args *args)
+#else
+static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
+                        int namelen, void (*ast)(void *astparam),
+                        void *astparam,
+                        void (*bast)(void *astparam, int mode),
+                        struct dlm_args *args)
+#endif
 {
        int rv = -EINVAL;
 
@@ -2895,7 +2837,9 @@ static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
        args->astfn = ast;
        args->astparam = astparam;
        args->bastfn = bast;
+#ifdef CONFIG_DLM_DEPRECATED_API
        args->timeout = timeout_cs;
+#endif
        args->mode = mode;
        args->lksb = lksb;
        rv = 0;
@@ -2951,7 +2895,9 @@ static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
        lkb->lkb_lksb = args->lksb;
        lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
        lkb->lkb_ownpid = (int) current->pid;
+#ifdef CONFIG_DLM_DEPRECATED_API
        lkb->lkb_timeout_cs = args->timeout;
+#endif
        rv = 0;
  out:
        if (rv)
@@ -3472,10 +3418,15 @@ int dlm_lock(dlm_lockspace_t *lockspace,
        if (error)
                goto out;
 
-       trace_dlm_lock_start(ls, lkb, mode, flags);
+       trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags);
 
+#ifdef CONFIG_DLM_DEPRECATED_API
        error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
                              astarg, bast, &args);
+#else
+       error = set_lock_args(mode, lksb, flags, namelen, ast, astarg, bast,
+                             &args);
+#endif
        if (error)
                goto out_put;
 
@@ -3487,7 +3438,7 @@ int dlm_lock(dlm_lockspace_t *lockspace,
        if (error == -EINPROGRESS)
                error = 0;
  out_put:
-       trace_dlm_lock_end(ls, lkb, mode, flags, error);
+       trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error);
 
        if (convert || error)
                __put_lkb(ls, lkb);
@@ -5839,9 +5790,14 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
        return 0;
 }
 
+#ifdef CONFIG_DLM_DEPRECATED_API
 int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
                     int mode, uint32_t flags, void *name, unsigned int namelen,
                     unsigned long timeout_cs)
+#else
+int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
+                    int mode, uint32_t flags, void *name, unsigned int namelen)
+#endif
 {
        struct dlm_lkb *lkb;
        struct dlm_args args;
@@ -5864,8 +5820,13 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
                        goto out;
                }
        }
+#ifdef CONFIG_DLM_DEPRECATED_API
        error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
                              fake_astfn, ua, fake_bastfn, &args);
+#else
+       error = set_lock_args(mode, &ua->lksb, flags, namelen, fake_astfn, ua,
+                             fake_bastfn, &args);
+#endif
        if (error) {
                kfree(ua->lksb.sb_lvbptr);
                ua->lksb.sb_lvbptr = NULL;
@@ -5904,9 +5865,14 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
        return error;
 }
 
+#ifdef CONFIG_DLM_DEPRECATED_API
 int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
                     int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
                     unsigned long timeout_cs)
+#else
+int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+                    int mode, uint32_t flags, uint32_t lkid, char *lvb_in)
+#endif
 {
        struct dlm_lkb *lkb;
        struct dlm_args args;
@@ -5941,8 +5907,13 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
        ua->bastaddr = ua_tmp->bastaddr;
        ua->user_lksb = ua_tmp->user_lksb;
 
+#ifdef CONFIG_DLM_DEPRECATED_API
        error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
                              fake_astfn, ua, fake_bastfn, &args);
+#else
+       error = set_lock_args(mode, &ua->lksb, flags, 0, fake_astfn, ua,
+                             fake_bastfn, &args);
+#endif
        if (error)
                goto out_put;
 
@@ -5966,7 +5937,7 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
 
 int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
                     int mode, uint32_t flags, void *name, unsigned int namelen,
-                    unsigned long timeout_cs, uint32_t *lkid)
+                    uint32_t *lkid)
 {
        struct dlm_lkb *lkb = NULL, *iter;
        struct dlm_user_args *ua;
index 252a589..a7b6474 100644 (file)
@@ -24,9 +24,15 @@ int dlm_put_lkb(struct dlm_lkb *lkb);
 void dlm_scan_rsbs(struct dlm_ls *ls);
 int dlm_lock_recovery_try(struct dlm_ls *ls);
 void dlm_unlock_recovery(struct dlm_ls *ls);
-void dlm_scan_waiters(struct dlm_ls *ls);
+
+#ifdef CONFIG_DLM_DEPRECATED_API
 void dlm_scan_timeout(struct dlm_ls *ls);
 void dlm_adjust_timeouts(struct dlm_ls *ls);
+#else
+static inline void dlm_scan_timeout(struct dlm_ls *ls) { }
+static inline void dlm_adjust_timeouts(struct dlm_ls *ls) { }
+#endif
+
 int dlm_master_lookup(struct dlm_ls *ls, int nodeid, char *name, int len,
                      unsigned int flags, int *r_nodeid, int *result);
 
@@ -41,15 +47,22 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls);
 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc);
 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc);
 
+#ifdef CONFIG_DLM_DEPRECATED_API
 int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, int mode,
        uint32_t flags, void *name, unsigned int namelen,
        unsigned long timeout_cs);
 int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
        int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
        unsigned long timeout_cs);
+#else
+int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, int mode,
+       uint32_t flags, void *name, unsigned int namelen);
+int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+       int mode, uint32_t flags, uint32_t lkid, char *lvb_in);
+#endif
 int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
        int mode, uint32_t flags, void *name, unsigned int namelen,
-       unsigned long timeout_cs, uint32_t *lkid);
+       uint32_t *lkid);
 int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
        uint32_t flags, uint32_t lkid, char *lvb_in);
 int dlm_user_cancel(struct dlm_ls *ls,  struct dlm_user_args *ua_tmp,
index 19ed41a..3972f4d 100644 (file)
@@ -275,7 +275,6 @@ static int dlm_scand(void *data)
                                ls->ls_scan_time = jiffies;
                                dlm_scan_rsbs(ls);
                                dlm_scan_timeout(ls);
-                               dlm_scan_waiters(ls);
                                dlm_unlock_recovery(ls);
                        } else {
                                ls->ls_scan_time += HZ;
@@ -490,13 +489,28 @@ static int new_lockspace(const char *name, const char *cluster,
                ls->ls_ops_arg = ops_arg;
        }
 
-       if (flags & DLM_LSFL_TIMEWARN)
+#ifdef CONFIG_DLM_DEPRECATED_API
+       if (flags & DLM_LSFL_TIMEWARN) {
+               pr_warn_once("===============================================================\n"
+                            "WARNING: the dlm DLM_LSFL_TIMEWARN flag is being deprecated and\n"
+                            "         will be removed in v6.2!\n"
+                            "         Inclusive DLM_LSFL_TIMEWARN define in UAPI header!\n"
+                            "===============================================================\n");
+
                set_bit(LSFL_TIMEWARN, &ls->ls_flags);
+       }
 
        /* ls_exflags are forced to match among nodes, and we don't
-          need to require all nodes to have some flags set */
+        * need to require all nodes to have some flags set
+        */
        ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS |
                                    DLM_LSFL_NEWEXCL));
+#else
+       /* ls_exflags are forced to match among nodes, and we don't
+        * need to require all nodes to have some flags set
+        */
+       ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL));
+#endif
 
        size = READ_ONCE(dlm_config.ci_rsbtbl_size);
        ls->ls_rsbtbl_size = size;
@@ -527,8 +541,10 @@ static int new_lockspace(const char *name, const char *cluster,
        mutex_init(&ls->ls_waiters_mutex);
        INIT_LIST_HEAD(&ls->ls_orphans);
        mutex_init(&ls->ls_orphans_mutex);
+#ifdef CONFIG_DLM_DEPRECATED_API
        INIT_LIST_HEAD(&ls->ls_timeout);
        mutex_init(&ls->ls_timeout_mutex);
+#endif
 
        INIT_LIST_HEAD(&ls->ls_new_rsb);
        spin_lock_init(&ls->ls_new_rsb_spin);
@@ -548,8 +564,8 @@ static int new_lockspace(const char *name, const char *cluster,
 
        init_waitqueue_head(&ls->ls_uevent_wait);
        ls->ls_uevent_result = 0;
-       init_completion(&ls->ls_members_done);
-       ls->ls_members_result = -1;
+       init_completion(&ls->ls_recovery_done);
+       ls->ls_recovery_result = -1;
 
        mutex_init(&ls->ls_cb_mutex);
        INIT_LIST_HEAD(&ls->ls_cb_delay);
@@ -645,8 +661,9 @@ static int new_lockspace(const char *name, const char *cluster,
        if (error)
                goto out_recoverd;
 
-       wait_for_completion(&ls->ls_members_done);
-       error = ls->ls_members_result;
+       /* wait until recovery has succeeded or failed */
+       wait_for_completion(&ls->ls_recovery_done);
+       error = ls->ls_recovery_result;
        if (error)
                goto out_members;
 
index 19e82f0..a4e84e8 100644 (file)
@@ -529,7 +529,7 @@ static void lowcomms_write_space(struct sock *sk)
                return;
 
        if (!test_and_set_bit(CF_CONNECTED, &con->flags)) {
-               log_print("successful connected to node %d", con->nodeid);
+               log_print("connected to node %d", con->nodeid);
                queue_work(send_workqueue, &con->swork);
                return;
        }
@@ -1931,7 +1931,7 @@ static int dlm_sctp_connect(struct connection *con, struct socket *sock,
                return ret;
 
        if (!test_and_set_bit(CF_CONNECTED, &con->flags))
-               log_print("successful connected to node %d", con->nodeid);
+               log_print("connected to node %d", con->nodeid);
 
        return 0;
 }
index 98084e0..2af2ccf 100644 (file)
@@ -534,7 +534,11 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
        int i, error, neg = 0, low = -1;
 
        /* previously removed members that we've not finished removing need to
-          count as a negative change so the "neg" recovery steps will happen */
+        * count as a negative change so the "neg" recovery steps will happen
+        *
+        * This function must report all member changes to the lsops or
+        * midcomms layer, and must never return before doing so.
+        */
 
        list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
                log_rinfo(ls, "prev removed member %d", memb->nodeid);
@@ -583,19 +587,6 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
        *neg_out = neg;
 
        error = ping_members(ls);
-       /* error -EINTR means that a new recovery action is triggered.
-        * We ignore this recovery action and let run the new one which might
-        * have new member configuration.
-        */
-       if (error == -EINTR)
-               error = 0;
-
-       /* new_lockspace() may be waiting to know if the config
-        * is good or bad
-        */
-       ls->ls_members_result = error;
-       complete(&ls->ls_members_done);
-
        log_rinfo(ls, "dlm_recover_members %d nodes", ls->ls_num_nodes);
        return error;
 }
@@ -675,7 +666,16 @@ int dlm_ls_stop(struct dlm_ls *ls)
        if (!ls->ls_recover_begin)
                ls->ls_recover_begin = jiffies;
 
-       dlm_lsop_recover_prep(ls);
+       /* call the recover_prep ops only once, not on every
+        * dlm_ls_stop() issued while recovery is already stopped.
+        *
+        * If we successfully cleared the LSFL_RUNNING bit (i.e. it
+        * was set), we know this is the first dlm_ls_stop() call.
+        */
+       if (new)
+               dlm_lsop_recover_prep(ls);
+
        return 0;
 }
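
Here 'new' records whether this call actually cleared LSFL_RUNNING, so recover_prep fires only on the first of possibly many stacked stops. The underlying idiom, with hypothetical names:

	/* Sketch: fire a notifier only on the running -> stopped edge. */
	bool first = test_and_clear_bit(RUNNING_BIT, &state);

	/* ... stop work common to every call, written to be idempotent ... */

	if (first)
		notify_stopped();	/* runs once per running->stopped edge */
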
 
index 0993eeb..737f185 100644 (file)
@@ -29,6 +29,8 @@ struct plock_async_data {
 struct plock_op {
        struct list_head list;
        int done;
+       /* set if the lock op got interrupted while waiting for the dlm_controld reply */
+       bool sigint;
        struct dlm_plock_info info;
        /* if set indicates async handling */
        struct plock_async_data *data;
@@ -79,8 +81,7 @@ static void send_op(struct plock_op *op)
    abandoned waiter.  So, we have to insert the unlock-close when the
    lock call is interrupted. */
 
-static void do_unlock_close(struct dlm_ls *ls, u64 number,
-                           struct file *file, struct file_lock *fl)
+static void do_unlock_close(const struct dlm_plock_info *info)
 {
        struct plock_op *op;
 
@@ -89,15 +90,12 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number,
                return;
 
        op->info.optype         = DLM_PLOCK_OP_UNLOCK;
-       op->info.pid            = fl->fl_pid;
-       op->info.fsid           = ls->ls_global_id;
-       op->info.number         = number;
+       op->info.pid            = info->pid;
+       op->info.fsid           = info->fsid;
+       op->info.number         = info->number;
        op->info.start          = 0;
        op->info.end            = OFFSET_MAX;
-       if (fl->fl_lmops && fl->fl_lmops->lm_grant)
-               op->info.owner  = (__u64) fl->fl_pid;
-       else
-               op->info.owner  = (__u64)(long) fl->fl_owner;
+       op->info.owner          = info->owner;
 
        op->info.flags |= DLM_PLOCK_FL_CLOSE;
        send_op(op);
@@ -161,16 +159,24 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
        rv = wait_event_interruptible(recv_wq, (op->done != 0));
        if (rv == -ERESTARTSYS) {
                spin_lock(&ops_lock);
-               list_del(&op->list);
+               /* recheck under ops_lock whether done became non-zero;
+                * if so, the interruption should be ignored
+                */
+               if (op->done != 0) {
+                       spin_unlock(&ops_lock);
+                       goto do_lock_wait;
+               }
+
+               op->sigint = true;
                spin_unlock(&ops_lock);
-               log_print("%s: wait interrupted %x %llx, op removed",
+               log_debug(ls, "%s: wait interrupted %x %llx pid %d",
                          __func__, ls->ls_global_id,
-                         (unsigned long long)number);
-               dlm_release_plock_op(op);
-               do_unlock_close(ls, number, file, fl);
+                         (unsigned long long)number, op->info.pid);
                goto out;
        }
 
+do_lock_wait:
+
        WARN_ON(!list_empty(&op->list));
 
        rv = op->info.rv;
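
The hunk above closes a race: the daemon's reply can land between wait_event_interruptible() returning -ERESTARTSYS and the waiter taking ops_lock. The recheck-under-lock shape, reduced to its essentials with hypothetical names:

	rv = wait_event_interruptible(wq, READ_ONCE(req->done));
	if (rv == -ERESTARTSYS) {
		spin_lock(&reqs_lock);
		if (req->done) {		/* the reply won the race */
			spin_unlock(&reqs_lock);
			goto have_reply;
		}
		req->interrupted = true;	/* let the replier clean up */
		spin_unlock(&reqs_lock);
		return rv;
	}
have_reply:
	rv = req->result;
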
@@ -378,7 +384,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
 
        spin_lock(&ops_lock);
        if (!list_empty(&send_list)) {
-               op = list_entry(send_list.next, struct plock_op, list);
+               op = list_first_entry(&send_list, struct plock_op, list);
                if (op->info.flags & DLM_PLOCK_FL_CLOSE)
                        list_del(&op->list);
                else
@@ -425,6 +431,19 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
                if (iter->info.fsid == info.fsid &&
                    iter->info.number == info.number &&
                    iter->info.owner == info.owner) {
+                       if (iter->sigint) {
+                               list_del(&iter->list);
+                               spin_unlock(&ops_lock);
+
+                               pr_debug("%s: sigint cleanup %x %llx pid %d",
+                                         __func__, iter->info.fsid,
+                                         (unsigned long long)iter->info.number,
+                                         iter->info.pid);
+                               do_unlock_close(&iter->info);
+                               memcpy(&iter->info, &info, sizeof(info));
+                               dlm_release_plock_op(iter);
+                               return count;
+                       }
                        list_del_init(&iter->list);
                        memcpy(&iter->info, &info, sizeof(info));
                        if (iter->data)
@@ -443,7 +462,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
                else
                        wake_up(&recv_wq);
        } else
-               log_print("%s: no op %x %llx - may got interrupted?", __func__,
+               log_print("%s: no op %x %llx", __func__,
                          info.fsid, (unsigned long long)info.number);
        return count;
 }
index a55dfce..e15eb51 100644 (file)
@@ -70,6 +70,10 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
        /*
         * Add or remove nodes from the lockspace's ls_nodes list.
+        *
+        * Due to the fact that we must report all membership changes to lsops
+        * or midcomms layer, it is not permitted to abort ls_recover() until
+        * this is done.
         */
 
        error = dlm_recover_members(ls, rv, &neg);
@@ -239,14 +243,12 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
                  jiffies_to_msecs(jiffies - start));
        mutex_unlock(&ls->ls_recoverd_active);
 
-       dlm_lsop_recover_done(ls);
        return 0;
 
  fail:
        dlm_release_root_list(ls);
-       log_rinfo(ls, "dlm_recover %llu error %d",
-                 (unsigned long long)rv->seq, error);
        mutex_unlock(&ls->ls_recoverd_active);
+
        return error;
 }
 
@@ -257,6 +259,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 static void do_ls_recovery(struct dlm_ls *ls)
 {
        struct dlm_recover *rv = NULL;
+       int error;
 
        spin_lock(&ls->ls_recover_lock);
        rv = ls->ls_recover_args;
@@ -266,7 +269,31 @@ static void do_ls_recovery(struct dlm_ls *ls)
        spin_unlock(&ls->ls_recover_lock);
 
        if (rv) {
-               ls_recover(ls, rv);
+               error = ls_recover(ls, rv);
+               switch (error) {
+               case 0:
+                       ls->ls_recovery_result = 0;
+                       complete(&ls->ls_recovery_done);
+
+                       dlm_lsop_recover_done(ls);
+                       break;
+               case -EINTR:
+                       /* if recovery was interrupted (-EINTR), wait for the
+                        * next ls_recover() iteration, which will hopefully
+                        * succeed
+                        */
+                       log_rinfo(ls, "%s %llu interrupted and should be queued to run again",
+                                 __func__, (unsigned long long)rv->seq);
+                       break;
+               default:
+                       log_rinfo(ls, "%s %llu error %d", __func__,
+                                 (unsigned long long)rv->seq, error);
+
+                       /* let new_lockspace() get aware of critical error */
+                       ls->ls_recovery_result = error;
+                       complete(&ls->ls_recovery_done);
+                       break;
+               }
+
                kfree(rv->nodes);
                kfree(rv);
        }
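
This is the worker half of the new_lockspace() handshake seen earlier: store a result and complete ls_recovery_done, but stay silent on -EINTR so the waiter keeps blocking until a later recovery attempt resolves. The bare pattern, with illustrative names:

	#include <linux/completion.h>

	struct job {
		struct completion done;	/* init_completion()ed by the requester */
		int result;
	};

	/* requester side */
	static int job_wait(struct job *j)
	{
		wait_for_completion(&j->done);
		return j->result;
	}

	/* worker side: report only terminal outcomes; on a retryable
	 * error, skip complete() so the requester keeps waiting */
	static void job_finish(struct job *j, int err)
	{
		if (err == -EINTR)
			return;
		j->result = err;
		complete(&j->done);
	}
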
index 1060b24..99e8f07 100644 (file)
@@ -250,6 +250,14 @@ static int device_user_lock(struct dlm_user_proc *proc,
                goto out;
        }
 
+#ifdef CONFIG_DLM_DEPRECATED_API
+       if (params->timeout)
+               pr_warn_once("========================================================\n"
+                            "WARNING: the lkb timeout feature is being deprecated and\n"
+                            "         will be removed in v6.2!\n"
+                            "========================================================\n");
+#endif
+
        ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
        if (!ua)
                goto out;
@@ -262,23 +270,34 @@ static int device_user_lock(struct dlm_user_proc *proc,
        ua->xid = params->xid;
 
        if (params->flags & DLM_LKF_CONVERT) {
+#ifdef CONFIG_DLM_DEPRECATED_API
                error = dlm_user_convert(ls, ua,
                                         params->mode, params->flags,
                                         params->lkid, params->lvb,
                                         (unsigned long) params->timeout);
+#else
+               error = dlm_user_convert(ls, ua,
+                                        params->mode, params->flags,
+                                        params->lkid, params->lvb);
+#endif
        } else if (params->flags & DLM_LKF_ORPHAN) {
                error = dlm_user_adopt_orphan(ls, ua,
                                         params->mode, params->flags,
                                         params->name, params->namelen,
-                                        (unsigned long) params->timeout,
                                         &lkid);
                if (!error)
                        error = lkid;
        } else {
+#ifdef CONFIG_DLM_DEPRECATED_API
                error = dlm_user_request(ls, ua,
                                         params->mode, params->flags,
                                         params->name, params->namelen,
                                         (unsigned long) params->timeout);
+#else
+               error = dlm_user_request(ls, ua,
+                                        params->mode, params->flags,
+                                        params->name, params->namelen);
+#endif
                if (!error)
                        error = ua->lksb.sb_lkid;
        }
index 19e6c56..26fa170 100644 (file)
@@ -17,7 +17,7 @@ struct z_erofs_decompress_req {
 
        /* indicate the algorithm will be used for decompression */
        unsigned int alg;
-       bool inplace_io, partial_decoding;
+       bool inplace_io, partial_decoding, fillgaps;
 };
 
 struct z_erofs_decompressor {
index fbb037b..fe8ac0e 100644 (file)
@@ -366,42 +366,33 @@ static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
        return iomap_bmap(mapping, block, &erofs_iomap_ops);
 }
 
-static int erofs_prepare_dio(struct kiocb *iocb, struct iov_iter *to)
+static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
        struct inode *inode = file_inode(iocb->ki_filp);
-       loff_t align = iocb->ki_pos | iov_iter_count(to) |
-               iov_iter_alignment(to);
-       struct block_device *bdev = inode->i_sb->s_bdev;
-       unsigned int blksize_mask;
-
-       if (bdev)
-               blksize_mask = (1 << ilog2(bdev_logical_block_size(bdev))) - 1;
-       else
-               blksize_mask = (1 << inode->i_blkbits) - 1;
 
-       if (align & blksize_mask)
-               return -EINVAL;
-       return 0;
-}
-
-static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
-{
        /* no need taking (shared) inode lock since it's a ro filesystem */
        if (!iov_iter_count(to))
                return 0;
 
 #ifdef CONFIG_FS_DAX
-       if (IS_DAX(iocb->ki_filp->f_mapping->host))
+       if (IS_DAX(inode))
                return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
 #endif
        if (iocb->ki_flags & IOCB_DIRECT) {
-               int err = erofs_prepare_dio(iocb, to);
+               struct block_device *bdev = inode->i_sb->s_bdev;
+               unsigned int blksize_mask;
+
+               if (bdev)
+                       blksize_mask = bdev_logical_block_size(bdev) - 1;
+               else
+                       blksize_mask = (1 << inode->i_blkbits) - 1;
+
+               if ((iocb->ki_pos | iov_iter_count(to) |
+                    iov_iter_alignment(to)) & blksize_mask)
+                       return -EINVAL;
 
-               if (!err)
-                       return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
-                                           NULL, 0, NULL, 0);
-               if (err < 0)
-                       return err;
+               return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
+                                   NULL, 0, NULL, 0);
        }
        return filemap_read(iocb, to, 0);
 }
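
The folded check works because OR-ing position, length, and buffer alignment preserves any misaligned low bit, which the single mask test then catches. A worked illustration for 512-byte logical blocks:

	unsigned int mask = 512 - 1;			/* 0x1ff */

	/* pos 4096, count 1024, buffer 512-byte aligned:
	 *   (4096 | 1024 | 512) & 0x1ff == 0   -> direct I/O allowed
	 * pos 4100 (misaligned by 4 bytes):
	 *   (4100 | 1024 | 512) & 0x1ff == 4   -> -EINVAL
	 */
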
index 6dca190..2d55569 100644
@@ -83,7 +83,7 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
                        j = 0;
 
                /* 'valid' bounced can only be tested after a complete round */
-               if (test_bit(j, bounced)) {
+               if (!rq->fillgaps && test_bit(j, bounced)) {
                        DBG_BUGON(i < lz4_max_distance_pages);
                        DBG_BUGON(top >= lz4_max_distance_pages);
                        availables[top++] = rq->out[i - lz4_max_distance_pages];
@@ -91,14 +91,18 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
 
                if (page) {
                        __clear_bit(j, bounced);
-                       if (kaddr) {
-                               if (kaddr + PAGE_SIZE == page_address(page))
+                       if (!PageHighMem(page)) {
+                               if (!i) {
+                                       kaddr = page_address(page);
+                                       continue;
+                               }
+                               if (kaddr &&
+                                   kaddr + PAGE_SIZE == page_address(page)) {
                                        kaddr += PAGE_SIZE;
-                               else
-                                       kaddr = NULL;
-                       } else if (!i) {
-                               kaddr = page_address(page);
+                                       continue;
+                               }
                        }
+                       kaddr = NULL;
                        continue;
                }
                kaddr = NULL;
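
The reshuffled LZ4 logic above keeps kaddr pointing one page past the last lowmem output page and resets it to NULL whenever a highmem page or an address gap breaks the run, so the decompressor knows whether the destination is one virtually contiguous region. A toy model of that contiguity walk (fixed 4 KiB pages, invented helper name):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Walk an array of page addresses and report whether they form one
 * contiguous run, mirroring the kaddr bookkeeping above: any gap (or,
 * in the kernel, a highmem page) drops back to "not contiguous". */
static int pages_contiguous(void *pages[], size_t n)
{
	char *kaddr = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (i == 0) {
			kaddr = pages[0];
			continue;
		}
		if (!kaddr || kaddr + PAGE_SIZE != (char *)pages[i])
			return 0;
		kaddr += PAGE_SIZE;
	}
	return 1;
}

int main(void)
{
	static char buf[3 * PAGE_SIZE];
	void *run[3] = { buf, buf + PAGE_SIZE, buf + 2 * PAGE_SIZE };
	void *gap[2] = { buf, buf + 2 * PAGE_SIZE };

	printf("%d %d\n", pages_contiguous(run, 3), pages_contiguous(gap, 2));
	return 0;
}
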
index 05a3063..5e59b3f 100644
@@ -143,6 +143,7 @@ again:
        DBG_BUGON(z_erofs_lzma_head);
        z_erofs_lzma_head = head;
        spin_unlock(&z_erofs_lzma_lock);
+       wake_up_all(&z_erofs_lzma_wq);
 
        z_erofs_lzma_max_dictsize = dict_size;
        mutex_unlock(&lzma_resize_mutex);
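
The added wake_up_all() releases every waiter parked on z_erofs_lzma_wq once the rebuilt stream list has been published under z_erofs_lzma_lock. The same publish-then-broadcast shape in plain pthreads, purely as an illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static int resources_ready;

/* consumer: sleep until the resource list is (re)published */
static void *worker(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!resources_ready)
		pthread_cond_wait(&wq, &lock);
	pthread_mutex_unlock(&lock);
	printf("worker %ld resumed\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[2];
	long i;

	for (i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);

	/* producer: publish, then broadcast -- the analogue of the
	 * wake_up_all() added once the rebuilt stream list is live */
	pthread_mutex_lock(&lock);
	resources_ready = 1;
	pthread_cond_broadcast(&wq);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}
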
index 18e5982..ecf28f6 100644
@@ -22,10 +22,9 @@ static void debug_one_dentry(unsigned char d_type, const char *de_name,
 }
 
 static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx,
-                              void *dentry_blk, unsigned int *ofs,
+                              void *dentry_blk, struct erofs_dirent *de,
                               unsigned int nameoff, unsigned int maxsize)
 {
-       struct erofs_dirent *de = dentry_blk + *ofs;
        const struct erofs_dirent *end = dentry_blk + nameoff;
 
        while (de < end) {
@@ -59,9 +58,8 @@ static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx,
                        /* stopped by some reason */
                        return 1;
                ++de;
-               *ofs += sizeof(struct erofs_dirent);
+               ctx->pos += sizeof(struct erofs_dirent);
        }
-       *ofs = maxsize;
        return 0;
 }
 
@@ -90,33 +88,33 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
 
                nameoff = le16_to_cpu(de->nameoff);
                if (nameoff < sizeof(struct erofs_dirent) ||
-                   nameoff >= PAGE_SIZE) {
+                   nameoff >= EROFS_BLKSIZ) {
                        erofs_err(dir->i_sb,
                                  "invalid de[0].nameoff %u @ nid %llu",
                                  nameoff, EROFS_I(dir)->nid);
                        err = -EFSCORRUPTED;
-                       goto skip_this;
+                       break;
                }
 
                maxsize = min_t(unsigned int,
-                               dirsize - ctx->pos + ofs, PAGE_SIZE);
+                               dirsize - ctx->pos + ofs, EROFS_BLKSIZ);
 
                /* search dirents at the arbitrary position */
                if (initial) {
                        initial = false;
 
                        ofs = roundup(ofs, sizeof(struct erofs_dirent));
+                       ctx->pos = blknr_to_addr(i) + ofs;
                        if (ofs >= nameoff)
                                goto skip_this;
                }
 
-               err = erofs_fill_dentries(dir, ctx, de, &ofs,
+               err = erofs_fill_dentries(dir, ctx, de, (void *)de + ofs,
                                          nameoff, maxsize);
-skip_this:
-               ctx->pos = blknr_to_addr(i) + ofs;
-
                if (err)
                        break;
+skip_this:
+               ctx->pos = blknr_to_addr(i) + maxsize;
                ++i;
                ofs = 0;
        }
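
After this rework, erofs_readdir() advances ctx->pos inside the fill loop, one dirent at a time, and rounds it up to the next block only once a block is fully consumed, so an iteration stopped mid-block resumes at the exact entry. A toy model of that position bookkeeping (made-up 64-byte blocks and 16-byte entries):

#include <stdio.h>
#include <string.h>

#define BLKSIZ 64
#define DIRENT_SZ 16

/* Emit entries from one block, advancing *pos per entry so a caller
 * that stops early can resume exactly where it left off -- the same
 * bookkeeping the readdir hunk above moves into the fill loop. */
static int fill_block(const char *blk, unsigned long blkaddr,
		      unsigned long *pos, int budget)
{
	unsigned int ofs = (unsigned int)(*pos - blkaddr);

	for (; ofs + DIRENT_SZ <= BLKSIZ; ofs += DIRENT_SZ) {
		if (!budget--)
			return 1;	/* stopped: *pos is resumable */
		printf("entry %.8s\n", blk + ofs);
		*pos += DIRENT_SZ;
	}
	*pos = blkaddr + BLKSIZ;	/* block fully consumed */
	return 0;
}

int main(void)
{
	char blk[BLKSIZ];
	unsigned long pos = 0;

	memset(blk, 0, sizeof(blk));
	memcpy(blk + 0 * DIRENT_SZ, "file-a  ", 8);
	memcpy(blk + 1 * DIRENT_SZ, "file-b  ", 8);
	memcpy(blk + 2 * DIRENT_SZ, "file-c  ", 8);
	memcpy(blk + 3 * DIRENT_SZ, "file-d  ", 8);

	fill_block(blk, 0, &pos, 2);	/* emit two entries, then stop */
	printf("resume at %lu\n", pos);
	fill_block(blk, 0, &pos, 8);	/* finish the block */
	return 0;
}
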
index 724bb57..5792ca9 100644
@@ -2,6 +2,7 @@
 /*
  * Copyright (C) 2018 HUAWEI, Inc.
  *             https://www.huawei.com/
+ * Copyright (C) 2022 Alibaba Cloud
  */
 #include "zdata.h"
 #include "compress.h"
@@ -26,6 +27,82 @@ static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
        _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
 };
 
+struct z_erofs_bvec_iter {
+       struct page *bvpage;
+       struct z_erofs_bvset *bvset;
+       unsigned int nr, cur;
+};
+
+static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
+{
+       if (iter->bvpage)
+               kunmap_local(iter->bvset);
+       return iter->bvpage;
+}
+
+static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
+{
+       unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
+       /* have to access nextpage in advance, otherwise it will be unmapped */
+       struct page *nextpage = iter->bvset->nextpage;
+       struct page *oldpage;
+
+       DBG_BUGON(!nextpage);
+       oldpage = z_erofs_bvec_iter_end(iter);
+       iter->bvpage = nextpage;
+       iter->bvset = kmap_local_page(nextpage);
+       iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
+       iter->cur = 0;
+       return oldpage;
+}
+
+static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
+                                   struct z_erofs_bvset_inline *bvset,
+                                   unsigned int bootstrap_nr,
+                                   unsigned int cur)
+{
+       *iter = (struct z_erofs_bvec_iter) {
+               .nr = bootstrap_nr,
+               .bvset = (struct z_erofs_bvset *)bvset,
+       };
+
+       while (cur > iter->nr) {
+               cur -= iter->nr;
+               z_erofs_bvset_flip(iter);
+       }
+       iter->cur = cur;
+}
+
+static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
+                               struct z_erofs_bvec *bvec,
+                               struct page **candidate_bvpage)
+{
+       if (iter->cur == iter->nr) {
+               if (!*candidate_bvpage)
+                       return -EAGAIN;
+
+               DBG_BUGON(iter->bvset->nextpage);
+               iter->bvset->nextpage = *candidate_bvpage;
+               z_erofs_bvset_flip(iter);
+
+               iter->bvset->nextpage = NULL;
+               *candidate_bvpage = NULL;
+       }
+       iter->bvset->bvec[iter->cur++] = *bvec;
+       return 0;
+}
+
+static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
+                                struct z_erofs_bvec *bvec,
+                                struct page **old_bvpage)
+{
+       if (iter->cur == iter->nr)
+               *old_bvpage = z_erofs_bvset_flip(iter);
+       else
+               *old_bvpage = NULL;
+       *bvec = iter->bvset->bvec[iter->cur++];
+}
+
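Taken together, the iterator above walks bvec arrays chained page by page through bvset->nextpage; when z_erofs_bvec_enqueue() fills the current set it consumes a caller-supplied spare page (the frontend's candidate_bvpage) or reports -EAGAIN. A compressed userspace analogue of that enqueue/spill contract, with toy node sizes and invented names:

#include <stdio.h>
#include <stdlib.h>

#define NODE_VECS 2			/* tiny on purpose, to force a spill */

struct bvec { int page_id, offset, end; };

struct bvnode {
	struct bvnode *next;		/* models bvset->nextpage */
	struct bvec vec[NODE_VECS];
};

struct bviter {
	struct bvnode *node;
	unsigned int nr, cur;
};

/* Append one bvec; when the current node is full, chain in the caller's
 * spare node (the analogue of candidate_bvpage).  Returns -1 when a
 * spare would be needed but none was supplied, like -EAGAIN above. */
static int bv_enqueue(struct bviter *it, struct bvec v, struct bvnode **spare)
{
	if (it->cur == it->nr) {
		if (!*spare)
			return -1;
		it->node->next = *spare;
		it->node = *spare;
		*spare = NULL;
		it->cur = 0;
	}
	it->node->vec[it->cur++] = v;
	return 0;
}

int main(void)
{
	struct bvnode head = { 0 }, *spare = calloc(1, sizeof(*spare));
	struct bviter it = { .node = &head, .nr = NODE_VECS };
	int i;

	for (i = 0; i < 3; i++) {
		struct bvec v = { .page_id = i, .end = 4096 };

		if (bv_enqueue(&it, v, &spare) < 0)
			printf("bvec %d: would return -EAGAIN\n", i);
	}
	printf("spare consumed: %s\n", spare ? "no" : "yes");
	free(head.next);
	return 0;
}
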
 static void z_erofs_destroy_pcluster_pool(void)
 {
        int i;
@@ -46,7 +123,7 @@ static int z_erofs_create_pcluster_pool(void)
 
        for (pcs = pcluster_pool;
             pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
-               size = struct_size(a, compressed_pages, pcs->maxpages);
+               size = struct_size(a, compressed_bvecs, pcs->maxpages);
 
                sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
                pcs->slab = kmem_cache_create(pcs->name, size, 0,
@@ -150,30 +227,29 @@ int __init z_erofs_init_zip_subsystem(void)
        return err;
 }
 
-enum z_erofs_collectmode {
-       COLLECT_SECONDARY,
-       COLLECT_PRIMARY,
+enum z_erofs_pclustermode {
+       Z_EROFS_PCLUSTER_INFLIGHT,
        /*
-        * The current collection was the tail of an exist chain, in addition
-        * that the previous processed chained collections are all decided to
+        * The current pcluster was the tail of an existing chain, and the
+        * previously processed chained pclusters have all been decided to
         * be hooked up to it.
-        * A new chain will be created for the remaining collections which are
-        * not processed yet, therefore different from COLLECT_PRIMARY_FOLLOWED,
-        * the next collection cannot reuse the whole page safely in
-        * the following scenario:
+        * A new chain will be created for the remaining pclusters that are
+        * not processed yet, so unlike Z_EROFS_PCLUSTER_FOLLOWED, the next
+        * pcluster cannot safely reuse the whole page for inplace I/O
+        * in the following scenario:
         *  ________________________________________________________________
         * |      tail (partial) page     |       head (partial) page       |
-        * |   (belongs to the next cl)   |   (belongs to the current cl)   |
-        * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
+        * |   (belongs to the next pcl)  |   (belongs to the current pcl)  |
+        * |_______PCLUSTER_FOLLOWED______|________PCLUSTER_HOOKED__________|
         */
-       COLLECT_PRIMARY_HOOKED,
+       Z_EROFS_PCLUSTER_HOOKED,
        /*
-        * a weak form of COLLECT_PRIMARY_FOLLOWED, the difference is that it
+        * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
         * could be dispatched into bypass queue later due to uptodated managed
         * pages. All related online pages cannot be reused for inplace I/O (or
-        * pagevec) since it can be directly decoded without I/O submission.
+        * bvpage) since it can be directly decoded without I/O submission.
         */
-       COLLECT_PRIMARY_FOLLOWED_NOINPLACE,
+       Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
        /*
         * The current collection has been linked with the owned chain, and
         * could also be linked with the remaining collections, which means
@@ -184,39 +260,36 @@ enum z_erofs_collectmode {
         *  ________________________________________________________________
         * |  tail (partial) page |          head (partial) page           |
         * |  (of the current cl) |      (of the previous collection)      |
-        * |  PRIMARY_FOLLOWED or |                                        |
-        * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________|
+        * | PCLUSTER_FOLLOWED or |                                        |
+        * |_____PCLUSTER_HOOKED__|___________PCLUSTER_FOLLOWED____________|
         *
         * [  (*) the above page can be used as inplace I/O.               ]
         */
-       COLLECT_PRIMARY_FOLLOWED,
+       Z_EROFS_PCLUSTER_FOLLOWED,
 };
 
 struct z_erofs_decompress_frontend {
        struct inode *const inode;
        struct erofs_map_blocks map;
+       struct z_erofs_bvec_iter biter;
 
-       struct z_erofs_pagevec_ctor vector;
-
+       struct page *candidate_bvpage;
        struct z_erofs_pcluster *pcl, *tailpcl;
-       /* a pointer used to pick up inplace I/O pages */
-       struct page **icpage_ptr;
        z_erofs_next_pcluster_t owned_head;
-
-       enum z_erofs_collectmode mode;
+       enum z_erofs_pclustermode mode;
 
        bool readahead;
        /* used for applying cache strategy on the fly */
        bool backmost;
        erofs_off_t headoffset;
+
+       /* a pointer used to pick up inplace I/O pages */
+       unsigned int icur;
 };
 
 #define DECOMPRESS_FRONTEND_INIT(__i) { \
        .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
-       .mode = COLLECT_PRIMARY_FOLLOWED, .backmost = true }
-
-static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
-static DEFINE_MUTEX(z_pagemap_global_lock);
+       .mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }
 
 static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
                               enum z_erofs_cache_alloctype type,
@@ -231,24 +304,21 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
         */
        gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
                        __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
-       struct page **pages;
-       pgoff_t index;
+       unsigned int i;
 
-       if (fe->mode < COLLECT_PRIMARY_FOLLOWED)
+       if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
                return;
 
-       pages = pcl->compressed_pages;
-       index = pcl->obj.index;
-       for (; index < pcl->obj.index + pcl->pclusterpages; ++index, ++pages) {
+       for (i = 0; i < pcl->pclusterpages; ++i) {
                struct page *page;
                compressed_page_t t;
                struct page *newpage = NULL;
 
                /* the compressed page was loaded before */
-               if (READ_ONCE(*pages))
+               if (READ_ONCE(pcl->compressed_bvecs[i].page))
                        continue;
 
-               page = find_get_page(mc, index);
+               page = find_get_page(mc, pcl->obj.index + i);
 
                if (page) {
                        t = tag_compressed_page_justfound(page);
@@ -269,7 +339,8 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
                        }
                }
 
-               if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
+               if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL,
+                                    tagptr_cast_ptr(t)))
                        continue;
 
                if (page)
@@ -283,7 +354,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
         * managed cache since it can be moved to the bypass queue instead.
         */
        if (standalone)
-               fe->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
+               fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
 }
 
 /* called by erofs_shrinker to get rid of all compressed_pages */
@@ -300,7 +371,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
         * therefore no need to worry about available decompression users.
         */
        for (i = 0; i < pcl->pclusterpages; ++i) {
-               struct page *page = pcl->compressed_pages[i];
+               struct page *page = pcl->compressed_bvecs[i].page;
 
                if (!page)
                        continue;
@@ -313,7 +384,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
                        continue;
 
                /* barrier is implied in the following 'unlock_page' */
-               WRITE_ONCE(pcl->compressed_pages[i], NULL);
+               WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
                detach_page_private(page);
                unlock_page(page);
        }
@@ -323,56 +394,59 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 int erofs_try_to_free_cached_page(struct page *page)
 {
        struct z_erofs_pcluster *const pcl = (void *)page_private(page);
-       int ret = 0;    /* 0 - busy */
+       int ret, i;
 
-       if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) {
-               unsigned int i;
+       if (!erofs_workgroup_try_to_freeze(&pcl->obj, 1))
+               return 0;
 
-               DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
-               for (i = 0; i < pcl->pclusterpages; ++i) {
-                       if (pcl->compressed_pages[i] == page) {
-                               WRITE_ONCE(pcl->compressed_pages[i], NULL);
-                               ret = 1;
-                               break;
-                       }
+       ret = 0;
+       DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
+       for (i = 0; i < pcl->pclusterpages; ++i) {
+               if (pcl->compressed_bvecs[i].page == page) {
+                       WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
+                       ret = 1;
+                       break;
                }
-               erofs_workgroup_unfreeze(&pcl->obj, 1);
-
-               if (ret)
-                       detach_page_private(page);
        }
+       erofs_workgroup_unfreeze(&pcl->obj, 1);
+       if (ret)
+               detach_page_private(page);
        return ret;
 }
 
-/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
 static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
-                                  struct page *page)
+                                  struct z_erofs_bvec *bvec)
 {
        struct z_erofs_pcluster *const pcl = fe->pcl;
 
-       while (fe->icpage_ptr > pcl->compressed_pages)
-               if (!cmpxchg(--fe->icpage_ptr, NULL, page))
+       while (fe->icur > 0) {
+               if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
+                            NULL, bvec->page)) {
+                       pcl->compressed_bvecs[fe->icur] = *bvec;
                        return true;
+               }
+       }
        return false;
 }
 
 /* callers must be with pcluster lock held */
 static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
-                              struct page *page, enum z_erofs_page_type type,
-                              bool pvec_safereuse)
+                              struct z_erofs_bvec *bvec, bool exclusive)
 {
        int ret;
 
-       /* give priority for inplaceio */
-       if (fe->mode >= COLLECT_PRIMARY &&
-           type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
-           z_erofs_try_inplace_io(fe, page))
-               return 0;
-
-       ret = z_erofs_pagevec_enqueue(&fe->vector, page, type,
-                                     pvec_safereuse);
-       fe->pcl->vcnt += (unsigned int)ret;
-       return ret ? 0 : -EAGAIN;
+       if (exclusive) {
+               /* give priority for inplaceio to use file pages first */
+               if (z_erofs_try_inplace_io(fe, bvec))
+                       return 0;
+               /* otherwise, check if it can be used as a bvpage */
+               if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
+                   !fe->candidate_bvpage)
+                       fe->candidate_bvpage = bvec->page;
+       }
+       ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage);
+       fe->pcl->vcnt += (ret >= 0);
+       return ret;
 }
 
 static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
@@ -385,7 +459,7 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
                    *owned_head) == Z_EROFS_PCLUSTER_NIL) {
                *owned_head = &pcl->next;
                /* so we can attach this pcluster to our submission chain. */
-               f->mode = COLLECT_PRIMARY_FOLLOWED;
+               f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
                return;
        }
 
@@ -393,66 +467,21 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
         * type 2, link to the end of an existing open chain, be careful
         * that its submission is controlled by the original attached chain.
         */
-       if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
+       if (*owned_head != &pcl->next && pcl != f->tailpcl &&
+           cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
                    *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
                *owned_head = Z_EROFS_PCLUSTER_TAIL;
-               f->mode = COLLECT_PRIMARY_HOOKED;
+               f->mode = Z_EROFS_PCLUSTER_HOOKED;
                f->tailpcl = NULL;
                return;
        }
        /* type 3, it belongs to a chain, but it isn't the end of the chain */
-       f->mode = COLLECT_PRIMARY;
+       f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
 }
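
Chain claiming stays lock-free: a submission chain may hook onto a pcluster only while pcl->next still reads Z_EROFS_PCLUSTER_TAIL, and the cmpxchg guarantees exactly one of several racing chains wins (the added *owned_head and tailpcl checks additionally reject self-loops from corrupted images). The core race, sketched with C11 atomics and an illustrative sentinel:

#include <stdatomic.h>
#include <stdio.h>

#define TAIL ((void *)0x5a5a)	/* stands in for Z_EROFS_PCLUSTER_TAIL */

struct pcl {
	_Atomic(void *) next;
};

/* Hook a chain onto pcl only while it is still the open end of an
 * existing chain (next == TAIL); exactly one racing caller can win,
 * which is the guarantee the cmpxchg above relies on. */
static int claim_tail(struct pcl *p, void *our_head)
{
	void *expected = TAIL;

	return atomic_compare_exchange_strong(&p->next, &expected, our_head);
}

int main(void)
{
	struct pcl p = { .next = TAIL };
	int a, b;

	a = claim_tail(&p, &a);		/* wins: next was TAIL */
	b = claim_tail(&p, &b);		/* loses: next already claimed */
	printf("a=%d b=%d\n", a, b);	/* a=1 b=0 */
	return 0;
}
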
 
-static int z_erofs_lookup_pcluster(struct z_erofs_decompress_frontend *fe,
-                                  struct inode *inode,
-                                  struct erofs_map_blocks *map)
-{
-       struct z_erofs_pcluster *pcl = fe->pcl;
-       unsigned int length;
-
-       /* to avoid unexpected loop formed by corrupted images */
-       if (fe->owned_head == &pcl->next || pcl == fe->tailpcl) {
-               DBG_BUGON(1);
-               return -EFSCORRUPTED;
-       }
-
-       if (pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) {
-               DBG_BUGON(1);
-               return -EFSCORRUPTED;
-       }
-
-       length = READ_ONCE(pcl->length);
-       if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
-               if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
-                       DBG_BUGON(1);
-                       return -EFSCORRUPTED;
-               }
-       } else {
-               unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;
-
-               if (map->m_flags & EROFS_MAP_FULL_MAPPED)
-                       llen |= Z_EROFS_PCLUSTER_FULL_LENGTH;
-
-               while (llen > length &&
-                      length != cmpxchg_relaxed(&pcl->length, length, llen)) {
-                       cpu_relax();
-                       length = READ_ONCE(pcl->length);
-               }
-       }
-       mutex_lock(&pcl->lock);
-       /* used to check tail merging loop due to corrupted images */
-       if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
-               fe->tailpcl = pcl;
-
-       z_erofs_try_to_claim_pcluster(fe);
-       return 0;
-}
-
-static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe,
-                                    struct inode *inode,
-                                    struct erofs_map_blocks *map)
+static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 {
+       struct erofs_map_blocks *map = &fe->map;
        bool ztailpacking = map->m_flags & EROFS_MAP_META;
        struct z_erofs_pcluster *pcl;
        struct erofs_workgroup *grp;
@@ -471,14 +500,13 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe,
 
        atomic_set(&pcl->obj.refcount, 1);
        pcl->algorithmformat = map->m_algorithmformat;
-       pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
-               (map->m_flags & EROFS_MAP_FULL_MAPPED ?
-                       Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
+       pcl->length = 0;
+       pcl->partial = true;
 
        /* new pclusters should be claimed as type 1, primary and followed */
        pcl->next = fe->owned_head;
        pcl->pageofs_out = map->m_la & ~PAGE_MASK;
-       fe->mode = COLLECT_PRIMARY_FOLLOWED;
+       fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
 
        /*
         * lock all primary followed works before visible to others
@@ -494,7 +522,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe,
        } else {
                pcl->obj.index = map->m_pa >> PAGE_SHIFT;
 
-               grp = erofs_insert_workgroup(inode->i_sb, &pcl->obj);
+               grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
                if (IS_ERR(grp)) {
                        err = PTR_ERR(grp);
                        goto err_out;
@@ -520,11 +548,10 @@ err_out:
        return err;
 }
 
-static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe,
-                                  struct inode *inode,
-                                  struct erofs_map_blocks *map)
+static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
 {
-       struct erofs_workgroup *grp;
+       struct erofs_map_blocks *map = &fe->map;
+       struct erofs_workgroup *grp = NULL;
        int ret;
 
        DBG_BUGON(fe->pcl);
@@ -533,38 +560,35 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe,
        DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
        DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
 
-       if (map->m_flags & EROFS_MAP_META) {
-               if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
-                       DBG_BUGON(1);
-                       return -EFSCORRUPTED;
-               }
-               goto tailpacking;
+       if (!(map->m_flags & EROFS_MAP_META)) {
+               grp = erofs_find_workgroup(fe->inode->i_sb,
+                                          map->m_pa >> PAGE_SHIFT);
+       } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
+               DBG_BUGON(1);
+               return -EFSCORRUPTED;
        }
 
-       grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
        if (grp) {
                fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
+               ret = -EEXIST;
        } else {
-tailpacking:
-               ret = z_erofs_register_pcluster(fe, inode, map);
-               if (!ret)
-                       goto out;
-               if (ret != -EEXIST)
-                       return ret;
+               ret = z_erofs_register_pcluster(fe);
        }
 
-       ret = z_erofs_lookup_pcluster(fe, inode, map);
-       if (ret) {
-               erofs_workgroup_put(&fe->pcl->obj);
+       if (ret == -EEXIST) {
+               mutex_lock(&fe->pcl->lock);
+               /* used to check tail merging loop due to corrupted images */
+               if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
+                       fe->tailpcl = fe->pcl;
+
+               z_erofs_try_to_claim_pcluster(fe);
+       } else if (ret) {
                return ret;
        }
-
-out:
-       z_erofs_pagevec_ctor_init(&fe->vector, Z_EROFS_NR_INLINE_PAGEVECS,
-                                 fe->pcl->pagevec, fe->pcl->vcnt);
+       z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
+                               Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
        /* since file-backed online pages are traversed in reverse order */
-       fe->icpage_ptr = fe->pcl->compressed_pages +
-                       z_erofs_pclusterpages(fe->pcl);
+       fe->icur = z_erofs_pclusterpages(fe->pcl);
        return 0;
 }
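
The collector is now a plain find-or-register: look the workgroup up first, register on a miss, and treat -EEXIST from either path as "attach to the pcluster somebody else already installed". The same convergence in miniature (toy table, nonzero keys only):

#include <errno.h>
#include <stdio.h>

#define SLOTS 8

static int table[SLOTS];	/* 0 = empty; demo keys must be nonzero */

static int find(int key)
{
	int i;

	for (i = 0; i < SLOTS; i++)
		if (table[i] == key)
			return i;
	return -1;
}

/* Register a key; -EEXIST reports that someone got there first, so the
 * caller falls through to the "reuse the existing entry" path, just as
 * z_erofs_collector_begin() above converges on -EEXIST. */
static int reg(int key)
{
	int slot;

	if (find(key) >= 0)
		return -EEXIST;
	slot = find(0);
	if (slot < 0)
		return -ENOSPC;
	table[slot] = key;
	return 0;
}

static void lookup_or_register(int key)
{
	int ret = find(key) >= 0 ? -EEXIST : reg(key);

	if (ret == -EEXIST)
		printf("key %d: attach to existing entry\n", key);
	else if (!ret)
		printf("key %d: registered new entry\n", key);
}

int main(void)
{
	lookup_or_register(42);		/* registered new entry */
	lookup_or_register(42);		/* attach to existing entry */
	return 0;
}
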
 
@@ -593,14 +617,19 @@ static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
        if (!pcl)
                return false;
 
-       z_erofs_pagevec_ctor_exit(&fe->vector, false);
+       z_erofs_bvec_iter_end(&fe->biter);
        mutex_unlock(&pcl->lock);
 
+       if (fe->candidate_bvpage) {
+               DBG_BUGON(z_erofs_is_shortlived_page(fe->candidate_bvpage));
+               fe->candidate_bvpage = NULL;
+       }
+
        /*
         * if all pending pages are added, don't hold its reference
         * any longer if the pcluster isn't hosted by ourselves.
         */
-       if (fe->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
+       if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
                erofs_workgroup_put(&pcl->obj);
 
        fe->pcl = NULL;
@@ -628,11 +657,10 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
        struct erofs_map_blocks *const map = &fe->map;
        const loff_t offset = page_offset(page);
-       bool tight = true;
+       bool tight = true, exclusive;
 
        enum z_erofs_cache_alloctype cache_strategy;
-       enum z_erofs_page_type page_type;
-       unsigned int cur, end, spiltted, index;
+       unsigned int cur, end, spiltted;
        int err = 0;
 
        /* register locked file pages as online pages in pack */
@@ -653,7 +681,7 @@ repeat:
                map->m_llen = 0;
                err = z_erofs_map_blocks_iter(inode, map, 0);
                if (err)
-                       goto err_out;
+                       goto out;
        } else {
                if (fe->pcl)
                        goto hitted;
@@ -663,9 +691,9 @@ repeat:
        if (!(map->m_flags & EROFS_MAP_MAPPED))
                goto hitted;
 
-       err = z_erofs_collector_begin(fe, inode, map);
+       err = z_erofs_collector_begin(fe);
        if (err)
-               goto err_out;
+               goto out;
 
        if (z_erofs_is_inline_pcluster(fe->pcl)) {
                void *mp;
@@ -676,11 +704,12 @@ repeat:
                        err = PTR_ERR(mp);
                        erofs_err(inode->i_sb,
                                  "failed to get inline page, err %d", err);
-                       goto err_out;
+                       goto out;
                }
                get_page(fe->map.buf.page);
-               WRITE_ONCE(fe->pcl->compressed_pages[0], fe->map.buf.page);
-               fe->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
+               WRITE_ONCE(fe->pcl->compressed_bvecs[0].page,
+                          fe->map.buf.page);
+               fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
        } else {
                /* bind cache first when cached decompression is preferred */
                if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy,
@@ -696,10 +725,10 @@ hitted:
         * Ensure the current partial page belongs to this submit chain rather
         * than other concurrent submit chains or the noio(bypass) chain since
         * those chains are handled asynchronously thus the page cannot be used
-        * for inplace I/O or pagevec (should be processed in strict order.)
+        * for inplace I/O or bvpage (should be processed in a strict order).
         */
-       tight &= (fe->mode >= COLLECT_PRIMARY_HOOKED &&
-                 fe->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
+       tight &= (fe->mode >= Z_EROFS_PCLUSTER_HOOKED &&
+                 fe->mode != Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
 
        cur = end - min_t(unsigned int, offset + end - map->m_la, end);
        if (!(map->m_flags & EROFS_MAP_MAPPED)) {
@@ -707,60 +736,59 @@ hitted:
                goto next_part;
        }
 
-       /* let's derive page type */
-       page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
-               (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
-                       (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
-                               Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
-
+       exclusive = (!cur && (!spiltted || tight));
        if (cur)
-               tight &= (fe->mode >= COLLECT_PRIMARY_FOLLOWED);
+               tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
 
 retry:
-       err = z_erofs_attach_page(fe, page, page_type,
-                                 fe->mode >= COLLECT_PRIMARY_FOLLOWED);
-       /* should allocate an additional short-lived page for pagevec */
-       if (err == -EAGAIN) {
-               struct page *const newpage =
-                               alloc_page(GFP_NOFS | __GFP_NOFAIL);
-
-               set_page_private(newpage, Z_EROFS_SHORTLIVED_PAGE);
-               err = z_erofs_attach_page(fe, newpage,
-                                         Z_EROFS_PAGE_TYPE_EXCLUSIVE, true);
-               if (!err)
-                       goto retry;
+       err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
+                                       .page = page,
+                                       .offset = offset - map->m_la,
+                                       .end = end,
+                                 }), exclusive);
+       /* should allocate an additional short-lived page for bvset */
+       if (err == -EAGAIN && !fe->candidate_bvpage) {
+               fe->candidate_bvpage = alloc_page(GFP_NOFS | __GFP_NOFAIL);
+               set_page_private(fe->candidate_bvpage,
+                                Z_EROFS_SHORTLIVED_PAGE);
+               goto retry;
        }
 
-       if (err)
-               goto err_out;
-
-       index = page->index - (map->m_la >> PAGE_SHIFT);
-
-       z_erofs_onlinepage_fixup(page, index, true);
+       if (err) {
+               DBG_BUGON(err == -EAGAIN && fe->candidate_bvpage);
+               goto out;
+       }
 
+       z_erofs_onlinepage_split(page);
        /* bump up the number of spiltted parts of a page */
        ++spiltted;
-       /* also update nr_pages */
-       fe->pcl->nr_pages = max_t(pgoff_t, fe->pcl->nr_pages, index + 1);
+       if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
+               fe->pcl->multibases = true;
+
+       if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
+           fe->pcl->length == map->m_llen)
+               fe->pcl->partial = false;
+       if (fe->pcl->length < offset + end - map->m_la) {
+               fe->pcl->length = offset + end - map->m_la;
+               fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
+       }
 next_part:
-       /* can be used for verification */
+       /* shorten the remaining extent to update progress */
        map->m_llen = offset + cur - map->m_la;
+       map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
 
        end = cur;
        if (end > 0)
                goto repeat;
 
 out:
+       if (err)
+               z_erofs_page_mark_eio(page);
        z_erofs_onlinepage_endio(page);
 
        erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu",
                  __func__, page, spiltted, map->m_llen);
        return err;
-
-       /* if some error occurred while processing this page */
-err_out:
-       SetPageError(page);
-       goto out;
 }
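
z_erofs_onlinepage_split() and z_erofs_onlinepage_endio() maintain a per-page atomic in which, per the reworked comment in zdata.h further below, bits 0-30 count the parts still outstanding and bit 31 latches an I/O error; the page completes when the counter drains. A simplified model of that accounting (not the kernel's exact implementation):

#include <stdatomic.h>
#include <stdio.h>

#define EIO_BIT   (1u << 31)		/* bit 31: an I/O error was seen */
#define PART_MASK (EIO_BIT - 1)		/* bits 0-30: pending parts */

static atomic_uint state = 1;		/* one part outstanding initially */

static void page_split(void)		/* another pcluster covers this page */
{
	atomic_fetch_add(&state, 1);
}

static void page_mark_eio(void)		/* record a failure, keep counting */
{
	atomic_fetch_or(&state, EIO_BIT);
}

static void page_endio(void)		/* one part finished */
{
	unsigned int v = atomic_fetch_sub(&state, 1) - 1;

	if (!(v & PART_MASK))
		printf("page done: %s\n",
		       (v & EIO_BIT) ? "error" : "uptodate");
}

int main(void)
{
	page_split();		/* the page now spans two pclusters */
	page_endio();		/* first part succeeded */
	page_mark_eio();	/* second part failed */
	page_endio();		/* prints "page done: error" */
	return 0;
}
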
 
 static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
@@ -783,97 +811,137 @@ static bool z_erofs_page_is_invalidated(struct page *page)
        return !page->mapping && !z_erofs_is_shortlived_page(page);
 }
 
-static int z_erofs_decompress_pcluster(struct super_block *sb,
-                                      struct z_erofs_pcluster *pcl,
-                                      struct page **pagepool)
-{
-       struct erofs_sb_info *const sbi = EROFS_SB(sb);
-       unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
-       struct z_erofs_pagevec_ctor ctor;
-       unsigned int i, inputsize, outputsize, llen, nr_pages;
-       struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
-       struct page **pages, **compressed_pages, *page;
+struct z_erofs_decompress_backend {
+       struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
+       struct super_block *sb;
+       struct z_erofs_pcluster *pcl;
 
-       enum z_erofs_page_type page_type;
-       bool overlapped, partial;
-       int err;
+       /* pages with the longest decompressed length for deduplication */
+       struct page **decompressed_pages;
+       /* pages to keep the compressed data */
+       struct page **compressed_pages;
 
-       might_sleep();
-       DBG_BUGON(!READ_ONCE(pcl->nr_pages));
+       struct list_head decompressed_secondary_bvecs;
+       struct page **pagepool;
+       unsigned int onstack_used, nr_pages;
+};
 
-       mutex_lock(&pcl->lock);
-       nr_pages = pcl->nr_pages;
+struct z_erofs_bvec_item {
+       struct z_erofs_bvec bvec;
+       struct list_head list;
+};
 
-       if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) {
-               pages = pages_onstack;
-       } else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES &&
-                  mutex_trylock(&z_pagemap_global_lock)) {
-               pages = z_pagemap_global;
-       } else {
-               gfp_t gfp_flags = GFP_KERNEL;
+static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
+                                        struct z_erofs_bvec *bvec)
+{
+       struct z_erofs_bvec_item *item;
 
-               if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES)
-                       gfp_flags |= __GFP_NOFAIL;
+       if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) {
+               unsigned int pgnr;
+               struct page *oldpage;
 
-               pages = kvmalloc_array(nr_pages, sizeof(struct page *),
-                                      gfp_flags);
+               pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
+               DBG_BUGON(pgnr >= be->nr_pages);
+               oldpage = be->decompressed_pages[pgnr];
+               be->decompressed_pages[pgnr] = bvec->page;
 
-               /* fallback to global pagemap for the lowmem scenario */
-               if (!pages) {
-                       mutex_lock(&z_pagemap_global_lock);
-                       pages = z_pagemap_global;
-               }
+               if (!oldpage)
+                       return;
        }
 
-       for (i = 0; i < nr_pages; ++i)
-               pages[i] = NULL;
-
-       err = 0;
-       z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
-                                 pcl->pagevec, 0);
-
-       for (i = 0; i < pcl->vcnt; ++i) {
-               unsigned int pagenr;
+       /* (cold path) one pcluster is requested multiple times */
+       item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
+       item->bvec = *bvec;
+       list_add(&item->list, &be->decompressed_secondary_bvecs);
+}
 
-               page = z_erofs_pagevec_dequeue(&ctor, &page_type);
+static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
+                                     int err)
+{
+       unsigned int off0 = be->pcl->pageofs_out;
+       struct list_head *p, *n;
+
+       list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
+               struct z_erofs_bvec_item *bvi;
+               unsigned int end, cur;
+               void *dst, *src;
+
+               bvi = container_of(p, struct z_erofs_bvec_item, list);
+               cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
+               end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
+                           bvi->bvec.end);
+               dst = kmap_local_page(bvi->bvec.page);
+               while (cur < end) {
+                       unsigned int pgnr, scur, len;
+
+                       pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
+                       DBG_BUGON(pgnr >= be->nr_pages);
+
+                       scur = bvi->bvec.offset + cur -
+                                       ((pgnr << PAGE_SHIFT) - off0);
+                       len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
+                       if (!be->decompressed_pages[pgnr]) {
+                               err = -EFSCORRUPTED;
+                               cur += len;
+                               continue;
+                       }
+                       src = kmap_local_page(be->decompressed_pages[pgnr]);
+                       memcpy(dst + cur, src + scur, len);
+                       kunmap_local(src);
+                       cur += len;
+               }
+               kunmap_local(dst);
+               if (err)
+                       z_erofs_page_mark_eio(bvi->bvec.page);
+               z_erofs_onlinepage_endio(bvi->bvec.page);
+               list_del(p);
+               kfree(bvi);
+       }
+}
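
z_erofs_fill_other_copies() services secondary (multi-reference) bvecs by memcpy from the primary decompressed pages, splitting every copy at page boundaries. The boundary-splitting walk on its own, with toy 8-byte pages:

#include <stdio.h>
#include <string.h>

#define PGSZ 8				/* toy page size */

/* Copy [off, off + len) of output that lives in PGSZ-byte "pages" into
 * a secondary buffer, splitting each memcpy at page boundaries the way
 * z_erofs_fill_other_copies() walks a secondary bvec. */
static void fill_copy(char *dst, char *pages[], unsigned int off,
		      unsigned int len)
{
	unsigned int cur = 0;

	while (cur < len) {
		unsigned int pgnr = (off + cur) / PGSZ;
		unsigned int scur = (off + cur) % PGSZ;
		unsigned int n = len - cur;

		if (n > PGSZ - scur)
			n = PGSZ - scur;
		memcpy(dst + cur, pages[pgnr] + scur, n);
		cur += n;
	}
}

int main(void)
{
	char p0[PGSZ + 1] = "ABCDEFGH", p1[PGSZ + 1] = "IJKLMNOP";
	char *pages[] = { p0, p1 };
	char out[5] = { 0 };

	fill_copy(out, pages, 6, 4);	/* the copy spans a page boundary */
	printf("%s\n", out);		/* GHIJ */
	return 0;
}
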
 
-               /* all pages in pagevec ought to be valid */
-               DBG_BUGON(!page);
-               DBG_BUGON(z_erofs_page_is_invalidated(page));
+static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
+{
+       struct z_erofs_pcluster *pcl = be->pcl;
+       struct z_erofs_bvec_iter biter;
+       struct page *old_bvpage;
+       int i;
 
-               if (z_erofs_put_shortlivedpage(pagepool, page))
-                       continue;
+       z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
+       for (i = 0; i < pcl->vcnt; ++i) {
+               struct z_erofs_bvec bvec;
 
-               if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
-                       pagenr = 0;
-               else
-                       pagenr = z_erofs_onlinepage_index(page);
+               z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);
 
-               DBG_BUGON(pagenr >= nr_pages);
+               if (old_bvpage)
+                       z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
 
-               /*
-                * currently EROFS doesn't support multiref(dedup),
-                * so here erroring out one multiref page.
-                */
-               if (pages[pagenr]) {
-                       DBG_BUGON(1);
-                       SetPageError(pages[pagenr]);
-                       z_erofs_onlinepage_endio(pages[pagenr]);
-                       err = -EFSCORRUPTED;
-               }
-               pages[pagenr] = page;
+               DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
+               z_erofs_do_decompressed_bvec(be, &bvec);
        }
-       z_erofs_pagevec_ctor_exit(&ctor, true);
 
-       overlapped = false;
-       compressed_pages = pcl->compressed_pages;
+       old_bvpage = z_erofs_bvec_iter_end(&biter);
+       if (old_bvpage)
+               z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
+}
 
+static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
+                                 bool *overlapped)
+{
+       struct z_erofs_pcluster *pcl = be->pcl;
+       unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+       int i, err = 0;
+
+       *overlapped = false;
        for (i = 0; i < pclusterpages; ++i) {
-               unsigned int pagenr;
+               struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
+               struct page *page = bvec->page;
 
-               page = compressed_pages[i];
-               /* all compressed pages ought to be valid */
-               DBG_BUGON(!page);
+               /* compressed pages ought to be present before decompressing */
+               if (!page) {
+                       DBG_BUGON(1);
+                       continue;
+               }
+               be->compressed_pages[i] = page;
 
                if (z_erofs_is_inline_pcluster(pcl)) {
                        if (!PageUptodate(page))
@@ -883,109 +951,129 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
 
                DBG_BUGON(z_erofs_page_is_invalidated(page));
                if (!z_erofs_is_shortlived_page(page)) {
-                       if (erofs_page_is_managed(sbi, page)) {
+                       if (erofs_page_is_managed(EROFS_SB(be->sb), page)) {
                                if (!PageUptodate(page))
                                        err = -EIO;
                                continue;
                        }
+                       z_erofs_do_decompressed_bvec(be, bvec);
+                       *overlapped = true;
+               }
+       }
 
-                       /*
-                        * only if non-head page can be selected
-                        * for inplace decompression
-                        */
-                       pagenr = z_erofs_onlinepage_index(page);
-
-                       DBG_BUGON(pagenr >= nr_pages);
-                       if (pages[pagenr]) {
-                               DBG_BUGON(1);
-                               SetPageError(pages[pagenr]);
-                               z_erofs_onlinepage_endio(pages[pagenr]);
-                               err = -EFSCORRUPTED;
-                       }
-                       pages[pagenr] = page;
+       if (err)
+               return err;
+       return 0;
+}
 
-                       overlapped = true;
-               }
+static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+                                      int err)
+{
+       struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
+       struct z_erofs_pcluster *pcl = be->pcl;
+       unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+       unsigned int i, inputsize;
+       int err2;
+       struct page *page;
+       bool overlapped;
 
-               /* PG_error needs checking for all non-managed pages */
-               if (PageError(page)) {
-                       DBG_BUGON(PageUptodate(page));
-                       err = -EIO;
-               }
+       mutex_lock(&pcl->lock);
+       be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
+
+       /* allocate (de)compressed page arrays if cannot be kept on stack */
+       be->decompressed_pages = NULL;
+       be->compressed_pages = NULL;
+       be->onstack_used = 0;
+       if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
+               be->decompressed_pages = be->onstack_pages;
+               be->onstack_used = be->nr_pages;
+               memset(be->decompressed_pages, 0,
+                      sizeof(struct page *) * be->nr_pages);
        }
 
+       if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
+               be->compressed_pages = be->onstack_pages + be->onstack_used;
+
+       if (!be->decompressed_pages)
+               be->decompressed_pages =
+                       kvcalloc(be->nr_pages, sizeof(struct page *),
+                                GFP_KERNEL | __GFP_NOFAIL);
+       if (!be->compressed_pages)
+               be->compressed_pages =
+                       kvcalloc(pclusterpages, sizeof(struct page *),
+                                GFP_KERNEL | __GFP_NOFAIL);
+
+       z_erofs_parse_out_bvecs(be);
+       err2 = z_erofs_parse_in_bvecs(be, &overlapped);
+       if (err2)
+               err = err2;
        if (err)
                goto out;
 
-       llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
-       if (nr_pages << PAGE_SHIFT >= pcl->pageofs_out + llen) {
-               outputsize = llen;
-               partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
-       } else {
-               outputsize = (nr_pages << PAGE_SHIFT) - pcl->pageofs_out;
-               partial = true;
-       }
-
        if (z_erofs_is_inline_pcluster(pcl))
                inputsize = pcl->tailpacking_size;
        else
                inputsize = pclusterpages * PAGE_SIZE;
 
        err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
-                                       .sb = sb,
-                                       .in = compressed_pages,
-                                       .out = pages,
+                                       .sb = be->sb,
+                                       .in = be->compressed_pages,
+                                       .out = be->decompressed_pages,
                                        .pageofs_in = pcl->pageofs_in,
                                        .pageofs_out = pcl->pageofs_out,
                                        .inputsize = inputsize,
-                                       .outputsize = outputsize,
+                                       .outputsize = pcl->length,
                                        .alg = pcl->algorithmformat,
                                        .inplace_io = overlapped,
-                                       .partial_decoding = partial
-                                }, pagepool);
+                                       .partial_decoding = pcl->partial,
+                                       .fillgaps = pcl->multibases,
+                                }, be->pagepool);
 
 out:
        /* must handle all compressed pages before actual file pages */
        if (z_erofs_is_inline_pcluster(pcl)) {
-               page = compressed_pages[0];
-               WRITE_ONCE(compressed_pages[0], NULL);
+               page = pcl->compressed_bvecs[0].page;
+               WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
                put_page(page);
        } else {
                for (i = 0; i < pclusterpages; ++i) {
-                       page = compressed_pages[i];
+                       page = pcl->compressed_bvecs[i].page;
 
                        if (erofs_page_is_managed(sbi, page))
                                continue;
 
                        /* recycle all individual short-lived pages */
-                       (void)z_erofs_put_shortlivedpage(pagepool, page);
-                       WRITE_ONCE(compressed_pages[i], NULL);
+                       (void)z_erofs_put_shortlivedpage(be->pagepool, page);
+                       WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
                }
        }
+       if (be->compressed_pages < be->onstack_pages ||
+           be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
+               kvfree(be->compressed_pages);
+       z_erofs_fill_other_copies(be, err);
 
-       for (i = 0; i < nr_pages; ++i) {
-               page = pages[i];
+       for (i = 0; i < be->nr_pages; ++i) {
+               page = be->decompressed_pages[i];
                if (!page)
                        continue;
 
                DBG_BUGON(z_erofs_page_is_invalidated(page));
 
                /* recycle all individual short-lived pages */
-               if (z_erofs_put_shortlivedpage(pagepool, page))
+               if (z_erofs_put_shortlivedpage(be->pagepool, page))
                        continue;
-
-               if (err < 0)
-                       SetPageError(page);
-
+               if (err)
+                       z_erofs_page_mark_eio(page);
                z_erofs_onlinepage_endio(page);
        }
 
-       if (pages == z_pagemap_global)
-               mutex_unlock(&z_pagemap_global_lock);
-       else if (pages != pages_onstack)
-               kvfree(pages);
+       if (be->decompressed_pages != be->onstack_pages)
+               kvfree(be->decompressed_pages);
 
-       pcl->nr_pages = 0;
+       pcl->length = 0;
+       pcl->partial = true;
+       pcl->multibases = false;
+       pcl->bvset.nextpage = NULL;
        pcl->vcnt = 0;
 
        /* pcluster lock MUST be taken before the following line */
@@ -997,22 +1085,25 @@ out:
 static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
                                     struct page **pagepool)
 {
+       struct z_erofs_decompress_backend be = {
+               .sb = io->sb,
+               .pagepool = pagepool,
+               .decompressed_secondary_bvecs =
+                       LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
+       };
        z_erofs_next_pcluster_t owned = io->head;
 
        while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
-               struct z_erofs_pcluster *pcl;
-
-               /* no possible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */
+               /* impossible that 'owned' equals Z_EROFS_PCLUSTER_TAIL */
                DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
-
-               /* no possible that 'owned' equals NULL */
+               /* impossible that 'owned' equals Z_EROFS_PCLUSTER_NIL */
                DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
 
-               pcl = container_of(owned, struct z_erofs_pcluster, next);
-               owned = READ_ONCE(pcl->next);
+               be.pcl = container_of(owned, struct z_erofs_pcluster, next);
+               owned = READ_ONCE(be.pcl->next);
 
-               z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
-               erofs_workgroup_put(&pcl->obj);
+               z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
+               erofs_workgroup_put(&be.pcl->obj);
        }
 }
 
@@ -1038,7 +1129,6 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
        if (sync) {
                if (!atomic_add_return(bios, &io->pending_bios))
                        complete(&io->u.done);
-
                return;
        }
 
@@ -1071,7 +1161,7 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
        int justfound;
 
 repeat:
-       page = READ_ONCE(pcl->compressed_pages[nr]);
+       page = READ_ONCE(pcl->compressed_bvecs[nr].page);
        oldpage = page;
 
        if (!page)
@@ -1087,7 +1177,7 @@ repeat:
         * otherwise, it will go inplace I/O path instead.
         */
        if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
-               WRITE_ONCE(pcl->compressed_pages[nr], page);
+               WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
                set_page_private(page, 0);
                tocache = true;
                goto out_tocache;
@@ -1113,14 +1203,13 @@ repeat:
 
        /* the page is still in manage cache */
        if (page->mapping == mc) {
-               WRITE_ONCE(pcl->compressed_pages[nr], page);
+               WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
 
-               ClearPageError(page);
                if (!PagePrivate(page)) {
                        /*
                         * impossible to be !PagePrivate(page) for
                         * the current restriction as well if
-                        * the page is already in compressed_pages[].
+                        * the page is already in compressed_bvecs[].
                         */
                        DBG_BUGON(!justfound);
 
@@ -1149,7 +1238,8 @@ repeat:
        put_page(page);
 out_allocpage:
        page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
-       if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
+       if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page,
+                              oldpage, page)) {
                erofs_pagepool_add(pagepool, page);
                cond_resched();
                goto repeat;
@@ -1186,6 +1276,7 @@ fg_out:
                q = fgq;
                init_completion(&fgq->u.done);
                atomic_set(&fgq->pending_bios, 0);
+               q->eio = false;
        }
        q->sb = sb;
        q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
@@ -1246,26 +1337,25 @@ static void z_erofs_decompressqueue_endio(struct bio *bio)
                DBG_BUGON(PageUptodate(page));
                DBG_BUGON(z_erofs_page_is_invalidated(page));
 
-               if (err)
-                       SetPageError(page);
-
                if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
                        if (!err)
                                SetPageUptodate(page);
                        unlock_page(page);
                }
        }
+       if (err)
+               q->eio = true;
        z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
        bio_put(bio);
 }
 
-static void z_erofs_submit_queue(struct super_block *sb,
-                                struct z_erofs_decompress_frontend *f,
+static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
                                 struct page **pagepool,
                                 struct z_erofs_decompressqueue *fgq,
                                 bool *force_fg)
 {
-       struct erofs_sb_info *const sbi = EROFS_SB(sb);
+       struct super_block *sb = f->inode->i_sb;
+       struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
        z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
        struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
        void *bi_private;
@@ -1317,7 +1407,7 @@ static void z_erofs_submit_queue(struct super_block *sb,
                        struct page *page;
 
                        page = pickup_page_for_submission(pcl, i++, pagepool,
-                                                         MNGD_MAPPING(sbi));
+                                                         mc);
                        if (!page)
                                continue;
 
@@ -1369,15 +1459,14 @@ submit_bio_retry:
        z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
 }
 
-static void z_erofs_runqueue(struct super_block *sb,
-                            struct z_erofs_decompress_frontend *f,
+static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
                             struct page **pagepool, bool force_fg)
 {
        struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
        if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
                return;
-       z_erofs_submit_queue(sb, f, pagepool, io, &force_fg);
+       z_erofs_submit_queue(f, pagepool, io, &force_fg);
 
        /* handle bypass queue (no i/o pclusters) immediately */
        z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
@@ -1475,7 +1564,7 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
        (void)z_erofs_collector_end(&f);
 
        /* if some compressed cluster ready, need submit them anyway */
-       z_erofs_runqueue(inode->i_sb, &f, &pagepool,
+       z_erofs_runqueue(&f, &pagepool,
                         z_erofs_get_sync_decompress_policy(sbi, 0));
 
        if (err)
@@ -1524,7 +1613,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
        z_erofs_pcluster_readmore(&f, rac, 0, &pagepool, false);
        (void)z_erofs_collector_end(&f);
 
-       z_erofs_runqueue(inode->i_sb, &f, &pagepool,
+       z_erofs_runqueue(&f, &pagepool,
                         z_erofs_get_sync_decompress_policy(sbi, nr_pages));
        erofs_put_metabuf(&f.map.buf);
        erofs_release_pages(&pagepool);
index 58053bb..e7f04c4 100644
@@ -7,13 +7,10 @@
 #define __EROFS_FS_ZDATA_H
 
 #include "internal.h"
-#include "zpvec.h"
+#include "tagptr.h"
 
 #define Z_EROFS_PCLUSTER_MAX_PAGES     (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
-#define Z_EROFS_NR_INLINE_PAGEVECS      3
-
-#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
-#define Z_EROFS_PCLUSTER_LENGTH_BIT     1
+#define Z_EROFS_INLINE_BVECS           2
 
 /*
  * let's leave a type here in case of introducing
  * a multi-reference pcluster later.
  */
 typedef void *z_erofs_next_pcluster_t;
 
+struct z_erofs_bvec {
+       struct page *page;
+       int offset;
+       unsigned int end;
+};
+
+#define __Z_EROFS_BVSET(name, total) \
+struct name { \
+       /* point to the next page which contains the following bvecs */ \
+       struct page *nextpage; \
+       struct z_erofs_bvec bvec[total]; \
+}
+__Z_EROFS_BVSET(z_erofs_bvset,);
+__Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
+
 /*
  * Structure fields follow one of the following exclusion rules.
  *
@@ -38,24 +50,21 @@ struct z_erofs_pcluster {
        /* A: point to next chained pcluster or TAILs */
        z_erofs_next_pcluster_t next;
 
-       /* A: lower limit of decompressed length and if full length or not */
+       /* L: the maximum decompression size of this round */
        unsigned int length;
 
+       /* L: total number of bvecs */
+       unsigned int vcnt;
+
        /* I: page offset of start position of decompression */
        unsigned short pageofs_out;
 
        /* I: page offset of inline compressed data */
        unsigned short pageofs_in;
 
-       /* L: maximum relative page index in pagevec[] */
-       unsigned short nr_pages;
-
-       /* L: total number of pages in pagevec[] */
-       unsigned int vcnt;
-
        union {
-               /* L: inline a certain number of pagevecs for bootstrap */
-               erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];
+               /* L: inline a certain number of bvec for bootstrap */
+               struct z_erofs_bvset_inline bvset;
 
                /* I: can be used to free the pcluster by RCU. */
                struct rcu_head rcu;
@@ -72,8 +81,14 @@ struct z_erofs_pcluster {
        /* I: compression algorithm format */
        unsigned char algorithmformat;
 
-       /* A: compressed pages (can be cached or inplaced pages) */
-       struct page *compressed_pages[];
+       /* L: whether partial decompression or not */
+       bool partial;
+
+       /* L: indicate several pageofs_outs or not */
+       bool multibases;
+
+       /* A: compressed bvecs (can be cached or inplaced pages) */
+       struct z_erofs_bvec compressed_bvecs[];
 };
 
 /* let's avoid the valid 32-bit kernel addresses */
@@ -94,6 +109,8 @@ struct z_erofs_decompressqueue {
                struct completion done;
                struct work_struct work;
        } u;
+
+       bool eio;
 };
 
 static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
@@ -108,38 +125,17 @@ static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
        return pcl->pclusterpages;
 }
 
-#define Z_EROFS_ONLINEPAGE_COUNT_BITS   2
-#define Z_EROFS_ONLINEPAGE_COUNT_MASK   ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
-#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT  (Z_EROFS_ONLINEPAGE_COUNT_BITS)
-
 /*
- * waiters (aka. ongoing_packs): # to unlock the page
- * sub-index: 0 - for partial page, >= 1 full page sub-index
+ * bit 31: I/O error occurred on this page
+ * bit 0 - 30: remaining parts to complete this page
  */
-typedef atomic_t z_erofs_onlinepage_t;
-
-/* type punning */
-union z_erofs_onlinepage_converter {
-       z_erofs_onlinepage_t *o;
-       unsigned long *v;
-};
-
-static inline unsigned int z_erofs_onlinepage_index(struct page *page)
-{
-       union z_erofs_onlinepage_converter u;
-
-       DBG_BUGON(!PagePrivate(page));
-       u.v = &page_private(page);
-
-       return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
-}
+#define Z_EROFS_PAGE_EIO                       (1 << 31)
 
 static inline void z_erofs_onlinepage_init(struct page *page)
 {
        union {
-               z_erofs_onlinepage_t o;
+               atomic_t o;
                unsigned long v;
-       /* keep from being unlocked in advance */
        } u = { .o = ATOMIC_INIT(1) };
 
        set_page_private(page, u.v);
@@ -147,49 +143,36 @@ static inline void z_erofs_onlinepage_init(struct page *page)
        SetPagePrivate(page);
 }
 
-static inline void z_erofs_onlinepage_fixup(struct page *page,
-       uintptr_t index, bool down)
+static inline void z_erofs_onlinepage_split(struct page *page)
 {
-       union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
-       int orig, orig_index, val;
-
-repeat:
-       orig = atomic_read(u.o);
-       orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
-       if (orig_index) {
-               if (!index)
-                       return;
+       atomic_inc((atomic_t *)&page->private);
+}
 
-               DBG_BUGON(orig_index != index);
-       }
+static inline void z_erofs_page_mark_eio(struct page *page)
+{
+       int orig;
 
-       val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
-               ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
-       if (atomic_cmpxchg(u.o, orig, val) != orig)
-               goto repeat;
+       do {
+               orig = atomic_read((atomic_t *)&page->private);
+       } while (atomic_cmpxchg((atomic_t *)&page->private, orig,
+                               orig | Z_EROFS_PAGE_EIO) != orig);
 }
 
 static inline void z_erofs_onlinepage_endio(struct page *page)
 {
-       union z_erofs_onlinepage_converter u;
        unsigned int v;
 
        DBG_BUGON(!PagePrivate(page));
-       u.v = &page_private(page);
-
-       v = atomic_dec_return(u.o);
-       if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
+       v = atomic_dec_return((atomic_t *)&page->private);
+       if (!(v & ~Z_EROFS_PAGE_EIO)) {
                set_page_private(page, 0);
                ClearPagePrivate(page);
-               if (!PageError(page))
+               if (!(v & Z_EROFS_PAGE_EIO))
                        SetPageUptodate(page);
                unlock_page(page);
        }
-       erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
 }
 
-#define Z_EROFS_VMAP_ONSTACK_PAGES     \
-       min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
-#define Z_EROFS_VMAP_GLOBAL_PAGES      2048
+#define Z_EROFS_ONSTACK_PAGES          32
 
 #endif
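
Taken together, the zdata changes above retire per-page PageError state: an I/O error is now latched once per queue (q->eio) and per page as bit 31 of the page-private word, while bits 0-30 count the decompression parts still pending. A minimal userspace sketch of that packing, using C11 atomics; all names below are illustrative stand-ins, not the kernel's:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_EIO (1u << 31)             /* bit 31: I/O error          */

    static atomic_uint page_state;          /* bits 0-30: pending parts   */

    static void page_init(void)  { atomic_init(&page_state, 1u); }
    static void page_split(void) { atomic_fetch_add(&page_state, 1u); }
    static void page_eio(void)   { atomic_fetch_or(&page_state, PAGE_EIO); }

    /* returns true once the last pending part has completed */
    static bool page_endio(void)
    {
            unsigned int v = atomic_fetch_sub(&page_state, 1u) - 1u;

            if (v & ~PAGE_EIO)              /* other parts still pending  */
                    return false;
            puts(v & PAGE_EIO ? "page done: EIO" : "page done: uptodate");
            return true;
    }

    int main(void)
    {
            page_init();                    /* one part by default        */
            page_split();                   /* a second part joins        */
            page_eio();                     /* one part hit an I/O error  */
            page_endio();                   /* one part left: not done    */
            return !page_endio();           /* done, reported as EIO      */
    }

The kernel variant additionally clears PagePrivate and unlocks the page once the count hits zero, marking it uptodate only if the EIO bit stayed clear.
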
diff --git a/fs/erofs/zpvec.h b/fs/erofs/zpvec.h
deleted file mode 100644
index b05464f..0000000
--- a/fs/erofs/zpvec.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2018 HUAWEI, Inc.
- *             https://www.huawei.com/
- */
-#ifndef __EROFS_FS_ZPVEC_H
-#define __EROFS_FS_ZPVEC_H
-
-#include "tagptr.h"
-
-/* page type in pagevec for decompress subsystem */
-enum z_erofs_page_type {
-       /* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
-       Z_EROFS_PAGE_TYPE_EXCLUSIVE,
-
-       Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,
-
-       Z_EROFS_VLE_PAGE_TYPE_HEAD,
-       Z_EROFS_VLE_PAGE_TYPE_MAX
-};
-
-extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
-       __bad_page_type_exclusive(void);
-
-/* pagevec tagged pointer */
-typedef tagptr2_t      erofs_vtptr_t;
-
-/* pagevec collector */
-struct z_erofs_pagevec_ctor {
-       struct page *curr, *next;
-       erofs_vtptr_t *pages;
-
-       unsigned int nr, index;
-};
-
-static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
-                                            bool atomic)
-{
-       if (!ctor->curr)
-               return;
-
-       if (atomic)
-               kunmap_atomic(ctor->pages);
-       else
-               kunmap(ctor->curr);
-}
-
-static inline struct page *
-z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
-                              unsigned int nr)
-{
-       unsigned int index;
-
-       /* keep away from occupied pages */
-       if (ctor->next)
-               return ctor->next;
-
-       for (index = 0; index < nr; ++index) {
-               const erofs_vtptr_t t = ctor->pages[index];
-               const unsigned int tags = tagptr_unfold_tags(t);
-
-               if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
-                       return tagptr_unfold_ptr(t);
-       }
-       DBG_BUGON(nr >= ctor->nr);
-       return NULL;
-}
-
-static inline void
-z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor,
-                             bool atomic)
-{
-       struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);
-
-       z_erofs_pagevec_ctor_exit(ctor, atomic);
-
-       ctor->curr = next;
-       ctor->next = NULL;
-       ctor->pages = atomic ?
-               kmap_atomic(ctor->curr) : kmap(ctor->curr);
-
-       ctor->nr = PAGE_SIZE / sizeof(struct page *);
-       ctor->index = 0;
-}
-
-static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
-                                            unsigned int nr,
-                                            erofs_vtptr_t *pages,
-                                            unsigned int i)
-{
-       ctor->nr = nr;
-       ctor->curr = ctor->next = NULL;
-       ctor->pages = pages;
-
-       if (i >= nr) {
-               i -= nr;
-               z_erofs_pagevec_ctor_pagedown(ctor, false);
-               while (i > ctor->nr) {
-                       i -= ctor->nr;
-                       z_erofs_pagevec_ctor_pagedown(ctor, false);
-               }
-       }
-       ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
-       ctor->index = i;
-}
-
-static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
-                                          struct page *page,
-                                          enum z_erofs_page_type type,
-                                          bool pvec_safereuse)
-{
-       if (!ctor->next) {
-               /* some pages cannot be reused as pvec safely without I/O */
-               if (type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse)
-                       type = Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED;
-
-               if (type != Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
-                   ctor->index + 1 == ctor->nr)
-                       return false;
-       }
-
-       if (ctor->index >= ctor->nr)
-               z_erofs_pagevec_ctor_pagedown(ctor, false);
-
-       /* exclusive page type must be 0 */
-       if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
-               __bad_page_type_exclusive();
-
-       /* should remind that collector->next never equal to 1, 2 */
-       if (type == (uintptr_t)ctor->next) {
-               ctor->next = page;
-       }
-       ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
-       return true;
-}
-
-static inline struct page *
-z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor,
-                       enum z_erofs_page_type *type)
-{
-       erofs_vtptr_t t;
-
-       if (ctor->index >= ctor->nr) {
-               DBG_BUGON(!ctor->next);
-               z_erofs_pagevec_ctor_pagedown(ctor, true);
-       }
-
-       t = ctor->pages[ctor->index];
-
-       *type = tagptr_unfold_tags(t);
-
-       /* should remind that collector->next never equal to 1, 2 */
-       if (*type == (uintptr_t)ctor->next)
-               ctor->next = tagptr_unfold_ptr(t);
-
-       ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0);
-       return tagptr_unfold_ptr(t);
-}
-#endif
diff --git a/fs/exec.c b/fs/exec.c
index 0989fb8..7781232 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1301,7 +1301,7 @@ int begin_new_exec(struct linux_binprm * bprm)
        bprm->mm = NULL;
 
 #ifdef CONFIG_POSIX_TIMERS
-       exit_itimers(me->signal);
+       exit_itimers(me);
        flush_itimer_signals();
 #endif
 
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 76acc37..c6eaf7e 100644
@@ -1198,7 +1198,9 @@ static int __exfat_rename(struct inode *old_parent_inode,
                return -ENOENT;
        }
 
-       exfat_chain_dup(&olddir, &ei->dir);
+       exfat_chain_set(&olddir, EXFAT_I(old_parent_inode)->start_clu,
+               EXFAT_B_TO_CLU_ROUND_UP(i_size_read(old_parent_inode), sbi),
+               EXFAT_I(old_parent_inode)->flags);
        dentry = ei->entry;
 
        ep = exfat_get_dentry(sb, &olddir, dentry, &old_bh);
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 2c2f179..43de293 100644
@@ -672,17 +672,14 @@ int ext2_empty_dir (struct inode * inode)
        void *page_addr = NULL;
        struct page *page = NULL;
        unsigned long i, npages = dir_pages(inode);
-       int dir_has_error = 0;
 
        for (i = 0; i < npages; i++) {
                char *kaddr;
                ext2_dirent * de;
-               page = ext2_get_page(inode, i, dir_has_error, &page_addr);
+               page = ext2_get_page(inode, i, 0, &page_addr);
 
-               if (IS_ERR(page)) {
-                       dir_has_error = 1;
-                       continue;
-               }
+               if (IS_ERR(page))
+                       goto not_empty;
 
                kaddr = page_addr;
                de = (ext2_dirent *)kaddr;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index e6b9322..7a192e4 100644
@@ -1679,14 +1679,14 @@ int ext2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
        if (error)
                return error;
 
-       if (is_quota_modification(inode, iattr)) {
+       if (is_quota_modification(mnt_userns, inode, iattr)) {
                error = dquot_initialize(inode);
                if (error)
                        return error;
        }
-       if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
-           (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
-               error = dquot_transfer(inode, iattr);
+       if (i_uid_needs_update(mnt_userns, iattr, inode) ||
+           i_gid_needs_update(mnt_userns, iattr, inode)) {
+               error = dquot_transfer(mnt_userns, inode, iattr);
                if (error)
                        return error;
        }
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index f6a19f6..6f475d2 100644
@@ -1059,9 +1059,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
                        sbi->s_frags_per_group);
                goto failed_mount;
        }
-       if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
+       if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+           sbi->s_inodes_per_group > sb->s_blocksize * 8) {
                ext2_msg(sb, KERN_ERR,
-                       "error: #inodes per group too big: %lu",
+                       "error: invalid #inodes per group: %lu",
                        sbi->s_inodes_per_group);
                goto failed_mount;
        }
@@ -1071,6 +1072,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
                                le32_to_cpu(es->s_first_data_block) - 1)
                                        / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
+       if ((u64)sbi->s_groups_count * sbi->s_inodes_per_group !=
+           le32_to_cpu(es->s_inodes_count)) {
+               ext2_msg(sb, KERN_ERR, "error: invalid #inodes: %u vs computed %llu",
+                        le32_to_cpu(es->s_inodes_count),
+                        (u64)sbi->s_groups_count * sbi->s_inodes_per_group);
+               goto failed_mount;
+       }
        db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
                   EXT2_DESC_PER_BLOCK(sb);
        sbi->s_group_desc = kmalloc_array(db_count,
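
The added check above ties s_inodes_count to the geometry implied by the block counts: the derived group count, multiplied by s_inodes_per_group, must reproduce it exactly. A self-contained sketch of that arithmetic with made-up superblock values (not taken from a real image):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t blocks_count     = 8193;  /* s_blocks_count          */
            uint32_t first_data_block = 1;     /* s_first_data_block      */
            uint32_t blocks_per_group = 8192;  /* EXT2_BLOCKS_PER_GROUP() */
            uint32_t inodes_per_group = 2048;  /* s_inodes_per_group      */
            uint32_t inodes_count     = 2048;  /* s_inodes_count          */

            /* same derivation as s_groups_count in the hunk above */
            uint32_t groups = (blocks_count - first_data_block - 1)
                              / blocks_per_group + 1;

            if ((uint64_t)groups * inodes_per_group != inodes_count)
                    printf("invalid #inodes: %u vs computed %llu\n",
                           inodes_count,
                           (unsigned long long)groups * inodes_per_group);
            else
                    printf("%u group(s), geometry consistent\n", groups);
            return 0;
    }
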
@@ -1490,8 +1498,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
                len = i_size-off;
        toread = len;
        while (toread > 0) {
-               tocopy = sb->s_blocksize - offset < toread ?
-                               sb->s_blocksize - offset : toread;
+               tocopy = min_t(size_t, sb->s_blocksize - offset, toread);
 
                tmp_bh.b_state = 0;
                tmp_bh.b_size = sb->s_blocksize;
@@ -1529,8 +1536,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
        struct buffer_head *bh;
 
        while (towrite > 0) {
-               tocopy = sb->s_blocksize - offset < towrite ?
-                               sb->s_blocksize - offset : towrite;
+               tocopy = min_t(size_t, sb->s_blocksize - offset, towrite);
 
                tmp_bh.b_state = 0;
                tmp_bh.b_size = sb->s_blocksize;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3dce7d0..3dcc1dd 100644
@@ -829,7 +829,7 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
        ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
                   inode->i_ino, create);
        return _ext4_get_block(inode, iblock, bh_result,
-                              EXT4_GET_BLOCKS_IO_CREATE_EXT);
+                              EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
 }
 
 /* Maximum number of blocks we map for direct IO at once. */
@@ -5350,14 +5350,14 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
        if (error)
                return error;
 
-       if (is_quota_modification(inode, attr)) {
+       if (is_quota_modification(mnt_userns, inode, attr)) {
                error = dquot_initialize(inode);
                if (error)
                        return error;
        }
 
-       if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
-           (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
+       if (i_uid_needs_update(mnt_userns, attr, inode) ||
+           i_gid_needs_update(mnt_userns, attr, inode)) {
                handle_t *handle;
 
                /* (user+group)*(old+new) structure, inode write (sb,
@@ -5374,7 +5374,7 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                 * counts xattr inode references.
                 */
                down_read(&EXT4_I(inode)->xattr_sem);
-               error = dquot_transfer(inode, attr);
+               error = dquot_transfer(mnt_userns, inode, attr);
                up_read(&EXT4_I(inode)->xattr_sem);
 
                if (error) {
@@ -5383,10 +5383,8 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                }
                /* Update corresponding info in inode so that everything is in
                 * one transaction */
-               if (attr->ia_valid & ATTR_UID)
-                       inode->i_uid = attr->ia_uid;
-               if (attr->ia_valid & ATTR_GID)
-                       inode->i_gid = attr->ia_gid;
+               i_uid_update(mnt_userns, attr, inode);
+               i_gid_update(mnt_userns, attr, inode);
                error = ext4_mark_inode_dirty(handle, inode);
                ext4_journal_stop(handle);
                if (unlikely(error)) {
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 9f12f29..9e06334 100644
@@ -4104,6 +4104,15 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
        size = size >> bsbits;
        start = start_off >> bsbits;
 
+       /*
+        * For tiny groups (smaller than 8MB) the chosen allocation
+        * alignment may be larger than group size. Make sure the
+        * alignment does not move allocation to a different group which
+        * makes mballoc fail assertions later.
+        */
+       start = max(start, rounddown(ac->ac_o_ex.fe_logical,
+                       (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
+
        /* don't cover already allocated blocks in selected range */
        if (ar->pleft && start <= ar->lleft) {
                size -= ar->lleft + 1 - start;
@@ -4176,7 +4185,22 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
        }
        rcu_read_unlock();
 
-       if (start + size <= ac->ac_o_ex.fe_logical &&
+       /*
+        * In this function "start" and "size" are normalized for better
+        * alignment and length such that we could preallocate more blocks.
+        * This normalization is done such that original request of
+        * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
+        * "size" boundaries.
+        * (Note fe_len can be relaxed since FS block allocation API does not
+        * provide guarantee on number of contiguous blocks allocation since that
+        * depends upon free space left, etc).
+        * In case of inode pa, later we use the allocated blocks
+        * [pa_start + fe_logical - pa_lstart, fe_len/size] from the preallocated
+        * range of goal/best blocks [start, size] to put it at the
+        * ac_o_ex.fe_logical extent of this inode.
+        * (See ext4_mb_use_inode_pa() for more details)
+        */
+       if (start + size <= ac->ac_o_ex.fe_logical ||
                        start > ac->ac_o_ex.fe_logical) {
                ext4_msg(ac->ac_sb, KERN_ERR,
                         "start %lu, size %lu, fe_logical %lu",
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 7a5353a..42f5905 100644
@@ -438,7 +438,7 @@ int ext4_ext_migrate(struct inode *inode)
 
        /*
         * Worst case we can touch the allocation bitmaps and a block
-        * group descriptor block.  We do need need to worry about
+        * group descriptor block.  We do need to worry about
         * credits for modifying the quota inode.
         */
        handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 47d0ca4..db4ba99 100644
@@ -1929,7 +1929,8 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
                        struct dx_hash_info *hinfo)
 {
        unsigned blocksize = dir->i_sb->s_blocksize;
-       unsigned count, continued;
+       unsigned continued;
+       int count;
        struct buffer_head *bh2;
        ext4_lblk_t newblock;
        u32 hash2;
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 14695e2..97fa7b4 100644
@@ -465,7 +465,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
        /*
         * In the first loop we prepare and mark buffers to submit. We have to
         * mark all buffers in the page before submitting so that
-        * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
+        * end_page_writeback() cannot be called from ext4_end_bio() when IO
         * on the first buffer finishes and we are still working on submitting
         * the second buffer.
         */
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 90a941d..8b70a47 100644
@@ -54,6 +54,16 @@ int ext4_resize_begin(struct super_block *sb)
                return -EPERM;
 
        /*
+        * If the reserved GDT blocks is non-zero, the resize_inode feature
+        * should always be set.
+        */
+       if (EXT4_SB(sb)->s_es->s_reserved_gdt_blocks &&
+           !ext4_has_feature_resize_inode(sb)) {
+               ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
+               return -EFSCORRUPTED;
+       }
+
+       /*
         * If we are not using the primary superblock/GDT copy don't resize,
          * because the user tools have no way of handling this.  Probably a
          * bad time to do it anyways.
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 450c918..845f2f8 100644
@@ -87,7 +87,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
 static int ext4_validate_options(struct fs_context *fc);
 static int ext4_check_opt_consistency(struct fs_context *fc,
                                      struct super_block *sb);
-static int ext4_apply_options(struct fs_context *fc, struct super_block *sb);
+static void ext4_apply_options(struct fs_context *fc, struct super_block *sb);
 static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param);
 static int ext4_get_tree(struct fs_context *fc);
 static int ext4_reconfigure(struct fs_context *fc);
@@ -1870,31 +1870,12 @@ ext4_sb_read_encoding(const struct ext4_super_block *es)
 }
 #endif
 
-static int ext4_set_test_dummy_encryption(struct super_block *sb, char *arg)
-{
-#ifdef CONFIG_FS_ENCRYPTION
-       struct ext4_sb_info *sbi = EXT4_SB(sb);
-       int err;
-
-       err = fscrypt_set_test_dummy_encryption(sb, arg,
-                                               &sbi->s_dummy_enc_policy);
-       if (err) {
-               ext4_msg(sb, KERN_WARNING,
-                        "Error while setting test dummy encryption [%d]", err);
-               return err;
-       }
-       ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled");
-#endif
-       return 0;
-}
-
 #define EXT4_SPEC_JQUOTA                       (1 <<  0)
 #define EXT4_SPEC_JQFMT                                (1 <<  1)
 #define EXT4_SPEC_DATAJ                                (1 <<  2)
 #define EXT4_SPEC_SB_BLOCK                     (1 <<  3)
 #define EXT4_SPEC_JOURNAL_DEV                  (1 <<  4)
 #define EXT4_SPEC_JOURNAL_IOPRIO               (1 <<  5)
-#define EXT4_SPEC_DUMMY_ENCRYPTION             (1 <<  6)
 #define EXT4_SPEC_s_want_extra_isize           (1 <<  7)
 #define EXT4_SPEC_s_max_batch_time             (1 <<  8)
 #define EXT4_SPEC_s_min_batch_time             (1 <<  9)
@@ -1911,7 +1892,7 @@ static int ext4_set_test_dummy_encryption(struct super_block *sb, char *arg)
 
 struct ext4_fs_context {
        char            *s_qf_names[EXT4_MAXQUOTAS];
-       char            *test_dummy_enc_arg;
+       struct fscrypt_dummy_policy dummy_enc_policy;
        int             s_jquota_fmt;   /* Format of quota to use */
 #ifdef CONFIG_EXT4_DEBUG
        int s_fc_debug_max_replay;
@@ -1953,7 +1934,7 @@ static void ext4_fc_free(struct fs_context *fc)
        for (i = 0; i < EXT4_MAXQUOTAS; i++)
                kfree(ctx->s_qf_names[i]);
 
-       kfree(ctx->test_dummy_enc_arg);
+       fscrypt_free_dummy_policy(&ctx->dummy_enc_policy);
        kfree(ctx);
 }
 
@@ -2029,6 +2010,29 @@ static int unnote_qf_name(struct fs_context *fc, int qtype)
 }
 #endif
 
+static int ext4_parse_test_dummy_encryption(const struct fs_parameter *param,
+                                           struct ext4_fs_context *ctx)
+{
+       int err;
+
+       if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
+               ext4_msg(NULL, KERN_WARNING,
+                        "test_dummy_encryption option not supported");
+               return -EINVAL;
+       }
+       err = fscrypt_parse_test_dummy_encryption(param,
+                                                 &ctx->dummy_enc_policy);
+       if (err == -EINVAL) {
+               ext4_msg(NULL, KERN_WARNING,
+                        "Value of option \"%s\" is unrecognized", param->key);
+       } else if (err == -EEXIST) {
+               ext4_msg(NULL, KERN_WARNING,
+                        "Conflicting test_dummy_encryption options");
+               return -EINVAL;
+       }
+       return err;
+}
+
 #define EXT4_SET_CTX(name)                                             \
 static inline void ctx_set_##name(struct ext4_fs_context *ctx,         \
                                  unsigned long flag)                   \
@@ -2291,29 +2295,7 @@ static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
                ctx->spec |= EXT4_SPEC_JOURNAL_IOPRIO;
                return 0;
        case Opt_test_dummy_encryption:
-#ifdef CONFIG_FS_ENCRYPTION
-               if (param->type == fs_value_is_flag) {
-                       ctx->spec |= EXT4_SPEC_DUMMY_ENCRYPTION;
-                       ctx->test_dummy_enc_arg = NULL;
-                       return 0;
-               }
-               if (*param->string &&
-                   !(!strcmp(param->string, "v1") ||
-                     !strcmp(param->string, "v2"))) {
-                       ext4_msg(NULL, KERN_WARNING,
-                                "Value of option \"%s\" is unrecognized",
-                                param->key);
-                       return -EINVAL;
-               }
-               ctx->spec |= EXT4_SPEC_DUMMY_ENCRYPTION;
-               ctx->test_dummy_enc_arg = kmemdup_nul(param->string, param->size,
-                                                     GFP_KERNEL);
-               return 0;
-#else
-               ext4_msg(NULL, KERN_WARNING,
-                        "test_dummy_encryption option not supported");
-               return -EINVAL;
-#endif
+               return ext4_parse_test_dummy_encryption(param, ctx);
        case Opt_dax:
        case Opt_dax_type:
 #ifdef CONFIG_FS_DAX
@@ -2504,7 +2486,8 @@ parse_failed:
        if (s_ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)
                m_ctx->journal_ioprio = s_ctx->journal_ioprio;
 
-       ret = ext4_apply_options(fc, sb);
+       ext4_apply_options(fc, sb);
+       ret = 0;
 
 out_free:
        if (fc) {
@@ -2673,11 +2656,11 @@ err_jquota_specified:
 static int ext4_check_test_dummy_encryption(const struct fs_context *fc,
                                            struct super_block *sb)
 {
-#ifdef CONFIG_FS_ENCRYPTION
        const struct ext4_fs_context *ctx = fc->fs_private;
        const struct ext4_sb_info *sbi = EXT4_SB(sb);
+       int err;
 
-       if (!(ctx->spec & EXT4_SPEC_DUMMY_ENCRYPTION))
+       if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy))
                return 0;
 
        if (!ext4_has_feature_encrypt(sb)) {
@@ -2691,14 +2674,46 @@ static int ext4_check_test_dummy_encryption(const struct fs_context *fc,
         * needed to allow it to be set or changed during remount.  We do allow
         * it to be specified during remount, but only if there is no change.
         */
-       if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE &&
-           !sbi->s_dummy_enc_policy.policy) {
+       if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+               if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy,
+                                                &ctx->dummy_enc_policy))
+                       return 0;
                ext4_msg(NULL, KERN_WARNING,
-                        "Can't set test_dummy_encryption on remount");
+                        "Can't set or change test_dummy_encryption on remount");
                return -EINVAL;
        }
-#endif /* CONFIG_FS_ENCRYPTION */
-       return 0;
+       /* Also make sure s_mount_opts didn't contain a conflicting value. */
+       if (fscrypt_is_dummy_policy_set(&sbi->s_dummy_enc_policy)) {
+               if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy,
+                                                &ctx->dummy_enc_policy))
+                       return 0;
+               ext4_msg(NULL, KERN_WARNING,
+                        "Conflicting test_dummy_encryption options");
+               return -EINVAL;
+       }
+       /*
+        * fscrypt_add_test_dummy_key() technically changes the super_block, so
+        * technically it should be delayed until ext4_apply_options() like the
+        * other changes.  But since we never get here for remounts (see above),
+        * and this is the last chance to report errors, we do it here.
+        */
+       err = fscrypt_add_test_dummy_key(sb, &ctx->dummy_enc_policy);
+       if (err)
+               ext4_msg(NULL, KERN_WARNING,
+                        "Error adding test dummy encryption key [%d]", err);
+       return err;
+}
+
+static void ext4_apply_test_dummy_encryption(struct ext4_fs_context *ctx,
+                                            struct super_block *sb)
+{
+       if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy) ||
+           /* if already set, it was already verified to be the same */
+           fscrypt_is_dummy_policy_set(&EXT4_SB(sb)->s_dummy_enc_policy))
+               return;
+       EXT4_SB(sb)->s_dummy_enc_policy = ctx->dummy_enc_policy;
+       memset(&ctx->dummy_enc_policy, 0, sizeof(ctx->dummy_enc_policy));
+       ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled");
 }
 
 static int ext4_check_opt_consistency(struct fs_context *fc,
@@ -2785,11 +2800,10 @@ fail_dax_change_remount:
        return ext4_check_quota_consistency(fc, sb);
 }
 
-static int ext4_apply_options(struct fs_context *fc, struct super_block *sb)
+static void ext4_apply_options(struct fs_context *fc, struct super_block *sb)
 {
        struct ext4_fs_context *ctx = fc->fs_private;
        struct ext4_sb_info *sbi = fc->s_fs_info;
-       int ret = 0;
 
        sbi->s_mount_opt &= ~ctx->mask_s_mount_opt;
        sbi->s_mount_opt |= ctx->vals_s_mount_opt;
@@ -2825,11 +2839,7 @@ static int ext4_apply_options(struct fs_context *fc, struct super_block *sb)
 #endif
 
        ext4_apply_quota_options(fc, sb);
-
-       if (ctx->spec & EXT4_SPEC_DUMMY_ENCRYPTION)
-               ret = ext4_set_test_dummy_encryption(sb, ctx->test_dummy_enc_arg);
-
-       return ret;
+       ext4_apply_test_dummy_encryption(ctx, sb);
 }
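
With parsing and checking done earlier, applying the dummy encryption policy can no longer fail, which is why ext4_apply_options() now returns void. The apply step is an ownership move: the superblock takes the parsed policy and the context's copy is zeroed so the fs_context teardown cannot free it twice. A sketch of that idiom with stand-in types (not the fscrypt API):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-in for struct fscrypt_dummy_policy. */
    struct dummy_policy { char *blob; };

    static int policy_set(const struct dummy_policy *p) { return p->blob != NULL; }

    /* Move the parsed policy from the mount context into the superblock:
     * the sb takes ownership, and zeroing the source keeps the context's
     * teardown path (which frees whatever is still set) from double-freeing. */
    static void apply_policy(struct dummy_policy *sb_pol,
                             struct dummy_policy *ctx_pol)
    {
            if (!policy_set(ctx_pol) || policy_set(sb_pol))
                    return;              /* nothing to do / already set */
            *sb_pol = *ctx_pol;          /* transfer ownership          */
            memset(ctx_pol, 0, sizeof(*ctx_pol));
    }

    int main(void)
    {
            char arg[] = "v2";
            struct dummy_policy ctx = { .blob = arg }, sb = { 0 };

            apply_policy(&sb, &ctx);
            printf("sb=%s ctx=%s\n", sb.blob ? sb.blob : "-",
                   ctx.blob ? ctx.blob : "-");
            return 0;
    }
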
 
 
@@ -4552,9 +4562,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
        if (err < 0)
                goto failed_mount;
 
-       err = ext4_apply_options(fc, sb);
-       if (err < 0)
-               goto failed_mount;
+       ext4_apply_options(fc, sb);
 
 #if IS_ENABLED(CONFIG_UNICODE)
        if (ext4_has_feature_casefold(sb) && !sb->s_encoding) {
@@ -5302,14 +5310,6 @@ no_journal:
                err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
                                          GFP_KERNEL);
        }
-       /*
-        * Update the checksum after updating free space/inode
-        * counters.  Otherwise the superblock can have an incorrect
-        * checksum in the buffer cache until it is written out and
-        * e2fsprogs programs trying to open a file system immediately
-        * after it is mounted can fail.
-        */
-       ext4_superblock_csum_set(sb);
        if (!err)
                err = percpu_counter_init(&sbi->s_dirs_counter,
                                          ext4_count_dirs(sb), GFP_KERNEL);
@@ -5367,6 +5367,14 @@ no_journal:
        EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
        ext4_orphan_cleanup(sb, es);
        EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
+       /*
+        * Update the checksum after updating free space/inode counters and
+        * ext4_orphan_cleanup. Otherwise the superblock can have an incorrect
+        * checksum in the buffer cache until it is written out and
+        * e2fsprogs programs trying to open a file system immediately
+        * after it is mounted can fail.
+        */
+       ext4_superblock_csum_set(sb);
        if (needs_recovery) {
                ext4_msg(sb, KERN_INFO, "recovery complete");
                err = ext4_mark_recovery_complete(sb, es);
@@ -5898,7 +5906,6 @@ static void ext4_update_super(struct super_block *sb)
 static int ext4_commit_super(struct super_block *sb)
 {
        struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
-       int error = 0;
 
        if (!sbh)
                return -EINVAL;
@@ -5907,6 +5914,13 @@ static int ext4_commit_super(struct super_block *sb)
 
        ext4_update_super(sb);
 
+       lock_buffer(sbh);
+       /* Buffer got discarded which means block device got invalidated */
+       if (!buffer_mapped(sbh)) {
+               unlock_buffer(sbh);
+               return -EIO;
+       }
+
        if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
                /*
                 * Oh, dear.  A previous attempt to write the
@@ -5921,17 +5935,21 @@ static int ext4_commit_super(struct super_block *sb)
                clear_buffer_write_io_error(sbh);
                set_buffer_uptodate(sbh);
        }
-       BUFFER_TRACE(sbh, "marking dirty");
-       mark_buffer_dirty(sbh);
-       error = __sync_dirty_buffer(sbh,
-               REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
+       get_bh(sbh);
+       /* Clear potential dirty bit if it was journalled update */
+       clear_buffer_dirty(sbh);
+       sbh->b_end_io = end_buffer_write_sync;
+       submit_bh(REQ_OP_WRITE,
+                 REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
+       wait_on_buffer(sbh);
        if (buffer_write_io_error(sbh)) {
                ext4_msg(sb, KERN_ERR, "I/O error while writing "
                       "superblock");
                clear_buffer_write_io_error(sbh);
                set_buffer_uptodate(sbh);
+               return -EIO;
        }
-       return error;
+       return 0;
 }
 
 /*
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 0423253..564e28a 100644
@@ -1895,11 +1895,10 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 
                        unlock_buffer(bs->bh);
                        ea_bdebug(bs->bh, "cloning");
-                       s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
+                       s->base = kmemdup(BHDR(bs->bh), bs->bh->b_size, GFP_NOFS);
                        error = -ENOMEM;
                        if (s->base == NULL)
                                goto cleanup;
-                       memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
                        s->first = ENTRY(header(s->base)+1);
                        header(s->base)->h_refcount = cpu_to_le32(1);
                        s->here = ENTRY(s->base + offset);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index bd14cef..d66e37d 100644
@@ -861,10 +861,8 @@ static void __setattr_copy(struct user_namespace *mnt_userns,
 {
        unsigned int ia_valid = attr->ia_valid;
 
-       if (ia_valid & ATTR_UID)
-               inode->i_uid = attr->ia_uid;
-       if (ia_valid & ATTR_GID)
-               inode->i_gid = attr->ia_gid;
+       i_uid_update(mnt_userns, attr, inode);
+       i_gid_update(mnt_userns, attr, inode);
        if (ia_valid & ATTR_ATIME)
                inode->i_atime = attr->ia_atime;
        if (ia_valid & ATTR_MTIME)
@@ -917,17 +915,15 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
        if (err)
                return err;
 
-       if (is_quota_modification(inode, attr)) {
+       if (is_quota_modification(mnt_userns, inode, attr)) {
                err = f2fs_dquot_initialize(inode);
                if (err)
                        return err;
        }
-       if ((attr->ia_valid & ATTR_UID &&
-               !uid_eq(attr->ia_uid, inode->i_uid)) ||
-               (attr->ia_valid & ATTR_GID &&
-               !gid_eq(attr->ia_gid, inode->i_gid))) {
+       if (i_uid_needs_update(mnt_userns, attr, inode) ||
+           i_gid_needs_update(mnt_userns, attr, inode)) {
                f2fs_lock_op(F2FS_I_SB(inode));
-               err = dquot_transfer(inode, attr);
+               err = dquot_transfer(mnt_userns, inode, attr);
                if (err) {
                        set_sbi_flag(F2FS_I_SB(inode),
                                        SBI_QUOTA_NEED_REPAIR);
@@ -938,10 +934,8 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                 * update uid/gid under lock_op(), so that dquot and inode can
                 * be updated atomically.
                 */
-               if (attr->ia_valid & ATTR_UID)
-                       inode->i_uid = attr->ia_uid;
-               if (attr->ia_valid & ATTR_GID)
-                       inode->i_gid = attr->ia_gid;
+               i_uid_update(mnt_userns, attr, inode);
+               i_gid_update(mnt_userns, attr, inode);
                f2fs_mark_inode_dirty_sync(inode, true);
                f2fs_unlock_op(F2FS_I_SB(inode));
        }
diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c
index be599f3..d84c5f6 100644
@@ -91,8 +91,9 @@ static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
        unsigned int cnt;
        struct f2fs_iostat_latency iostat_lat[MAX_IO_TYPE][NR_PAGE_TYPE];
        struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
+       unsigned long flags;
 
-       spin_lock_bh(&sbi->iostat_lat_lock);
+       spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
        for (idx = 0; idx < MAX_IO_TYPE; idx++) {
                for (io = 0; io < NR_PAGE_TYPE; io++) {
                        cnt = io_lat->bio_cnt[idx][io];
@@ -106,7 +107,7 @@ static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
                        io_lat->bio_cnt[idx][io] = 0;
                }
        }
-       spin_unlock_bh(&sbi->iostat_lat_lock);
+       spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
 
        trace_f2fs_iostat_latency(sbi, iostat_lat);
 }
@@ -115,14 +116,15 @@ static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
 {
        unsigned long long iostat_diff[NR_IO_TYPE];
        int i;
+       unsigned long flags;
 
        if (time_is_after_jiffies(sbi->iostat_next_period))
                return;
 
        /* Need double check under the lock */
-       spin_lock_bh(&sbi->iostat_lock);
+       spin_lock_irqsave(&sbi->iostat_lock, flags);
        if (time_is_after_jiffies(sbi->iostat_next_period)) {
-               spin_unlock_bh(&sbi->iostat_lock);
+               spin_unlock_irqrestore(&sbi->iostat_lock, flags);
                return;
        }
        sbi->iostat_next_period = jiffies +
@@ -133,7 +135,7 @@ static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
                                sbi->prev_rw_iostat[i];
                sbi->prev_rw_iostat[i] = sbi->rw_iostat[i];
        }
-       spin_unlock_bh(&sbi->iostat_lock);
+       spin_unlock_irqrestore(&sbi->iostat_lock, flags);
 
        trace_f2fs_iostat(sbi, iostat_diff);
 
@@ -145,25 +147,27 @@ void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
        struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
        int i;
 
-       spin_lock_bh(&sbi->iostat_lock);
+       spin_lock_irq(&sbi->iostat_lock);
        for (i = 0; i < NR_IO_TYPE; i++) {
                sbi->rw_iostat[i] = 0;
                sbi->prev_rw_iostat[i] = 0;
        }
-       spin_unlock_bh(&sbi->iostat_lock);
+       spin_unlock_irq(&sbi->iostat_lock);
 
-       spin_lock_bh(&sbi->iostat_lat_lock);
+       spin_lock_irq(&sbi->iostat_lat_lock);
        memset(io_lat, 0, sizeof(struct iostat_lat_info));
-       spin_unlock_bh(&sbi->iostat_lat_lock);
+       spin_unlock_irq(&sbi->iostat_lat_lock);
 }
 
 void f2fs_update_iostat(struct f2fs_sb_info *sbi,
                        enum iostat_type type, unsigned long long io_bytes)
 {
+       unsigned long flags;
+
        if (!sbi->iostat_enable)
                return;
 
-       spin_lock_bh(&sbi->iostat_lock);
+       spin_lock_irqsave(&sbi->iostat_lock, flags);
        sbi->rw_iostat[type] += io_bytes;
 
        if (type == APP_BUFFERED_IO || type == APP_DIRECT_IO)
@@ -172,7 +176,7 @@ void f2fs_update_iostat(struct f2fs_sb_info *sbi,
        if (type == APP_BUFFERED_READ_IO || type == APP_DIRECT_READ_IO)
                sbi->rw_iostat[APP_READ_IO] += io_bytes;
 
-       spin_unlock_bh(&sbi->iostat_lock);
+       spin_unlock_irqrestore(&sbi->iostat_lock, flags);
 
        f2fs_record_iostat(sbi);
 }
@@ -185,6 +189,7 @@ static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
        struct f2fs_sb_info *sbi = iostat_ctx->sbi;
        struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
        int idx;
+       unsigned long flags;
 
        if (!sbi->iostat_enable)
                return;
@@ -202,12 +207,12 @@ static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
                        idx = WRITE_ASYNC_IO;
        }
 
-       spin_lock_bh(&sbi->iostat_lat_lock);
+       spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
        io_lat->sum_lat[idx][iotype] += ts_diff;
        io_lat->bio_cnt[idx][iotype]++;
        if (ts_diff > io_lat->peak_lat[idx][iotype])
                io_lat->peak_lat[idx][iotype] = ts_diff;
-       spin_unlock_bh(&sbi->iostat_lat_lock);
+       spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
 }
 
 void iostat_update_and_unbind_ctx(struct bio *bio, int rw)
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index c549acb..bf00d50 100644
@@ -89,8 +89,6 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
        if (test_opt(sbi, INLINE_XATTR))
                set_inode_flag(inode, FI_INLINE_XATTR);
 
-       if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
-               set_inode_flag(inode, FI_INLINE_DATA);
        if (f2fs_may_inline_dentry(inode))
                set_inode_flag(inode, FI_INLINE_DENTRY);
 
@@ -107,10 +105,6 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
 
        f2fs_init_extent_tree(inode, NULL);
 
-       stat_inc_inline_xattr(inode);
-       stat_inc_inline_inode(inode);
-       stat_inc_inline_dir(inode);
-
        F2FS_I(inode)->i_flags =
                f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
 
@@ -127,6 +121,14 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
                        set_compress_context(inode);
        }
 
+       /* Should enable inline_data after compression set */
+       if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
+               set_inode_flag(inode, FI_INLINE_DATA);
+
+       stat_inc_inline_xattr(inode);
+       stat_inc_inline_inode(inode);
+       stat_inc_inline_dir(inode);
+
        f2fs_set_inode_flags(inode);
 
        trace_f2fs_new_inode(inode, 0);
@@ -325,6 +327,9 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
                if (!is_extension_exist(name, ext[i], false))
                        continue;
 
+               /* Do not use inline_data with compression */
+               stat_dec_inline_inode(inode);
+               clear_inode_flag(inode, FI_INLINE_DATA);
                set_compress_context(inode);
                return;
        }
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 836c79a..cf6f7fc 100644
@@ -1450,7 +1450,9 @@ page_hit:
 out_err:
        ClearPageUptodate(page);
 out_put_err:
-       f2fs_handle_page_eio(sbi, page->index, NODE);
+       /* ENOENT comes from read_node_page which is not an error. */
+       if (err != -ENOENT)
+               f2fs_handle_page_eio(sbi, page->index, NODE);
        f2fs_put_page(page, 1);
        return ERR_PTR(err);
 }
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 3cb7f8a..dcd0a1e 100644
@@ -255,18 +255,18 @@ static int recover_quota_data(struct inode *inode, struct page *page)
 
        memset(&attr, 0, sizeof(attr));
 
-       attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
-       attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);
+       attr.ia_vfsuid = VFSUIDT_INIT(make_kuid(inode->i_sb->s_user_ns, i_uid));
+       attr.ia_vfsgid = VFSGIDT_INIT(make_kgid(inode->i_sb->s_user_ns, i_gid));
 
-       if (!uid_eq(attr.ia_uid, inode->i_uid))
+       if (!vfsuid_eq(attr.ia_vfsuid, i_uid_into_vfsuid(&init_user_ns, inode)))
                attr.ia_valid |= ATTR_UID;
-       if (!gid_eq(attr.ia_gid, inode->i_gid))
+       if (!vfsgid_eq(attr.ia_vfsgid, i_gid_into_vfsgid(&init_user_ns, inode)))
                attr.ia_valid |= ATTR_GID;
 
        if (!attr.ia_valid)
                return 0;
 
-       err = dquot_transfer(inode, &attr);
+       err = dquot_transfer(&init_user_ns, inode, &attr);
        if (err)
                set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
        return err;
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 3dae3ed..3e4eb34 100644
@@ -90,7 +90,8 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
         * out the RO attribute for checking by the security
         * module, just because it maps to a file mode.
         */
-       err = security_inode_setattr(file->f_path.dentry, &ia);
+       err = security_inode_setattr(file_mnt_user_ns(file),
+                                    file->f_path.dentry, &ia);
        if (err)
                goto out_unlock_inode;
 
@@ -516,9 +517,11 @@ int fat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
        }
 
        if (((attr->ia_valid & ATTR_UID) &&
-            (!uid_eq(attr->ia_uid, sbi->options.fs_uid))) ||
+            (!uid_eq(from_vfsuid(mnt_userns, i_user_ns(inode), attr->ia_vfsuid),
+                     sbi->options.fs_uid))) ||
            ((attr->ia_valid & ATTR_GID) &&
-            (!gid_eq(attr->ia_gid, sbi->options.fs_gid))) ||
+            (!gid_eq(from_vfsgid(mnt_userns, i_user_ns(inode), attr->ia_vfsgid),
+                     sbi->options.fs_gid))) ||
            ((attr->ia_valid & ATTR_MODE) &&
             (attr->ia_mode & ~FAT_VALID_MODE)))
                error = -EPERM;
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 9d3cf01..7492082 100644
@@ -372,17 +372,22 @@ nomem:
        return NULL;
 }
 
+static inline bool fscache_cookie_is_dropped(struct fscache_cookie *cookie)
+{
+       return READ_ONCE(cookie->state) == FSCACHE_COOKIE_STATE_DROPPED;
+}
+
 static void fscache_wait_on_collision(struct fscache_cookie *candidate,
                                      struct fscache_cookie *wait_for)
 {
        enum fscache_cookie_state *statep = &wait_for->state;
 
-       wait_var_event_timeout(statep, READ_ONCE(*statep) == FSCACHE_COOKIE_STATE_DROPPED,
+       wait_var_event_timeout(statep, fscache_cookie_is_dropped(wait_for),
                               20 * HZ);
-       if (READ_ONCE(*statep) != FSCACHE_COOKIE_STATE_DROPPED) {
+       if (!fscache_cookie_is_dropped(wait_for)) {
                pr_notice("Potential collision c=%08x old: c=%08x",
                          candidate->debug_id, wait_for->debug_id);
-               wait_var_event(statep, READ_ONCE(*statep) == FSCACHE_COOKIE_STATE_DROPPED);
+               wait_var_event(statep, fscache_cookie_is_dropped(wait_for));
        }
 }
 
@@ -517,7 +522,14 @@ static void fscache_perform_lookup(struct fscache_cookie *cookie)
        }
 
        fscache_see_cookie(cookie, fscache_cookie_see_active);
-       fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
+       spin_lock(&cookie->lock);
+       if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
+               __fscache_set_cookie_state(cookie,
+                                          FSCACHE_COOKIE_STATE_INVALIDATING);
+       else
+               __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
+       spin_unlock(&cookie->lock);
+       wake_up_cookie_state(cookie);
        trace = fscache_access_lookup_cookie_end;
 
 out:
@@ -752,6 +764,9 @@ again_locked:
                        spin_lock(&cookie->lock);
                }
 
+               if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
+                       fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
+
                switch (state) {
                case FSCACHE_COOKIE_STATE_RELINQUISHING:
                        fscache_see_cookie(cookie, fscache_cookie_see_relinquish);
@@ -1048,6 +1063,9 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
                return;
 
        case FSCACHE_COOKIE_STATE_LOOKING_UP:
+               __fscache_begin_cookie_access(cookie, fscache_access_invalidate_cookie);
+               set_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags);
+               fallthrough;
        case FSCACHE_COOKIE_STATE_CREATING:
                spin_unlock(&cookie->lock);
                _leave(" [look %x]", cookie->inval_counter);
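
The fscache hunks above close a race: an invalidation that arrives while the cookie is still in LOOKING_UP used to be lost. It is now latched in a DO_INVALIDATE flag and consumed when the lookup finishes, steering the cookie to INVALIDATING instead of ACTIVE. A compact single-threaded model of that transition (illustrative names, no locking):

    #include <stdbool.h>
    #include <stdio.h>

    enum state { LOOKING_UP, ACTIVE, INVALIDATING };

    static enum state st = LOOKING_UP;
    static bool do_invalidate;          /* models FSCACHE_COOKIE_DO_INVALIDATE */

    static void invalidate(void)
    {
            if (st == LOOKING_UP)
                    do_invalidate = true;   /* defer until lookup finishes */
            else
                    st = INVALIDATING;
    }

    static void lookup_complete(void)
    {
            /* mirrors the hunk above: consume the deferred flag, if any */
            if (do_invalidate) {
                    do_invalidate = false;
                    st = INVALIDATING;
            } else {
                    st = ACTIVE;
            }
    }

    int main(void)
    {
            invalidate();               /* arrives mid-lookup            */
            lookup_complete();          /* -> INVALIDATING, not ACTIVE   */
            printf("state=%d\n", st);
            return 0;
    }
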
diff --git a/fs/fscache/volume.c b/fs/fscache/volume.c
index f2aa7db..a058e01 100644
@@ -143,7 +143,7 @@ static void fscache_wait_on_volume_collision(struct fscache_volume *candidate,
 {
        wait_var_event_timeout(&candidate->flags,
                               !fscache_is_acquire_pending(candidate), 20 * HZ);
-       if (!fscache_is_acquire_pending(candidate)) {
+       if (fscache_is_acquire_pending(candidate)) {
                pr_notice("Potential volume collision new=%08x old=%08x",
                          candidate->debug_id, collidee_debug_id);
                fscache_stat(&fscache_n_volumes_collision);
@@ -182,7 +182,7 @@ static bool fscache_hash_volume(struct fscache_volume *candidate)
        hlist_bl_add_head(&candidate->hash_link, h);
        hlist_bl_unlock(h);
 
-       if (test_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &candidate->flags))
+       if (fscache_is_acquire_pending(candidate))
                fscache_wait_on_volume_collision(candidate, collidee_debug_id);
        return true;
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 6240804..02eb723 100644
@@ -600,41 +600,79 @@ static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
        remove_inode_hugepages(inode, offset, LLONG_MAX);
 }
 
+static void hugetlbfs_zero_partial_page(struct hstate *h,
+                                       struct address_space *mapping,
+                                       loff_t start,
+                                       loff_t end)
+{
+       pgoff_t idx = start >> huge_page_shift(h);
+       struct folio *folio;
+
+       folio = filemap_lock_folio(mapping, idx);
+       if (!folio)
+               return;
+
+       start = start & ~huge_page_mask(h);
+       end = end & ~huge_page_mask(h);
+       if (!end)
+               end = huge_page_size(h);
+
+       folio_zero_segment(folio, (size_t)start, (size_t)end);
+
+       folio_unlock(folio);
+       folio_put(folio);
+}
+
 static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 {
+       struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
+       struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);
        loff_t hpage_size = huge_page_size(h);
        loff_t hole_start, hole_end;
 
        /*
-        * For hole punch round up the beginning offset of the hole and
-        * round down the end.
+        * hole_start and hole_end indicate the full pages within the hole.
         */
        hole_start = round_up(offset, hpage_size);
        hole_end = round_down(offset + len, hpage_size);
 
-       if (hole_end > hole_start) {
-               struct address_space *mapping = inode->i_mapping;
-               struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
+       inode_lock(inode);
 
-               inode_lock(inode);
+       /* protected by i_rwsem */
+       if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
+               inode_unlock(inode);
+               return -EPERM;
+       }
 
-               /* protected by i_rwsem */
-               if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
-                       inode_unlock(inode);
-                       return -EPERM;
-               }
+       i_mmap_lock_write(mapping);
+
+       /* If range starts before first full page, zero partial page. */
+       if (offset < hole_start)
+               hugetlbfs_zero_partial_page(h, mapping,
+                               offset, min(offset + len, hole_start));
 
-               i_mmap_lock_write(mapping);
+       /* Unmap users of full pages in the hole. */
+       if (hole_end > hole_start) {
                if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
                        hugetlb_vmdelete_list(&mapping->i_mmap,
                                              hole_start >> PAGE_SHIFT,
                                              hole_end >> PAGE_SHIFT, 0);
-               i_mmap_unlock_write(mapping);
-               remove_inode_hugepages(inode, hole_start, hole_end);
-               inode_unlock(inode);
        }
 
+       /* If range extends beyond last full page, zero partial page. */
+       if ((offset + len) > hole_end && (offset + len) > hole_start)
+               hugetlbfs_zero_partial_page(h, mapping,
+                               hole_end, offset + len);
+
+       i_mmap_unlock_write(mapping);
+
+       /* Remove full pages from the file. */
+       if (hole_end > hole_start)
+               remove_inode_hugepages(inode, hole_start, hole_end);
+
+       inode_unlock(inode);
+
        return 0;
 }
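
The rewritten hole punch distinguishes three sub-ranges: a partial huge page before hole_start is zeroed in place, the full pages in [hole_start, hole_end) are unmapped and removed, and a partial huge page after hole_end is zeroed as well. The range math, sketched with a 2 MiB huge page and made-up offsets (round_up/round_down simplified to plain division):

    #include <stdio.h>

    #define HPAGE            (2UL << 20)        /* 2 MiB huge page */
    #define ROUND_UP(x, a)   (((x) + (a) - 1) / (a) * (a))
    #define ROUND_DOWN(x, a) ((x) / (a) * (a))

    int main(void)
    {
            unsigned long offset = 1UL << 20;   /* punch from 1 MiB ... */
            unsigned long len    = 5UL << 20;   /* ... for 5 MiB        */

            unsigned long hole_start = ROUND_UP(offset, HPAGE);
            unsigned long hole_end   = ROUND_DOWN(offset + len, HPAGE);

            if (offset < hole_start)            /* partial head page */
                    printf("zero   [%lu, %lu)\n", offset,
                           offset + len < hole_start ? offset + len
                                                     : hole_start);
            if (hole_end > hole_start)          /* full pages        */
                    printf("remove [%lu, %lu)\n", hole_start, hole_end);
            if (offset + len > hole_end && offset + len > hole_start)
                    printf("zero   [%lu, %lu)\n", hole_end, offset + len);
            return 0;
    }

For offset = 1 MiB and len = 5 MiB this zeroes [1M, 2M) and removes [2M, 6M), with no tail work, matching the three branches in the hunk above.
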
 
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3aab418..e8e769b 100644
@@ -298,8 +298,8 @@ struct io_buffer_list {
        /* below is for ring provided buffers */
        __u16 buf_nr_pages;
        __u16 nr_entries;
-       __u32 head;
-       __u32 mask;
+       __u16 head;
+       __u16 mask;
 };
 
 struct io_buffer {
@@ -576,7 +576,6 @@ struct io_close {
        struct file                     *file;
        int                             fd;
        u32                             file_slot;
-       u32                             flags;
 };
 
 struct io_timeout_data {
@@ -784,12 +783,6 @@ struct io_msg {
        u32 len;
 };
 
-struct io_nop {
-       struct file                     *file;
-       u64                             extra1;
-       u64                             extra2;
-};
-
 struct io_async_connect {
        struct sockaddr_storage         address;
 };
@@ -851,6 +844,7 @@ enum {
        REQ_F_SINGLE_POLL_BIT,
        REQ_F_DOUBLE_POLL_BIT,
        REQ_F_PARTIAL_IO_BIT,
+       REQ_F_CQE32_INIT_BIT,
        REQ_F_APOLL_MULTISHOT_BIT,
        /* keep async read/write and isreg together and in order */
        REQ_F_SUPPORT_NOWAIT_BIT,
@@ -920,6 +914,8 @@ enum {
        REQ_F_PARTIAL_IO        = BIT(REQ_F_PARTIAL_IO_BIT),
        /* fast poll multishot mode */
        REQ_F_APOLL_MULTISHOT   = BIT(REQ_F_APOLL_MULTISHOT_BIT),
+       /* ->extra1 and ->extra2 are initialised */
+       REQ_F_CQE32_INIT        = BIT(REQ_F_CQE32_INIT_BIT),
 };
 
 struct async_poll {
@@ -994,7 +990,6 @@ struct io_kiocb {
                struct io_msg           msg;
                struct io_xattr         xattr;
                struct io_socket        sock;
-               struct io_nop           nop;
                struct io_uring_cmd     uring_cmd;
        };
 
@@ -1121,7 +1116,6 @@ static const struct io_op_def io_op_defs[] = {
        [IORING_OP_NOP] = {
                .audit_skip             = 1,
                .iopoll                 = 1,
-               .buffer_select          = 1,
        },
        [IORING_OP_READV] = {
                .needs_file             = 1,
@@ -1189,6 +1183,7 @@ static const struct io_op_def io_op_defs[] = {
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
                .needs_async_setup      = 1,
+               .ioprio                 = 1,
                .async_size             = sizeof(struct io_async_msghdr),
        },
        [IORING_OP_RECVMSG] = {
@@ -1197,6 +1192,7 @@ static const struct io_op_def io_op_defs[] = {
                .pollin                 = 1,
                .buffer_select          = 1,
                .needs_async_setup      = 1,
+               .ioprio                 = 1,
                .async_size             = sizeof(struct io_async_msghdr),
        },
        [IORING_OP_TIMEOUT] = {
@@ -1272,6 +1268,7 @@ static const struct io_op_def io_op_defs[] = {
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
                .audit_skip             = 1,
+               .ioprio                 = 1,
        },
        [IORING_OP_RECV] = {
                .needs_file             = 1,
@@ -1279,6 +1276,7 @@ static const struct io_op_def io_op_defs[] = {
                .pollin                 = 1,
                .buffer_select          = 1,
                .audit_skip             = 1,
+               .ioprio                 = 1,
        },
        [IORING_OP_OPENAT2] = {
        },
@@ -1729,9 +1727,24 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 
        if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
                return;
-       /* don't recycle if we already did IO to this buffer */
-       if (req->flags & REQ_F_PARTIAL_IO)
+       /*
+        * For legacy provided buffer mode, don't recycle if we already did
+        * IO to this buffer. For ring-mapped provided buffer mode, we need to
+        * increment ring->head instead, to commit the buffer and prevent it
+        * from being handed out again.
+        */
+       if ((req->flags & REQ_F_BUFFER_SELECTED) &&
+           (req->flags & REQ_F_PARTIAL_IO))
+               return;
+
+       /*
+        * READV uses fields in `struct io_rw` (len/addr) to stash the selected
+        * buffer data. However, if that buffer is recycled, the original
+        * request data stored in addr is lost. Therefore, forbid recycling
+        * for now.
+        */
+       if (req->opcode == IORING_OP_READV)
                return;
+
        /*
         * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
         * the flag and hence ensure that bl->head doesn't get incremented.
@@ -1739,8 +1752,13 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
         */
        if (req->flags & REQ_F_BUFFER_RING) {
                if (req->buf_list) {
-                       req->buf_index = req->buf_list->bgid;
-                       req->flags &= ~REQ_F_BUFFER_RING;
+                       if (req->flags & REQ_F_PARTIAL_IO) {
+                               req->buf_list->head++;
+                               req->buf_list = NULL;
+                       } else {
+                               req->buf_index = req->buf_list->bgid;
+                               req->flags &= ~REQ_F_BUFFER_RING;
+                       }
                }
                return;
        }
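
A sketch of the ring-provided buffer convention the recycle path relies on (inferred from the surrounding code, not a quote of it): userspace publishes buffers by advancing the shared br->tail, while the kernel consumes them by advancing its private bl->head, so bumping head after partial IO commits the buffer and keeps it from being selected twice.

        static inline bool bl_ring_empty(struct io_buffer_list *bl)
        {
                /* pairs with the store-release done by the producer side */
                return smp_load_acquire(&bl->buf_ring->tail) == bl->head;
        }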
@@ -1969,7 +1987,7 @@ static inline void io_req_track_inflight(struct io_kiocb *req)
 {
        if (!(req->flags & REQ_F_INFLIGHT)) {
                req->flags |= REQ_F_INFLIGHT;
-               atomic_inc(&current->io_uring->inflight_tracked);
+               atomic_inc(&req->task->io_uring->inflight_tracked);
        }
 }
 
@@ -2441,94 +2459,66 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
        return true;
 }
 
-static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
-                                s32 res, u32 cflags)
+static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
+                                    struct io_kiocb *req)
 {
        struct io_uring_cqe *cqe;
 
-       /*
-        * If we can't get a cq entry, userspace overflowed the
-        * submission (by quite a lot). Increment the overflow count in
-        * the ring.
-        */
-       cqe = io_get_cqe(ctx);
-       if (likely(cqe)) {
-               WRITE_ONCE(cqe->user_data, user_data);
-               WRITE_ONCE(cqe->res, res);
-               WRITE_ONCE(cqe->flags, cflags);
-               return true;
-       }
-       return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
-}
+       if (!(ctx->flags & IORING_SETUP_CQE32)) {
+               trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
+                                       req->cqe.res, req->cqe.flags, 0, 0);
 
-static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
-                                           struct io_kiocb *req)
-{
-       struct io_uring_cqe *cqe;
+               /*
+                * If we can't get a cq entry, userspace overflowed the
+                * submission (by quite a lot). Increment the overflow count in
+                * the ring.
+                */
+               cqe = io_get_cqe(ctx);
+               if (likely(cqe)) {
+                       memcpy(cqe, &req->cqe, sizeof(*cqe));
+                       return true;
+               }
 
-       trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
-                               req->cqe.res, req->cqe.flags, 0, 0);
+               return io_cqring_event_overflow(ctx, req->cqe.user_data,
+                                               req->cqe.res, req->cqe.flags,
+                                               0, 0);
+       } else {
+               u64 extra1 = 0, extra2 = 0;
 
-       /*
-        * If we can't get a cq entry, userspace overflowed the
-        * submission (by quite a lot). Increment the overflow count in
-        * the ring.
-        */
-       cqe = io_get_cqe(ctx);
-       if (likely(cqe)) {
-               memcpy(cqe, &req->cqe, sizeof(*cqe));
-               return true;
-       }
-       return io_cqring_event_overflow(ctx, req->cqe.user_data,
-                                       req->cqe.res, req->cqe.flags, 0, 0);
-}
+               if (req->flags & REQ_F_CQE32_INIT) {
+                       extra1 = req->extra1;
+                       extra2 = req->extra2;
+               }
 
-static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
-                                             struct io_kiocb *req)
-{
-       struct io_uring_cqe *cqe;
-       u64 extra1 = req->extra1;
-       u64 extra2 = req->extra2;
+               trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
+                                       req->cqe.res, req->cqe.flags, extra1, extra2);
 
-       trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
-                               req->cqe.res, req->cqe.flags, extra1, extra2);
+               /*
+                * If we can't get a cq entry, userspace overflowed the
+                * submission (by quite a lot). Increment the overflow count in
+                * the ring.
+                */
+               cqe = io_get_cqe(ctx);
+               if (likely(cqe)) {
+                       memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
+                       WRITE_ONCE(cqe->big_cqe[0], extra1);
+                       WRITE_ONCE(cqe->big_cqe[1], extra2);
+                       return true;
+               }
 
-       /*
-        * If we can't get a cq entry, userspace overflowed the
-        * submission (by quite a lot). Increment the overflow count in
-        * the ring.
-        */
-       cqe = io_get_cqe(ctx);
-       if (likely(cqe)) {
-               memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
-               cqe->big_cqe[0] = extra1;
-               cqe->big_cqe[1] = extra2;
-               return true;
+               return io_cqring_event_overflow(ctx, req->cqe.user_data,
+                               req->cqe.res, req->cqe.flags,
+                               extra1, extra2);
        }
-
-       return io_cqring_event_overflow(ctx, req->cqe.user_data, req->cqe.res,
-                                       req->cqe.flags, extra1, extra2);
-}
-
-static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
-{
-       trace_io_uring_complete(req->ctx, req, req->cqe.user_data, res, cflags, 0, 0);
-       return __io_fill_cqe(req->ctx, req->cqe.user_data, res, cflags);
 }
 
-static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags,
-                               u64 extra1, u64 extra2)
+static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
+                                    s32 res, u32 cflags)
 {
-       struct io_ring_ctx *ctx = req->ctx;
        struct io_uring_cqe *cqe;
 
-       if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_CQE32)))
-               return;
-       if (req->flags & REQ_F_CQE_SKIP)
-               return;
-
-       trace_io_uring_complete(ctx, req, req->cqe.user_data, res, cflags,
-                               extra1, extra2);
+       ctx->cq_extra++;
+       trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
 
        /*
         * If we can't get a cq entry, userspace overflowed the
@@ -2537,23 +2527,17 @@ static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags
         */
        cqe = io_get_cqe(ctx);
        if (likely(cqe)) {
-               WRITE_ONCE(cqe->user_data, req->cqe.user_data);
+               WRITE_ONCE(cqe->user_data, user_data);
                WRITE_ONCE(cqe->res, res);
                WRITE_ONCE(cqe->flags, cflags);
-               WRITE_ONCE(cqe->big_cqe[0], extra1);
-               WRITE_ONCE(cqe->big_cqe[1], extra2);
-               return;
-       }
 
-       io_cqring_event_overflow(ctx, req->cqe.user_data, res, cflags, extra1, extra2);
-}
-
-static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
-                                    s32 res, u32 cflags)
-{
-       ctx->cq_extra++;
-       trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
-       return __io_fill_cqe(ctx, user_data, res, cflags);
+               if (ctx->flags & IORING_SETUP_CQE32) {
+                       WRITE_ONCE(cqe->big_cqe[0], 0);
+                       WRITE_ONCE(cqe->big_cqe[1], 0);
+               }
+               return true;
+       }
+       return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
 }
 
 static void __io_req_complete_put(struct io_kiocb *req)
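
The big_cqe[] stores above assume the CQE32 layout from the io_uring uapi: rings created with IORING_SETUP_CQE32 append two extra u64 words to every completion. A sketch of that shape (paraphrasing the uapi header, not a verbatim copy):

        struct io_uring_cqe {
                __u64   user_data;      /* sqe->user_data echoed back */
                __s32   res;            /* result code for this event */
                __u32   flags;
                /* only present on IORING_SETUP_CQE32 rings: */
                __u64   big_cqe[];      /* big_cqe[0]/[1] = extra1/extra2 */
        };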
@@ -2590,16 +2574,11 @@ static void __io_req_complete_put(struct io_kiocb *req)
 static void __io_req_complete_post(struct io_kiocb *req, s32 res,
                                   u32 cflags)
 {
-       if (!(req->flags & REQ_F_CQE_SKIP))
-               __io_fill_cqe_req(req, res, cflags);
-       __io_req_complete_put(req);
-}
-
-static void __io_req_complete_post32(struct io_kiocb *req, s32 res,
-                                  u32 cflags, u64 extra1, u64 extra2)
-{
-       if (!(req->flags & REQ_F_CQE_SKIP))
-               __io_fill_cqe32_req(req, res, cflags, extra1, extra2);
+       if (!(req->flags & REQ_F_CQE_SKIP)) {
+               req->cqe.res = res;
+               req->cqe.flags = cflags;
+               __io_fill_cqe_req(req->ctx, req);
+       }
        __io_req_complete_put(req);
 }
 
@@ -2614,18 +2593,6 @@ static void io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags)
        io_cqring_ev_posted(ctx);
 }
 
-static void io_req_complete_post32(struct io_kiocb *req, s32 res,
-                                  u32 cflags, u64 extra1, u64 extra2)
-{
-       struct io_ring_ctx *ctx = req->ctx;
-
-       spin_lock(&ctx->completion_lock);
-       __io_req_complete_post32(req, res, cflags, extra1, extra2);
-       io_commit_cqring(ctx);
-       spin_unlock(&ctx->completion_lock);
-       io_cqring_ev_posted(ctx);
-}
-
 static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
                                         u32 cflags)
 {
@@ -2643,19 +2610,6 @@ static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
                io_req_complete_post(req, res, cflags);
 }
 
-static inline void __io_req_complete32(struct io_kiocb *req,
-                                      unsigned int issue_flags, s32 res,
-                                      u32 cflags, u64 extra1, u64 extra2)
-{
-       if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
-               io_req_complete_state(req, res, cflags);
-               req->extra1 = extra1;
-               req->extra2 = extra2;
-       } else {
-               io_req_complete_post32(req, res, cflags, extra1, extra2);
-       }
-}
-
 static inline void io_req_complete(struct io_kiocb *req, s32 res)
 {
        if (res < 0)
@@ -3202,12 +3156,8 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
                        struct io_kiocb *req = container_of(node, struct io_kiocb,
                                                    comp_list);
 
-                       if (!(req->flags & REQ_F_CQE_SKIP)) {
-                               if (!(ctx->flags & IORING_SETUP_CQE32))
-                                       __io_fill_cqe_req_filled(ctx, req);
-                               else
-                                       __io_fill_cqe32_req_filled(ctx, req);
-                       }
+                       if (!(req->flags & REQ_F_CQE_SKIP))
+                               __io_fill_cqe_req(ctx, req);
                }
 
                io_commit_cqring(ctx);
@@ -3326,7 +3276,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
                nr_events++;
                if (unlikely(req->flags & REQ_F_CQE_SKIP))
                        continue;
-               __io_fill_cqe_req(req, req->cqe.res, io_put_kbuf(req, 0));
+
+               req->cqe.flags = io_put_kbuf(req, 0);
+               __io_fill_cqe_req(req->ctx, req);
        }
 
        if (unlikely(!nr_events))
@@ -3497,7 +3449,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
        if (unlikely(res != req->cqe.res)) {
                if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
                    io_rw_should_reissue(req)) {
-                       req->flags |= REQ_F_REISSUE;
+                       req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
                        return true;
                }
                req_set_fail(req);
@@ -3547,7 +3499,7 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
                kiocb_end_write(req);
        if (unlikely(res != req->cqe.res)) {
                if (res == -EAGAIN && io_rw_should_reissue(req)) {
-                       req->flags |= REQ_F_REISSUE;
+                       req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
                        return;
                }
                req->cqe.res = res;
@@ -3677,6 +3629,20 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        int ret;
 
        kiocb->ki_pos = READ_ONCE(sqe->off);
+       /* used for fixed read/write too - just read unconditionally */
+       req->buf_index = READ_ONCE(sqe->buf_index);
+
+       if (req->opcode == IORING_OP_READ_FIXED ||
+           req->opcode == IORING_OP_WRITE_FIXED) {
+               struct io_ring_ctx *ctx = req->ctx;
+               u16 index;
+
+               if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+                       return -EFAULT;
+               index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+               req->imu = ctx->user_bufs[index];
+               io_req_set_rsrc_node(req, ctx, 0);
+       }
 
        ioprio = READ_ONCE(sqe->ioprio);
        if (ioprio) {
@@ -3689,12 +3655,9 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                kiocb->ki_ioprio = get_current_ioprio();
        }
 
-       req->imu = NULL;
        req->rw.addr = READ_ONCE(sqe->addr);
        req->rw.len = READ_ONCE(sqe->len);
        req->rw.flags = READ_ONCE(sqe->rw_flags);
-       /* used for fixed read/write too - just read unconditionally */
-       req->buf_index = READ_ONCE(sqe->buf_index);
        return 0;
 }
 
@@ -3826,20 +3789,9 @@ static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter
 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
                           unsigned int issue_flags)
 {
-       struct io_mapped_ubuf *imu = req->imu;
-       u16 index, buf_index = req->buf_index;
-
-       if (likely(!imu)) {
-               struct io_ring_ctx *ctx = req->ctx;
-
-               if (unlikely(buf_index >= ctx->nr_user_bufs))
-                       return -EFAULT;
-               io_req_set_rsrc_node(req, ctx, issue_flags);
-               index = array_index_nospec(buf_index, ctx->nr_user_bufs);
-               imu = READ_ONCE(ctx->user_bufs[index]);
-               req->imu = imu;
-       }
-       return __io_import_fixed(req, rw, iter, imu);
+       if (WARN_ON_ONCE(!req->imu))
+               return -EFAULT;
+       return __io_import_fixed(req, rw, iter, req->imu);
 }
 
 static int io_buffer_add_list(struct io_ring_ctx *ctx,
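
Moving the fixed-buffer lookup into io_prep_rw() (earlier hunk) means ->imu is resolved once, under ->uring_lock, and the import path only has to trust it. The bounds-check-plus-array_index_nospec() idiom used there clamps the index even under speculative execution; a minimal sketch (names illustrative):

        if (unlikely(idx >= nr_entries))
                return -EFAULT;
        idx = array_index_nospec(idx, nr_entries);      /* no speculative OOB */
        imu = table[idx];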
@@ -3876,19 +3828,17 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 {
        struct io_uring_buf_ring *br = bl->buf_ring;
        struct io_uring_buf *buf;
-       __u32 head = bl->head;
+       __u16 head = bl->head;
 
-       if (unlikely(smp_load_acquire(&br->tail) == head)) {
-               io_ring_submit_unlock(req->ctx, issue_flags);
+       if (unlikely(smp_load_acquire(&br->tail) == head))
                return NULL;
-       }
 
        head &= bl->mask;
        if (head < IO_BUFFER_LIST_BUF_PER_PAGE) {
                buf = &br->bufs[head];
        } else {
                int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
-               int index = head / IO_BUFFER_LIST_BUF_PER_PAGE - 1;
+               int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
                buf = page_address(bl->buf_pages[index]);
                buf += off;
        }
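
A worked example of the index fix above, assuming 4 KiB pages and a 16-byte struct io_uring_buf so that IO_BUFFER_LIST_BUF_PER_PAGE is 256 (sizes assumed for illustration): head == 256 is the first buffer of the second page.

        int off   = 256 & (256 - 1);    /* 0 */
        int index = 256 / 256;          /* 1; the old "... - 1" yielded 0,
                                         * one page too low */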
@@ -3898,7 +3848,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
        req->buf_list = bl;
        req->buf_index = buf->bid;
 
-       if (issue_flags & IO_URING_F_UNLOCKED) {
+       if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
                /*
                 * If we came in unlocked, we have no choice but to consume the
                 * buffer here. This does mean it'll be pinned until the IO
@@ -4376,18 +4326,19 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                if (unlikely(ret < 0))
                        return ret;
        } else {
+               rw = req->async_data;
+               s = &rw->s;
+
                /*
                 * Safe and required to re-import if we're using provided
                 * buffers, as we dropped the selected one before retry.
                 */
-               if (req->flags & REQ_F_BUFFER_SELECT) {
+               if (io_do_buffer_select(req)) {
                        ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
                        if (unlikely(ret < 0))
                                return ret;
                }
 
-               rw = req->async_data;
-               s = &rw->s;
                /*
                 * We come here from an earlier attempt, restore our state to
                 * match in case it doesn't. It's cheap enough that we don't
@@ -5079,10 +5030,18 @@ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
 
        req->uring_cmd.task_work_cb = task_work_cb;
        req->io_task_work.func = io_uring_cmd_work;
-       io_req_task_prio_work_add(req);
+       io_req_task_work_add(req);
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);
 
+static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
+                                         u64 extra1, u64 extra2)
+{
+       req->extra1 = extra1;
+       req->extra2 = extra2;
+       req->flags |= REQ_F_CQE32_INIT;
+}
+
 /*
  * Called by consumers of io_uring_cmd, if they originally returned
  * -EIOCBQUEUED upon receiving the command.
@@ -5093,10 +5052,10 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
 
        if (ret < 0)
                req_set_fail(req);
+
        if (req->ctx->flags & IORING_SETUP_CQE32)
-               __io_req_complete32(req, 0, ret, 0, res2, 0);
-       else
-               io_req_complete(req, ret);
+               io_req_set_cqe32_extra(req, res2, 0);
+       io_req_complete(req, ret);
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 
@@ -5115,7 +5074,7 @@ static int io_uring_cmd_prep(struct io_kiocb *req,
 {
        struct io_uring_cmd *ioucmd = &req->uring_cmd;
 
-       if (sqe->rw_flags)
+       if (sqe->rw_flags || sqe->__pad1)
                return -EINVAL;
        ioucmd->cmd = sqe->cmd;
        ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
@@ -5258,14 +5217,6 @@ done:
 
 static int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       /*
-        * If the ring is setup with CQE32, relay back addr/addr
-        */
-       if (req->ctx->flags & IORING_SETUP_CQE32) {
-               req->nop.extra1 = READ_ONCE(sqe->addr);
-               req->nop.extra2 = READ_ONCE(sqe->addr2);
-       }
-
        return 0;
 }
 
@@ -5274,23 +5225,7 @@ static int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  */
 static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
 {
-       unsigned int cflags;
-       void __user *buf;
-
-       if (req->flags & REQ_F_BUFFER_SELECT) {
-               size_t len = 1;
-
-               buf = io_buffer_select(req, &len, issue_flags);
-               if (!buf)
-                       return -ENOBUFS;
-       }
-
-       cflags = io_put_kbuf(req, issue_flags);
-       if (!(req->ctx->flags & IORING_SETUP_CQE32))
-               __io_req_complete(req, issue_flags, 0, cflags);
-       else
-               __io_req_complete32(req, issue_flags, 0, cflags,
-                                   req->nop.extra1, req->nop.extra2);
+       __io_req_complete(req, issue_flags, 0, 0);
        return 0;
 }
 
@@ -5988,18 +5923,14 @@ static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
 
 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       if (sqe->off || sqe->addr || sqe->len || sqe->buf_index)
+       if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
                return -EINVAL;
        if (req->flags & REQ_F_FIXED_FILE)
                return -EBADF;
 
        req->close.fd = READ_ONCE(sqe->fd);
        req->close.file_slot = READ_ONCE(sqe->file_index);
-       req->close.flags = READ_ONCE(sqe->close_flags);
-       if (req->close.flags & ~IORING_CLOSE_FD_AND_FILE_SLOT)
-               return -EINVAL;
-       if (!(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT) &&
-           req->close.file_slot && req->close.fd)
+       if (req->close.file_slot && req->close.fd)
                return -EINVAL;
 
        return 0;
@@ -6015,8 +5946,7 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags)
 
        if (req->close.file_slot) {
                ret = io_close_fixed(req, issue_flags);
-               if (ret || !(req->close.flags & IORING_CLOSE_FD_AND_FILE_SLOT))
-                       goto err;
+               goto err;
        }
 
        spin_lock(&files->file_lock);
@@ -6158,14 +6088,12 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_sr_msg *sr = &req->sr_msg;
 
-       if (unlikely(sqe->file_index))
-               return -EINVAL;
-       if (unlikely(sqe->addr2 || sqe->file_index))
+       if (unlikely(sqe->file_index || sqe->addr2))
                return -EINVAL;
 
        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
        sr->len = READ_ONCE(sqe->len);
-       sr->flags = READ_ONCE(sqe->addr2);
+       sr->flags = READ_ONCE(sqe->ioprio);
        if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
                return -EINVAL;
        sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
@@ -6396,14 +6324,12 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_sr_msg *sr = &req->sr_msg;
 
-       if (unlikely(sqe->file_index))
-               return -EINVAL;
-       if (unlikely(sqe->addr2 || sqe->file_index))
+       if (unlikely(sqe->file_index || sqe->addr2))
                return -EINVAL;
 
        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
        sr->len = READ_ONCE(sqe->len);
-       sr->flags = READ_ONCE(sqe->addr2);
+       sr->flags = READ_ONCE(sqe->ioprio);
        if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
                return -EINVAL;
        sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
@@ -7037,7 +6963,8 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
                io_req_complete_failed(req, ret);
 }
 
-static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
+static void __io_poll_execute(struct io_kiocb *req, int mask,
+                             __poll_t __maybe_unused events)
 {
        req->cqe.res = mask;
        /*
@@ -7046,7 +6973,6 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
         * CPU. We want to avoid pulling in req->apoll->events for that
         * case.
         */
-       req->apoll_events = events;
        if (req->opcode == IORING_OP_POLL_ADD)
                req->io_task_work.func = io_poll_task_func;
        else
@@ -7197,6 +7123,8 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
        io_init_poll_iocb(poll, mask, io_poll_wake);
        poll->file = req->file;
 
+       req->apoll_events = poll->events;
+
        ipt->pt._key = mask;
        ipt->req = req;
        ipt->error = 0;
@@ -7227,8 +7155,11 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 
        if (mask) {
                /* can't multishot if failed, just queue the event we've got */
-               if (unlikely(ipt->error || !ipt->nr_entries))
+               if (unlikely(ipt->error || !ipt->nr_entries)) {
                        poll->events |= EPOLLONESHOT;
+                       req->apoll_events |= EPOLLONESHOT;
+                       ipt->error = 0;
+               }
                __io_poll_execute(req, mask, poll->events);
                return 0;
        }
@@ -7290,6 +7221,7 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
                mask |= EPOLLEXCLUSIVE;
        if (req->flags & REQ_F_POLLED) {
                apoll = req->apoll;
+               kfree(apoll->double_poll);
        } else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
                   !list_empty(&ctx->apoll_cache)) {
                apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
@@ -7475,7 +7407,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
                return -EINVAL;
 
        io_req_set_refcount(req);
-       req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
+       poll->events = io_poll_parse_events(sqe, flags);
        return 0;
 }
 
@@ -7488,6 +7420,8 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
        ipt.pt._qproc = io_poll_queue_proc;
 
        ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
+       if (!ret && ipt.error)
+               req_set_fail(req);
        ret = ret ?: ipt.error;
        if (ret)
                __io_req_complete(req, issue_flags, ret, 0);
@@ -8047,6 +7981,9 @@ static int io_files_update_with_index_alloc(struct io_kiocb *req,
        struct file *file;
        int ret, fd;
 
+       if (!req->ctx->file_data)
+               return -ENXIO;
+
        for (done = 0; done < req->rsrc_update.nr_args; done++) {
                if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
                        ret = -EFAULT;
@@ -8063,8 +8000,8 @@ static int io_files_update_with_index_alloc(struct io_kiocb *req,
                if (ret < 0)
                        break;
                if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
-                       ret = -EFAULT;
                        __io_close_fixed(req, issue_flags, ret);
+                       ret = -EFAULT;
                        break;
                }
        }
@@ -8773,6 +8710,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
                 * Queued up for async execution, worker will release
                 * submit reference when the iocb is actually submitted.
                 */
+               io_kbuf_recycle(req, 0);
                io_queue_iowq(req, NULL);
                break;
        case IO_APOLL_OK:
@@ -9788,11 +9726,19 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
 
 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 {
+       unsigned nr = ctx->nr_user_files;
        int ret;
 
        if (!ctx->file_data)
                return -ENXIO;
+
+       /*
+        * Quiesce may drop ->uring_lock; while it is not held, prevent new
+        * requests from using the table.
+        */
+       ctx->nr_user_files = 0;
        ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
+       ctx->nr_user_files = nr;
        if (!ret)
                __io_sqe_files_unregister(ctx);
        return ret;
@@ -10690,12 +10636,19 @@ static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
 
 static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
 {
+       unsigned nr = ctx->nr_user_bufs;
        int ret;
 
        if (!ctx->buf_data)
                return -ENXIO;
 
+       /*
+        * Quiesce may drop ->uring_lock; while it is not held, prevent new
+        * requests from using the table.
+        */
+       ctx->nr_user_bufs = 0;
        ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
+       ctx->nr_user_bufs = nr;
        if (!ret)
                __io_sqe_buffers_unregister(ctx);
        return ret;
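
Zeroing the count is sufficient because every lookup bounds-checks against it first, so a request racing in while ->uring_lock is dropped simply fails. For instance, the fixed-buffer prep path seen earlier:

        if (unlikely(req->buf_index >= ctx->nr_user_bufs))
                return -EFAULT; /* always taken while the table is quiesced */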
@@ -12986,7 +12939,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 {
        struct io_uring_buf_ring *br;
        struct io_uring_buf_reg reg;
-       struct io_buffer_list *bl;
+       struct io_buffer_list *bl, *free_bl = NULL;
        struct page **pages;
        int nr_pages;
 
@@ -13002,6 +12955,10 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
        if (!is_power_of_2(reg.ring_entries))
                return -EINVAL;
 
+       /* cannot disambiguate full vs empty due to head/tail size */
+       if (reg.ring_entries >= 65536)
+               return -EINVAL;
+
        if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
                int ret = io_init_bl_list(ctx);
                if (ret)
@@ -13014,7 +12971,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
                if (bl->buf_nr_pages || !list_empty(&bl->buf_list))
                        return -EEXIST;
        } else {
-               bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+               free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
                if (!bl)
                        return -ENOMEM;
        }
@@ -13023,7 +12980,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
                             struct_size(br, bufs, reg.ring_entries),
                             &nr_pages);
        if (IS_ERR(pages)) {
-               kfree(bl);
+               kfree(free_bl);
                return PTR_ERR(pages);
        }
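
The new 65536 cap follows from the __u16 head/mask change at the top of this file's hunks: with 16-bit indices, occupancy is (u16)(tail - head), so a ring of exactly 65536 entries makes "completely full" indistinguishable from "empty". A worked illustration:

        unsigned short head = 0, tail = 0;
        tail += 65536;          /* producer fills every slot; 16-bit tail
                                 * wraps back to 0 */
        /* tail == head now reads as empty although the ring is full, hence
         * ring_entries must be a power of 2 strictly below 65536. */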
 
index e49bb09..e9c308a 100644 (file)
@@ -2114,7 +2114,7 @@ out:
 /**
  * jbd2_journal_try_to_free_buffers() - try to free page buffers.
  * @journal: journal for operation
- * @page: to try and free
+ * @folio: Folio to detach data from.
  *
  * For all the buffers on this page,
  * if they are fully written out ordered data, move them onto BUF_CLEAN
index 1d732fd..332dc9a 100644 (file)
@@ -95,14 +95,14 @@ int jfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
        if (rc)
                return rc;
 
-       if (is_quota_modification(inode, iattr)) {
+       if (is_quota_modification(mnt_userns, inode, iattr)) {
                rc = dquot_initialize(inode);
                if (rc)
                        return rc;
        }
        if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
            (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
-               rc = dquot_transfer(inode, iattr);
+               rc = dquot_transfer(mnt_userns, inode, iattr);
                if (rc)
                        return rc;
        }
index e6f4ccc..353f047 100644 (file)
@@ -6490,6 +6490,7 @@ int smb2_write(struct ksmbd_work *work)
                goto out;
        }
 
+       ksmbd_debug(SMB, "flags %u\n", le32_to_cpu(req->Flags));
        if (le32_to_cpu(req->Flags) & SMB2_WRITEFLAG_WRITE_THROUGH)
                writethrough = true;
 
@@ -6505,10 +6506,6 @@ int smb2_write(struct ksmbd_work *work)
                data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
                                    le16_to_cpu(req->DataOffset));
 
-               ksmbd_debug(SMB, "flags %u\n", le32_to_cpu(req->Flags));
-               if (le32_to_cpu(req->Flags) & SMB2_WRITEFLAG_WRITE_THROUGH)
-                       writethrough = true;
-
                ksmbd_debug(SMB, "filename %pd, offset %lld, len %zu\n",
                            fp->filp->f_path.dentry, offset, length);
                err = ksmbd_vfs_write(work, fp, data_buf, length, &offset,
@@ -7703,7 +7700,7 @@ int smb2_ioctl(struct ksmbd_work *work)
        {
                struct file_zero_data_information *zero_data;
                struct ksmbd_file *fp;
-               loff_t off, len;
+               loff_t off, len, bfz;
 
                if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
                        ksmbd_debug(SMB,
@@ -7720,19 +7717,26 @@ int smb2_ioctl(struct ksmbd_work *work)
                zero_data =
                        (struct file_zero_data_information *)&req->Buffer[0];
 
-               fp = ksmbd_lookup_fd_fast(work, id);
-               if (!fp) {
-                       ret = -ENOENT;
+               off = le64_to_cpu(zero_data->FileOffset);
+               bfz = le64_to_cpu(zero_data->BeyondFinalZero);
+               if (off > bfz) {
+                       ret = -EINVAL;
                        goto out;
                }
 
-               off = le64_to_cpu(zero_data->FileOffset);
-               len = le64_to_cpu(zero_data->BeyondFinalZero) - off;
+               len = bfz - off;
+               if (len) {
+                       fp = ksmbd_lookup_fd_fast(work, id);
+                       if (!fp) {
+                               ret = -ENOENT;
+                               goto out;
+                       }
 
-               ret = ksmbd_vfs_zero_data(work, fp, off, len);
-               ksmbd_fd_put(work, fp);
-               if (ret < 0)
-                       goto out;
+                       ret = ksmbd_vfs_zero_data(work, fp, off, len);
+                       ksmbd_fd_put(work, fp);
+                       if (ret < 0)
+                               goto out;
+               }
                break;
        }
        case FSCTL_QUERY_ALLOCATED_RANGES:
@@ -7806,14 +7810,24 @@ int smb2_ioctl(struct ksmbd_work *work)
                src_off = le64_to_cpu(dup_ext->SourceFileOffset);
                dst_off = le64_to_cpu(dup_ext->TargetFileOffset);
                length = le64_to_cpu(dup_ext->ByteCount);
-               cloned = vfs_clone_file_range(fp_in->filp, src_off, fp_out->filp,
-                                             dst_off, length, 0);
+               /*
+                * XXX: It is not clear whether FSCTL_DUPLICATE_EXTENTS_TO_FILE
+                * should fall back to vfs_copy_file_range(). This could be
+                * beneficial when re-exporting an nfs/smb mount, but note that
+                * it can result in a partial copy that returns an error status.
+                * If/when FSCTL_DUPLICATE_EXTENTS_TO_FILE_EX is implemented,
+                * the fallback to vfs_copy_file_range() should be avoided when
+                * the DUPLICATE_EXTENTS_DATA_EX_SOURCE_ATOMIC flag is set.
+                */
+               cloned = vfs_clone_file_range(fp_in->filp, src_off,
+                                             fp_out->filp, dst_off, length, 0);
                if (cloned == -EXDEV || cloned == -EOPNOTSUPP) {
                        ret = -EOPNOTSUPP;
                        goto dup_ext_out;
                } else if (cloned != length) {
                        cloned = vfs_copy_file_range(fp_in->filp, src_off,
-                                                    fp_out->filp, dst_off, length, 0);
+                                                    fp_out->filp, dst_off,
+                                                    length, 0);
                        if (cloned != length) {
                                if (cloned < 0)
                                        ret = cloned;
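
Design note on the hunk above: vfs_clone_file_range() asks for a metadata-only reflink, which fails with -EXDEV across filesystems and -EOPNOTSUPP where the filesystem cannot clone; only a short clone falls back to vfs_copy_file_range(), which does a real byte copy. The control flow in miniature (identifiers illustrative):

        loff_t cloned = vfs_clone_file_range(in, src_off, out, dst_off, len, 0);

        if (cloned == -EXDEV || cloned == -EOPNOTSUPP)
                return -EOPNOTSUPP;     /* cannot serve this FSCTL at all */
        if (cloned != len)              /* partial clone: finish by copying */
                cloned = vfs_copy_file_range(in, src_off, out, dst_off, len, 0);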
index d035e06..35b55ee 100644 (file)
@@ -5,16 +5,6 @@
  *
  *   Author(s): Long Li <longli@microsoft.com>,
  *             Hyunchul Lee <hyc.lee@gmail.com>
- *
- *   This program is free software;  you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; either version 2 of the License, or
- *   (at your option) any later version.
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
- *   the GNU General Public License for more details.
  */
 
 #define SUBMOD_NAME    "smb_direct"
index 8fef9de..143bba4 100644 (file)
@@ -230,7 +230,7 @@ static int ksmbd_kthread_fn(void *p)
                        break;
                }
                ret = kernel_accept(iface->ksmbd_socket, &client_sk,
-                                   O_NONBLOCK);
+                                   SOCK_NONBLOCK);
                mutex_unlock(&iface->sock_release_lock);
                if (ret) {
                        if (ret == -EAGAIN)
index dcdd07c..7c84902 100644 (file)
@@ -963,7 +963,7 @@ ssize_t ksmbd_vfs_getxattr(struct user_namespace *user_ns,
  */
 int ksmbd_vfs_setxattr(struct user_namespace *user_ns,
                       struct dentry *dentry, const char *attr_name,
-                      const void *attr_value, size_t attr_size, int flags)
+                      void *attr_value, size_t attr_size, int flags)
 {
        int err;
 
@@ -1015,7 +1015,9 @@ int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
                                     FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                     off, len);
 
-       return vfs_fallocate(fp->filp, FALLOC_FL_ZERO_RANGE, off, len);
+       return vfs_fallocate(fp->filp,
+                            FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
+                            off, len);
 }
 
 int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
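
Adding FALLOC_FL_KEEP_SIZE matters because FALLOC_FL_ZERO_RANGE on its own may extend i_size when the range reaches past EOF, while the SMB2 zero-data operation must leave the file size untouched. A sketch of the call relied on:

        /* zero [off, off + len) without ever growing i_size */
        vfs_fallocate(filp, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
                      off, len);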
@@ -1046,7 +1048,7 @@ int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
        *out_count = 0;
        end = start + length;
        while (start < end && *out_count < in_count) {
-               extent_start = f->f_op->llseek(f, start, SEEK_DATA);
+               extent_start = vfs_llseek(f, start, SEEK_DATA);
                if (extent_start < 0) {
                        if (extent_start != -ENXIO)
                                ret = (int)extent_start;
@@ -1056,7 +1058,7 @@ int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
                if (extent_start >= end)
                        break;
 
-               extent_end = f->f_op->llseek(f, extent_start, SEEK_HOLE);
+               extent_end = vfs_llseek(f, extent_start, SEEK_HOLE);
                if (extent_end < 0) {
                        if (extent_end != -ENXIO)
                                ret = (int)extent_end;
@@ -1777,6 +1779,10 @@ int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
 
                ret = vfs_copy_file_range(src_fp->filp, src_off,
                                          dst_fp->filp, dst_off, len, 0);
+               if (ret == -EOPNOTSUPP || ret == -EXDEV)
+                       ret = generic_copy_file_range(src_fp->filp, src_off,
+                                                     dst_fp->filp, dst_off,
+                                                     len, 0);
                if (ret < 0)
                        return ret;
 
index 8c37aaf..70da4c0 100644 (file)
@@ -109,7 +109,7 @@ ssize_t ksmbd_vfs_casexattr_len(struct user_namespace *user_ns,
                                int attr_name_len);
 int ksmbd_vfs_setxattr(struct user_namespace *user_ns,
                       struct dentry *dentry, const char *attr_name,
-                      const void *attr_value, size_t attr_size, int flags);
+                      void *attr_value, size_t attr_size, int flags);
 int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
                                size_t *xattr_stream_name_size, int s_type);
 int ksmbd_vfs_remove_xattr(struct user_namespace *user_ns,
index 0a22a2f..e1c4617 100644 (file)
@@ -176,7 +176,7 @@ nlm_delete_file(struct nlm_file *file)
        }
 }
 
-static int nlm_unlock_files(struct nlm_file *file)
+static int nlm_unlock_files(struct nlm_file *file, fl_owner_t owner)
 {
        struct file_lock lock;
 
@@ -184,6 +184,7 @@ static int nlm_unlock_files(struct nlm_file *file)
        lock.fl_type  = F_UNLCK;
        lock.fl_start = 0;
        lock.fl_end   = OFFSET_MAX;
+       lock.fl_owner = owner;
        if (file->f_file[O_RDONLY] &&
            vfs_lock_file(file->f_file[O_RDONLY], F_SETLK, &lock, NULL))
                goto out_err;
@@ -225,7 +226,7 @@ again:
                if (match(lockhost, host)) {
 
                        spin_unlock(&flctx->flc_lock);
-                       if (nlm_unlock_files(file))
+                       if (nlm_unlock_files(file, fl->fl_owner))
                                return 1;
                        goto again;
                }
@@ -282,11 +283,10 @@ nlm_file_inuse(struct nlm_file *file)
 
 static void nlm_close_files(struct nlm_file *file)
 {
-       struct file *f;
-
-       for (f = file->f_file[0]; f <= file->f_file[1]; f++)
-               if (f)
-                       nlmsvc_ops->fclose(f);
+       if (file->f_file[O_RDONLY])
+               nlmsvc_ops->fclose(file->f_file[O_RDONLY]);
+       if (file->f_file[O_WRONLY])
+               nlmsvc_ops->fclose(file->f_file[O_WRONLY]);
 }
 
 /*
index ca28e0e..c266cfd 100644 (file)
@@ -425,21 +425,9 @@ static inline int flock_translate_cmd(int cmd) {
 }
 
 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
-static struct file_lock *
-flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
+static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
 {
-       int type = flock_translate_cmd(cmd);
-
-       if (type < 0)
-               return ERR_PTR(type);
-
-       if (fl == NULL) {
-               fl = locks_alloc_lock();
-               if (fl == NULL)
-                       return ERR_PTR(-ENOMEM);
-       } else {
-               locks_init_lock(fl);
-       }
+       locks_init_lock(fl);
 
        fl->fl_file = filp;
        fl->fl_owner = filp;
@@ -447,8 +435,6 @@ flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
        fl->fl_flags = FL_FLOCK;
        fl->fl_type = type;
        fl->fl_end = OFFSET_MAX;
-
-       return fl;
 }
 
 static int assign_type(struct file_lock *fl, long type)
@@ -2097,21 +2083,9 @@ EXPORT_SYMBOL(locks_lock_inode_wait);
  */
 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
 {
-       struct fd f = fdget(fd);
-       struct file_lock *lock;
-       int can_sleep, unlock;
-       int error;
-
-       error = -EBADF;
-       if (!f.file)
-               goto out;
-
-       can_sleep = !(cmd & LOCK_NB);
-       cmd &= ~LOCK_NB;
-       unlock = (cmd == LOCK_UN);
-
-       if (!unlock && !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
-               goto out_putf;
+       int can_sleep, error, type;
+       struct file_lock fl;
+       struct fd f;
 
        /*
         * LOCK_MAND locks were broken for a long time in that they never
@@ -2123,36 +2097,41 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
         */
        if (cmd & LOCK_MAND) {
                pr_warn_once("Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n");
-               error = 0;
-               goto out_putf;
+               return 0;
        }
 
-       lock = flock_make_lock(f.file, cmd, NULL);
-       if (IS_ERR(lock)) {
-               error = PTR_ERR(lock);
+       type = flock_translate_cmd(cmd & ~LOCK_NB);
+       if (type < 0)
+               return type;
+
+       error = -EBADF;
+       f = fdget(fd);
+       if (!f.file)
+               return error;
+
+       if (type != F_UNLCK && !(f.file->f_mode & (FMODE_READ | FMODE_WRITE)))
                goto out_putf;
-       }
 
-       if (can_sleep)
-               lock->fl_flags |= FL_SLEEP;
+       flock_make_lock(f.file, &fl, type);
 
-       error = security_file_lock(f.file, lock->fl_type);
+       error = security_file_lock(f.file, fl.fl_type);
        if (error)
-               goto out_free;
+               goto out_putf;
+
+       can_sleep = !(cmd & LOCK_NB);
+       if (can_sleep)
+               fl.fl_flags |= FL_SLEEP;
 
        if (f.file->f_op->flock)
                error = f.file->f_op->flock(f.file,
-                                         (can_sleep) ? F_SETLKW : F_SETLK,
-                                         lock);
+                                           (can_sleep) ? F_SETLKW : F_SETLK,
+                                           &fl);
        else
-               error = locks_lock_file_wait(f.file, lock);
-
- out_free:
-       locks_free_lock(lock);
+               error = locks_lock_file_wait(f.file, &fl);
 
  out_putf:
        fdput(f);
- out:
+
        return error;
 }
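
The shape of the rewritten syscall in miniature: the file_lock now lives on the stack for the duration of flock(2), removing the locks_alloc_lock()/locks_free_lock() round trip and its -ENOMEM failure mode.

        struct file_lock fl;

        flock_make_lock(f.file, &fl, type);     /* F_RDLCK/F_WRLCK/F_UNLCK */
        if (can_sleep)
                fl.fl_flags |= FL_SLEEP;        /* blocking unless LOCK_NB */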
 
@@ -2614,7 +2593,7 @@ locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
        if (list_empty(&flctx->flc_flock))
                return;
 
-       flock_make_lock(filp, LOCK_UN, &fl);
+       flock_make_lock(filp, &fl, F_UNLCK);
        fl.fl_flags |= FL_CLOSE;
 
        if (filp->f_op->flock)
index 42f892c..0ce5358 100644 (file)
@@ -319,8 +319,9 @@ zero_out:
  * conflicting writes once the folio is grabbed and locked.  It is passed a
  * pointer to the fsdata cookie that gets returned to the VM to be passed to
  * write_end.  It is permitted to sleep.  It should return 0 if the request
- * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
- * be regot; or return an error.
+ * should go ahead or it may return an error.  It may also unlock and put the
+ * folio, provided it sets ``*foliop`` to NULL, in which case a return of 0
+ * will cause the folio to be looked up again and the operation to be retried.
  *
  * The calling netfs must initialise a netfs context contiguous to the vfs
  * inode before calling this.
@@ -348,13 +349,13 @@ retry:
 
        if (ctx->ops->check_write_begin) {
                /* Allow the netfs (eg. ceph) to flush conflicts. */
-               ret = ctx->ops->check_write_begin(file, pos, len, folio, _fsdata);
+               ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
                if (ret < 0) {
                        trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
-                       if (ret == -EAGAIN)
-                               goto retry;
                        goto error;
                }
+               if (!folio)
+                       goto retry;
        }
 
        if (folio_test_uptodate(folio))
@@ -416,8 +417,10 @@ have_folio_no_wait:
 error_put:
        netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
 error:
-       folio_unlock(folio);
-       folio_put(folio);
+       if (folio) {
+               folio_unlock(folio);
+               folio_put(folio);
+       }
        _leave(" = %d", ret);
        return ret;
 }
index c852028..c1eda73 100644 (file)
@@ -288,6 +288,7 @@ static u32 initiate_file_draining(struct nfs_client *clp,
                rv = NFS4_OK;
                break;
        case -ENOENT:
+               set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags);
                /* Embrace your forgetfulness! */
                rv = NFS4ERR_NOMATCHING_LAYOUT;
 
index a8ecdd5..0c4e8dd 100644 (file)
@@ -2124,6 +2124,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
                }
                goto out;
        }
+       file->f_mode |= FMODE_CAN_ODIRECT;
 
        err = nfs_finish_open(ctx, ctx->dentry, file, open_flags);
        trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
index 03d3a27..e88f6b1 100644 (file)
@@ -93,6 +93,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
        nfs_file_set_open_context(filp, ctx);
        nfs_fscache_open_file(inode, filp);
        err = 0;
+       filp->f_mode |= FMODE_CAN_ODIRECT;
 
 out_put_ctx:
        put_nfs_open_context(ctx);
index c0fdcf8..bb0e84a 100644 (file)
@@ -4012,22 +4012,29 @@ static int _nfs4_discover_trunking(struct nfs_server *server,
        }
 
        page = alloc_page(GFP_KERNEL);
+       if (!page)
+               return -ENOMEM;
        locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
-       if (page == NULL || locations == NULL)
-               goto out;
+       if (!locations)
+               goto out_free;
+       locations->fattr = nfs_alloc_fattr();
+       if (!locations->fattr)
+               goto out_free_2;
 
        status = nfs4_proc_get_locations(server, fhandle, locations, page,
                                         cred);
        if (status)
-               goto out;
+               goto out_free_3;
 
        for (i = 0; i < locations->nlocations; i++)
                test_fs_location_for_trunking(&locations->locations[i], clp,
                                              server);
-out:
-       if (page)
-               __free_page(page);
+out_free_3:
+       kfree(locations->fattr);
+out_free_2:
        kfree(locations);
+out_free:
+       __free_page(page);
        return status;
 }
 
index 2540b35..9bab3e9 100644 (file)
@@ -2753,5 +2753,6 @@ again:
                goto again;
 
        nfs_put_client(clp);
+       module_put_and_kthread_exit(0);
        return 0;
 }
index 68a87be..41a9b6b 100644 (file)
@@ -469,6 +469,7 @@ pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
                pnfs_clear_lseg_state(lseg, lseg_list);
        pnfs_clear_layoutreturn_info(lo);
        pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
+       set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags);
        if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
            !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
                pnfs_clear_layoutreturn_waitbit(lo);
@@ -1917,8 +1918,9 @@ static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
 
 static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
 {
-       if (atomic_dec_and_test(&lo->plh_outstanding))
-               wake_up_var(&lo->plh_outstanding);
+       if (atomic_dec_and_test(&lo->plh_outstanding) &&
+           test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags))
+               wake_up_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN);
 }
 
 static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
@@ -2025,11 +2027,11 @@ lookup_again:
         * If the layout segment list is empty, but there are outstanding
         * layoutget calls, then they might be subject to a layoutrecall.
         */
-       if ((list_empty(&lo->plh_segs) || !pnfs_layout_is_valid(lo)) &&
+       if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) &&
            atomic_read(&lo->plh_outstanding) != 0) {
                spin_unlock(&ino->i_lock);
-               lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
-                                       !atomic_read(&lo->plh_outstanding)));
+               lseg = ERR_PTR(wait_on_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN,
+                                          TASK_KILLABLE));
                if (IS_ERR(lseg))
                        goto out_put_layout_hdr;
                pnfs_put_layout_hdr(lo);
@@ -2152,6 +2154,12 @@ lookup_again:
                case -ERECALLCONFLICT:
                case -EAGAIN:
                        break;
+               case -ENODATA:
+                       /* The server returned NFS4ERR_LAYOUTUNAVAILABLE */
+                       pnfs_layout_set_fail_bit(
+                               lo, pnfs_iomode_to_fail_bit(iomode));
+                       lseg = NULL;
+                       goto out_put_layout_hdr;
                default:
                        if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
                                pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
@@ -2407,7 +2415,8 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
                goto out_forget;
        }
 
-       if (!pnfs_layout_is_valid(lo) && !pnfs_is_first_layoutget(lo))
+       if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) &&
+           !pnfs_is_first_layoutget(lo))
                goto out_forget;
 
        if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
index 07f1148..f331f06 100644 (file)
@@ -105,6 +105,7 @@ enum {
        NFS_LAYOUT_FIRST_LAYOUTGET,     /* Serialize first layoutget */
        NFS_LAYOUT_INODE_FREEING,       /* The inode is being freed */
        NFS_LAYOUT_HASHED,              /* The layout visible */
+       NFS_LAYOUT_DRAIN,
 };
 
 enum layoutdriver_policy_flags {
index 61b2aae..2acea77 100644 (file)
@@ -470,6 +470,15 @@ nfsd4_decode_fattr4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen,
                        return nfserr_bad_xdr;
                }
        }
+       if (bmval[1] & FATTR4_WORD1_TIME_CREATE) {
+               struct timespec64 ts;
+
+               /* No Linux filesystem supports setting this attribute. */
+               bmval[1] &= ~FATTR4_WORD1_TIME_CREATE;
+               status = nfsd4_decode_nfstime4(argp, &ts);
+               if (status)
+                       return status;
+       }
        if (bmval[1] & FATTR4_WORD1_TIME_MODIFY_SET) {
                u32 set_it;
 
index 847b482..9a8b09a 100644 (file)
@@ -465,7 +465,8 @@ static inline bool nfsd_attrs_supported(u32 minorversion, const u32 *bmval)
        (FATTR4_WORD0_SIZE | FATTR4_WORD0_ACL)
 #define NFSD_WRITEABLE_ATTRS_WORD1 \
        (FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \
-       | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
+       | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_CREATE \
+       | FATTR4_WORD1_TIME_MODIFY_SET)
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
 #define MAYBE_FATTR4_WORD2_SECURITY_LABEL \
        FATTR4_WORD2_SECURITY_LABEL
index 840e3af..d79db56 100644 (file)
@@ -577,6 +577,7 @@ out_err:
 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
                             u64 dst_pos, u64 count)
 {
+       ssize_t ret;
 
        /*
         * Limit copy to 4MB to prevent indefinitely blocking an nfsd
@@ -587,7 +588,12 @@ ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
         * limit like this and pipeline multiple COPY requests.
         */
        count = min_t(u64, count, 1 << 22);
-       return vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0);
+       ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0);
+
+       if (ret == -EOPNOTSUPP || ret == -EXDEV)
+               ret = generic_copy_file_range(src, src_pos, dst, dst_pos,
+                                             count, 0);
+       return ret;
 }
 
 __be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp,
@@ -1173,6 +1179,7 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, u64 offset,
                        nfsd_copy_write_verifier(verf, nn);
                        err2 = filemap_check_wb_err(nf->nf_file->f_mapping,
                                                    since);
+                       err = nfserrno(err2);
                        break;
                case -EINVAL:
                        err = nfserr_notsupp;
@@ -1180,8 +1187,8 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, u64 offset,
                default:
                        nfsd_reset_write_verifier(nn);
                        trace_nfsd_writeverf_reset(nn, rqstp, err2);
+                       err = nfserrno(err2);
                }
-               err = nfserrno(err2);
        } else
                nfsd_copy_write_verifier(verf, nn);
 
index 1344f7d..aecda4f 100644 (file)
@@ -198,6 +198,9 @@ static inline int nilfs_acl_chmod(struct inode *inode)
 
 static inline int nilfs_init_acl(struct inode *inode, struct inode *dir)
 {
+       if (S_ISLNK(inode->i_mode))
+               return 0;
+
        inode->i_mode &= ~current_umask();
        return 0;
 }
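This stub runs when CONFIG_NILFS_FS_POSIX_ACL is disabled; previously it applied the umask to every new inode, including symlinks, whose mode is expected to stay 0777. A hypothetical demonstration of the visible effect (made-up mount point, error handling elided):

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        struct stat st;

        umask(022);
        symlink("target", "/mnt/nilfs/link");
        lstat("/mnt/nilfs/link", &st);
        /* expected 777; before the fix above, a masked 755 */
        printf("symlink mode: %o\n", (unsigned)(st.st_mode & 07777));
        return 0;
}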
index 4f897e1..cd7d09a 100644 (file)
@@ -295,12 +295,13 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
                                     const void *data, int data_type,
                                     struct inode *dir)
 {
-       __u32 marks_mask = 0, marks_ignored_mask = 0;
+       __u32 marks_mask = 0, marks_ignore_mask = 0;
        __u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS |
                                     FANOTIFY_EVENT_FLAGS;
        const struct path *path = fsnotify_data_path(data, data_type);
        unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
        struct fsnotify_mark *mark;
+       bool ondir = event_mask & FAN_ONDIR;
        int type;
 
        pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
@@ -315,19 +316,21 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
                        return 0;
        } else if (!(fid_mode & FAN_REPORT_FID)) {
                /* Do we have a directory inode to report? */
-               if (!dir && !(event_mask & FS_ISDIR))
+               if (!dir && !ondir)
                        return 0;
        }
 
        fsnotify_foreach_iter_mark_type(iter_info, mark, type) {
-               /* Apply ignore mask regardless of mark's ISDIR flag */
-               marks_ignored_mask |= mark->ignored_mask;
+               /*
+                * Apply ignore mask depending on event flags in ignore mask.
+                */
+               marks_ignore_mask |=
+                       fsnotify_effective_ignore_mask(mark, ondir, type);
 
                /*
-                * If the event is on dir and this mark doesn't care about
-                * events on dir, don't send it!
+                * Send the event depending on event flags in mark mask.
                 */
-               if (event_mask & FS_ISDIR && !(mark->mask & FS_ISDIR))
+               if (!fsnotify_mask_applicable(mark->mask, ondir, type))
                        continue;
 
                marks_mask |= mark->mask;
@@ -336,7 +339,7 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
                *match_mask |= 1U << type;
        }
 
-       test_mask = event_mask & marks_mask & ~marks_ignored_mask;
+       test_mask = event_mask & marks_mask & ~marks_ignore_mask;
 
        /*
         * For dirent modification events (create/delete/move) that do not carry
index 80e0ec9..1d9f112 100644 (file)
@@ -499,6 +499,8 @@ static inline unsigned int fanotify_mark_user_flags(struct fsnotify_mark *mark)
                mflags |= FAN_MARK_IGNORED_SURV_MODIFY;
        if (mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF)
                mflags |= FAN_MARK_EVICTABLE;
+       if (mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS)
+               mflags |= FAN_MARK_IGNORE;
 
        return mflags;
 }
index c2255b4..f0e49a4 100644 (file)
@@ -1009,10 +1009,10 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
        mask &= ~umask;
        spin_lock(&fsn_mark->lock);
        oldmask = fsnotify_calc_mask(fsn_mark);
-       if (!(flags & FAN_MARK_IGNORED_MASK)) {
+       if (!(flags & FANOTIFY_MARK_IGNORE_BITS)) {
                fsn_mark->mask &= ~mask;
        } else {
-               fsn_mark->ignored_mask &= ~mask;
+               fsn_mark->ignore_mask &= ~mask;
        }
        newmask = fsnotify_calc_mask(fsn_mark);
        /*
@@ -1021,7 +1021,7 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
         * changes to the mask.
         * Destroy mark when only umask bits remain.
         */
-       *destroy = !((fsn_mark->mask | fsn_mark->ignored_mask) & ~umask);
+       *destroy = !((fsn_mark->mask | fsn_mark->ignore_mask) & ~umask);
        spin_unlock(&fsn_mark->lock);
 
        return oldmask & ~newmask;
@@ -1085,15 +1085,24 @@ static bool fanotify_mark_update_flags(struct fsnotify_mark *fsn_mark,
                                       unsigned int fan_flags)
 {
        bool want_iref = !(fan_flags & FAN_MARK_EVICTABLE);
+       unsigned int ignore = fan_flags & FANOTIFY_MARK_IGNORE_BITS;
        bool recalc = false;
 
        /*
+        * When using FAN_MARK_IGNORE for the first time, mark starts using
+        * independent event flags in ignore mask.  After that, trying to
+        * update the ignore mask with the old FAN_MARK_IGNORED_MASK API
+        * will result in EEXIST error.
+        */
+       if (ignore == FAN_MARK_IGNORE)
+               fsn_mark->flags |= FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS;
+
+       /*
         * Setting FAN_MARK_IGNORED_SURV_MODIFY for the first time may lead to
         * the removal of the FS_MODIFY bit in calculated mask if it was set
-        * because of an ignored mask that is now going to survive FS_MODIFY.
+        * because of an ignore mask that is now going to survive FS_MODIFY.
         */
-       if ((fan_flags & FAN_MARK_IGNORED_MASK) &&
-           (fan_flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
+       if (ignore && (fan_flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
            !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) {
                fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
                if (!(fsn_mark->mask & FS_MODIFY))
@@ -1120,10 +1129,10 @@ static bool fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
        bool recalc;
 
        spin_lock(&fsn_mark->lock);
-       if (!(fan_flags & FAN_MARK_IGNORED_MASK))
+       if (!(fan_flags & FANOTIFY_MARK_IGNORE_BITS))
                fsn_mark->mask |= mask;
        else
-               fsn_mark->ignored_mask |= mask;
+               fsn_mark->ignore_mask |= mask;
 
        recalc = fsnotify_calc_mask(fsn_mark) &
                ~fsnotify_conn_mask(fsn_mark->connector);
@@ -1187,6 +1196,37 @@ static int fanotify_group_init_error_pool(struct fsnotify_group *group)
                                         sizeof(struct fanotify_error_event));
 }
 
+static int fanotify_may_update_existing_mark(struct fsnotify_mark *fsn_mark,
+                                             unsigned int fan_flags)
+{
+       /*
+        * Non evictable mark cannot be downgraded to evictable mark.
+        */
+       if (fan_flags & FAN_MARK_EVICTABLE &&
+           !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF))
+               return -EEXIST;
+
+       /*
+        * New ignore mask semantics cannot be downgraded to old semantics.
+        */
+       if (fan_flags & FAN_MARK_IGNORED_MASK &&
+           fsn_mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS)
+               return -EEXIST;
+
+       /*
+        * An ignore mask that survives modify could never be downgraded to not
+        * survive modify.  With new FAN_MARK_IGNORE semantics we make that rule
+        * explicit and return an error when trying to update the ignore mask
+        * without the original FAN_MARK_IGNORED_SURV_MODIFY value.
+        */
+       if (fan_flags & FAN_MARK_IGNORE &&
+           !(fan_flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
+           fsn_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)
+               return -EEXIST;
+
+       return 0;
+}
+
 static int fanotify_add_mark(struct fsnotify_group *group,
                             fsnotify_connp_t *connp, unsigned int obj_type,
                             __u32 mask, unsigned int fan_flags,
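FAN_MARK_IGNORE is the API these compatibility checks guard. Unlike the old FAN_MARK_IGNORED_MASK, it honors the event flags (FAN_ONDIR, FAN_EVENT_ON_CHILD) inside the ignore mask, and a later hunk enforces FAN_MARK_IGNORED_SURV_MODIFY for sb, mount and directory ignore marks. A hypothetical userspace sketch (made-up paths, no error handling, assumes a kernel with this series applied):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/fanotify.h>

int main(void)
{
        int fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);

        /* Watch the whole mount for opens, directories included. */
        fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
                      FAN_OPEN | FAN_ONDIR, AT_FDCWD, "/mnt");

        /* ...but ignore opens under /mnt/cache.  With FAN_MARK_IGNORE
         * the FAN_ONDIR and FAN_EVENT_ON_CHILD flags in the ignore
         * mask take effect, and SURV_MODIFY is mandatory for a
         * directory ignore mark. */
        fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_IGNORE |
                          FAN_MARK_IGNORED_SURV_MODIFY,
                      FAN_OPEN | FAN_ONDIR | FAN_EVENT_ON_CHILD,
                      AT_FDCWD, "/mnt/cache");
        return 0;
}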
@@ -1208,19 +1248,18 @@ static int fanotify_add_mark(struct fsnotify_group *group,
        }
 
        /*
-        * Non evictable mark cannot be downgraded to evictable mark.
+        * Check if requested mark flags conflict with the existing mark's flags.
         */
-       if (fan_flags & FAN_MARK_EVICTABLE &&
-           !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF)) {
-               ret = -EEXIST;
+       ret = fanotify_may_update_existing_mark(fsn_mark, fan_flags);
+       if (ret)
                goto out;
-       }
 
        /*
         * Error events are pre-allocated per group, only if strictly
         * needed (i.e. FAN_FS_ERROR was requested).
         */
-       if (!(fan_flags & FAN_MARK_IGNORED_MASK) && (mask & FAN_FS_ERROR)) {
+       if (!(fan_flags & FANOTIFY_MARK_IGNORE_BITS) &&
+           (mask & FAN_FS_ERROR)) {
                ret = fanotify_group_init_error_pool(group);
                if (ret)
                        goto out;
@@ -1261,10 +1300,10 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,
 
        /*
         * If some other task has this inode open for write we should not add
-        * an ignored mark, unless that ignored mark is supposed to survive
+        * an ignore mask, unless that ignore mask is supposed to survive
         * modification changes anyway.
         */
-       if ((flags & FAN_MARK_IGNORED_MASK) &&
+       if ((flags & FANOTIFY_MARK_IGNORE_BITS) &&
            !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
            inode_is_open_for_write(inode))
                return 0;
@@ -1513,8 +1552,16 @@ static int fanotify_test_fid(struct dentry *dentry)
        return 0;
 }
 
-static int fanotify_events_supported(struct path *path, __u64 mask)
+static int fanotify_events_supported(struct fsnotify_group *group,
+                                    struct path *path, __u64 mask,
+                                    unsigned int flags)
 {
+       unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
+       /* Strict validation of events in non-dir inode mask with v5.17+ APIs */
+       bool strict_dir_events = FAN_GROUP_FLAG(group, FAN_REPORT_TARGET_FID) ||
+                                (mask & FAN_RENAME) ||
+                                (flags & FAN_MARK_IGNORE);
+
        /*
         * Some filesystems such as 'proc' acquire unusual locks when opening
         * files. For them fanotify permission events have high chances of
@@ -1526,6 +1573,16 @@ static int fanotify_events_supported(struct path *path, __u64 mask)
        if (mask & FANOTIFY_PERM_EVENTS &&
            path->mnt->mnt_sb->s_type->fs_flags & FS_DISALLOW_NOTIFY_PERM)
                return -EINVAL;
+
+       /*
+        * We shouldn't have allowed setting dirent events and the directory
+        * flags FAN_ONDIR and FAN_EVENT_ON_CHILD in mask of non-dir inode,
+        * but because we always allowed it, error only when using new APIs.
+        */
+       if (strict_dir_events && mark_type == FAN_MARK_INODE &&
+           !d_is_dir(path->dentry) && (mask & FANOTIFY_DIRONLY_EVENT_BITS))
+               return -ENOTDIR;
+
        return 0;
 }
 
@@ -1540,7 +1597,8 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
        __kernel_fsid_t __fsid, *fsid = NULL;
        u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS;
        unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
-       bool ignored = flags & FAN_MARK_IGNORED_MASK;
+       unsigned int mark_cmd = flags & FANOTIFY_MARK_CMD_BITS;
+       unsigned int ignore = flags & FANOTIFY_MARK_IGNORE_BITS;
        unsigned int obj_type, fid_mode;
        u32 umask = 0;
        int ret;
@@ -1569,7 +1627,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
                return -EINVAL;
        }
 
-       switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
+       switch (mark_cmd) {
        case FAN_MARK_ADD:
        case FAN_MARK_REMOVE:
                if (!mask)
@@ -1589,9 +1647,19 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
        if (mask & ~valid_mask)
                return -EINVAL;
 
-       /* Event flags (ONDIR, ON_CHILD) are meaningless in ignored mask */
-       if (ignored)
+
+       /* We don't allow FAN_MARK_IGNORE & FAN_MARK_IGNORED_MASK together */
+       if (ignore == (FAN_MARK_IGNORE | FAN_MARK_IGNORED_MASK))
+               return -EINVAL;
+
+       /*
+        * Event flags (FAN_ONDIR, FAN_EVENT_ON_CHILD) have no effect with
+        * FAN_MARK_IGNORED_MASK.
+        */
+       if (ignore == FAN_MARK_IGNORED_MASK) {
                mask &= ~FANOTIFY_EVENT_FLAGS;
+               umask = FANOTIFY_EVENT_FLAGS;
+       }
 
        f = fdget(fanotify_fd);
        if (unlikely(!f.file))
@@ -1655,7 +1723,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
        if (mask & FAN_RENAME && !(fid_mode & FAN_REPORT_NAME))
                goto fput_and_out;
 
-       if (flags & FAN_MARK_FLUSH) {
+       if (mark_cmd == FAN_MARK_FLUSH) {
                ret = 0;
                if (mark_type == FAN_MARK_MOUNT)
                        fsnotify_clear_vfsmount_marks_by_group(group);
@@ -1671,8 +1739,8 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
        if (ret)
                goto fput_and_out;
 
-       if (flags & FAN_MARK_ADD) {
-               ret = fanotify_events_supported(&path, mask);
+       if (mark_cmd == FAN_MARK_ADD) {
+               ret = fanotify_events_supported(group, &path, mask, flags);
                if (ret)
                        goto path_put_and_out;
        }
@@ -1695,17 +1763,11 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
        else
                mnt = path.mnt;
 
-       /*
-        * FAN_RENAME is not allowed on non-dir (for now).
-        * We shouldn't have allowed setting any dirent events in mask of
-        * non-dir, but because we always allowed it, error only if group
-        * was initialized with the new flag FAN_REPORT_TARGET_FID.
-        */
-       ret = -ENOTDIR;
-       if (inode && !S_ISDIR(inode->i_mode) &&
-           ((mask & FAN_RENAME) ||
-            ((mask & FANOTIFY_DIRENT_EVENTS) &&
-             FAN_GROUP_FLAG(group, FAN_REPORT_TARGET_FID))))
+       ret = mnt ? -EINVAL : -EISDIR;
+       /* FAN_MARK_IGNORE requires SURV_MODIFY for sb/mount/dir marks */
+       if (mark_cmd == FAN_MARK_ADD && ignore == FAN_MARK_IGNORE &&
+           (mnt || S_ISDIR(inode->i_mode)) &&
+           !(flags & FAN_MARK_IGNORED_SURV_MODIFY))
                goto path_put_and_out;
 
        /* Mask out FAN_EVENT_ON_CHILD flag for sb/mount/non-dir marks */
@@ -1717,12 +1779,12 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
                 * events with parent/name info for non-directory.
                 */
                if ((fid_mode & FAN_REPORT_DIR_FID) &&
-                   (flags & FAN_MARK_ADD) && !ignored)
+                   (flags & FAN_MARK_ADD) && !ignore)
                        mask |= FAN_EVENT_ON_CHILD;
        }
 
        /* create/update an inode mark */
-       switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
+       switch (mark_cmd) {
        case FAN_MARK_ADD:
                if (mark_type == FAN_MARK_MOUNT)
                        ret = fanotify_add_vfsmount_mark(group, mnt, mask,
@@ -1800,7 +1862,7 @@ static int __init fanotify_user_setup(void)
 
        BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS);
        BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 12);
-       BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 10);
+       BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 11);
 
        fanotify_mark_cache = KMEM_CACHE(fsnotify_mark,
                                         SLAB_PANIC|SLAB_ACCOUNT);
index 59fb40a..55081ae 100644 (file)
@@ -113,7 +113,7 @@ static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
                        return;
                seq_printf(m, "fanotify ino:%lx sdev:%x mflags:%x mask:%x ignored_mask:%x ",
                           inode->i_ino, inode->i_sb->s_dev,
-                          mflags, mark->mask, mark->ignored_mask);
+                          mflags, mark->mask, mark->ignore_mask);
                show_mark_fhandle(m, inode);
                seq_putc(m, '\n');
                iput(inode);
@@ -121,12 +121,12 @@ static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
                struct mount *mnt = fsnotify_conn_mount(mark->connector);
 
                seq_printf(m, "fanotify mnt_id:%x mflags:%x mask:%x ignored_mask:%x\n",
-                          mnt->mnt_id, mflags, mark->mask, mark->ignored_mask);
+                          mnt->mnt_id, mflags, mark->mask, mark->ignore_mask);
        } else if (mark->connector->type == FSNOTIFY_OBJ_TYPE_SB) {
                struct super_block *sb = fsnotify_conn_sb(mark->connector);
 
                seq_printf(m, "fanotify sdev:%x mflags:%x mask:%x ignored_mask:%x\n",
-                          sb->s_dev, mflags, mark->mask, mark->ignored_mask);
+                          sb->s_dev, mflags, mark->mask, mark->ignore_mask);
        }
 }
 
index 0b3e749..7974e91 100644 (file)
@@ -100,7 +100,7 @@ void fsnotify_sb_delete(struct super_block *sb)
  * Given an inode, first check if we care what happens to our children.  Inotify
  * and dnotify both tell their parents about events.  If we care about any event
  * on a child we run all of our children and set a dentry flag saying that the
- * parent cares.  Thus when an event happens on a child it can quickly tell if
+ * parent cares.  Thus when an event happens on a child it can quickly tell
  * if there is a need to find a parent and send the event to the parent.
  */
 void __fsnotify_update_child_dentry_flags(struct inode *inode)
@@ -324,7 +324,8 @@ static int send_to_group(__u32 mask, const void *data, int data_type,
        struct fsnotify_group *group = NULL;
        __u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS);
        __u32 marks_mask = 0;
-       __u32 marks_ignored_mask = 0;
+       __u32 marks_ignore_mask = 0;
+       bool is_dir = mask & FS_ISDIR;
        struct fsnotify_mark *mark;
        int type;
 
@@ -336,7 +337,7 @@ static int send_to_group(__u32 mask, const void *data, int data_type,
                fsnotify_foreach_iter_mark_type(iter_info, mark, type) {
                        if (!(mark->flags &
                              FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
-                               mark->ignored_mask = 0;
+                               mark->ignore_mask = 0;
                }
        }
 
@@ -344,14 +345,15 @@ static int send_to_group(__u32 mask, const void *data, int data_type,
        fsnotify_foreach_iter_mark_type(iter_info, mark, type) {
                group = mark->group;
                marks_mask |= mark->mask;
-               marks_ignored_mask |= mark->ignored_mask;
+               marks_ignore_mask |=
+                       fsnotify_effective_ignore_mask(mark, is_dir, type);
        }
 
-       pr_debug("%s: group=%p mask=%x marks_mask=%x marks_ignored_mask=%x data=%p data_type=%d dir=%p cookie=%d\n",
-                __func__, group, mask, marks_mask, marks_ignored_mask,
+       pr_debug("%s: group=%p mask=%x marks_mask=%x marks_ignore_mask=%x data=%p data_type=%d dir=%p cookie=%d\n",
+                __func__, group, mask, marks_mask, marks_ignore_mask,
                 data, data_type, dir, cookie);
 
-       if (!(test_mask & marks_mask & ~marks_ignored_mask))
+       if (!(test_mask & marks_mask & ~marks_ignore_mask))
                return 0;
 
        if (group->ops->handle_event) {
@@ -423,7 +425,8 @@ static bool fsnotify_iter_select_report_types(
                         * But is *this mark* watching children?
                         */
                        if (type == FSNOTIFY_ITER_TYPE_PARENT &&
-                           !(mark->mask & FS_EVENT_ON_CHILD))
+                           !(mark->mask & FS_EVENT_ON_CHILD) &&
+                           !(fsnotify_ignore_mask(mark) & FS_EVENT_ON_CHILD))
                                continue;
 
                        fsnotify_iter_set_report_type(iter_info, type);
@@ -532,8 +535,8 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
 
 
        /*
-        * If this is a modify event we may need to clear some ignored masks.
-        * In that case, the object with ignored masks will have the FS_MODIFY
+        * If this is a modify event we may need to clear some ignore masks.
+        * In that case, the object with ignore masks will have the FS_MODIFY
         * event in its mask.
         * Otherwise, return if none of the marks care about this type of event.
         */
index ed42a18..1c4bfda 100644 (file)
@@ -136,7 +136,7 @@ static inline u32 inotify_mask_to_arg(__u32 mask)
                       IN_Q_OVERFLOW);
 }
 
-/* intofiy userspace file descriptor functions */
+/* inotify userspace file descriptor functions */
 static __poll_t inotify_poll(struct file *file, poll_table *wait)
 {
        struct fsnotify_group *group = file->private_data;
index 4de597a..52615e6 100644 (file)
@@ -592,8 +592,12 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
                a = (ATTR_RECORD*)((u8*)ctx->attr +
                                le32_to_cpu(ctx->attr->length));
        for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
-               if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
-                               le32_to_cpu(ctx->mrec->bytes_allocated))
+               u8 *mrec_end = (u8 *)ctx->mrec +
+                              le32_to_cpu(ctx->mrec->bytes_allocated);
+               u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
+                              a->name_length * sizeof(ntfschar);
+               if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
+                   name_end > mrec_end)
                        break;
                ctx->attr = a;
                if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
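The added name_end check closes an out-of-bounds read: name_offset and name_length come from the (possibly corrupted) MFT record itself, so the walk must verify the attribute name fits inside the record before trusting the entry. The general shape of such a check, with illustrative names rather than the real ntfs structures (a hardened version would also guard the additions against pointer overflow):

#include <stdbool.h>
#include <stdint.h>

static bool name_in_bounds(const uint8_t *rec, uint16_t name_off,
                           uint8_t name_len, const uint8_t *buf_end)
{
        /* 2 bytes per UTF-16 code unit, as with ntfschar */
        const uint8_t *name_end = rec + name_off + name_len * 2;

        return name_end <= buf_end;
}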
index 7497cd5..9c67edd 100644 (file)
@@ -1146,7 +1146,7 @@ int ocfs2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
        if (status)
                return status;
 
-       if (is_quota_modification(inode, attr)) {
+       if (is_quota_modification(mnt_userns, inode, attr)) {
                status = dquot_initialize(inode);
                if (status)
                        return status;
index 3375275..740b642 100644 (file)
@@ -277,7 +277,6 @@ enum ocfs2_mount_options
        OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15,  /* Journal Async Commit */
        OCFS2_MOUNT_ERRORS_CONT = 1 << 16, /* Return EIO to the calling process on error */
        OCFS2_MOUNT_ERRORS_ROFS = 1 << 17, /* Change filesystem to read-only on error */
-       OCFS2_MOUNT_NOCLUSTER = 1 << 18, /* No cluster aware filesystem mount */
 };
 
 #define OCFS2_OSB_SOFT_RO      0x0001
@@ -673,8 +672,7 @@ static inline int ocfs2_cluster_o2cb_global_heartbeat(struct ocfs2_super *osb)
 
 static inline int ocfs2_mount_local(struct ocfs2_super *osb)
 {
-       return ((osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT)
-               || (osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER));
+       return (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT);
 }
 
 static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb)
index 0b0ae3e..da7718c 100644 (file)
@@ -252,16 +252,14 @@ static int __ocfs2_find_empty_slot(struct ocfs2_slot_info *si,
        int i, ret = -ENOSPC;
 
        if ((preferred >= 0) && (preferred < si->si_num_slots)) {
-               if (!si->si_slots[preferred].sl_valid ||
-                   !si->si_slots[preferred].sl_node_num) {
+               if (!si->si_slots[preferred].sl_valid) {
                        ret = preferred;
                        goto out;
                }
        }
 
        for(i = 0; i < si->si_num_slots; i++) {
-               if (!si->si_slots[i].sl_valid ||
-                   !si->si_slots[i].sl_node_num) {
+               if (!si->si_slots[i].sl_valid) {
                        ret = i;
                        break;
                }
@@ -456,30 +454,24 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
        spin_lock(&osb->osb_lock);
        ocfs2_update_slot_info(si);
 
-       if (ocfs2_mount_local(osb))
-               /* use slot 0 directly in local mode */
-               slot = 0;
-       else {
-               /* search for ourselves first and take the slot if it already
-                * exists. Perhaps we need to mark this in a variable for our
-                * own journal recovery? Possibly not, though we certainly
-                * need to warn to the user */
-               slot = __ocfs2_node_num_to_slot(si, osb->node_num);
+       /* search for ourselves first and take the slot if it already
+        * exists. Perhaps we need to mark this in a variable for our
+        * own journal recovery? Possibly not, though we certainly
+        * need to warn the user */
+       slot = __ocfs2_node_num_to_slot(si, osb->node_num);
+       if (slot < 0) {
+               /* if no slot yet, then just take 1st available
+                * one. */
+               slot = __ocfs2_find_empty_slot(si, osb->preferred_slot);
                if (slot < 0) {
-                       /* if no slot yet, then just take 1st available
-                        * one. */
-                       slot = __ocfs2_find_empty_slot(si, osb->preferred_slot);
-                       if (slot < 0) {
-                               spin_unlock(&osb->osb_lock);
-                               mlog(ML_ERROR, "no free slots available!\n");
-                               status = -EINVAL;
-                               goto bail;
-                       }
-               } else
-                       printk(KERN_INFO "ocfs2: Slot %d on device (%s) was "
-                              "already allocated to this node!\n",
-                              slot, osb->dev_str);
-       }
+                       spin_unlock(&osb->osb_lock);
+                       mlog(ML_ERROR, "no free slots available!\n");
+                       status = -EINVAL;
+                       goto bail;
+               }
+       } else
+               printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already "
+                      "allocated to this node!\n", slot, osb->dev_str);
 
        ocfs2_set_slot(si, slot, osb->node_num);
        osb->slot_num = slot;
index f729881..438be02 100644 (file)
@@ -172,7 +172,6 @@ enum {
        Opt_dir_resv_level,
        Opt_journal_async_commit,
        Opt_err_cont,
-       Opt_nocluster,
        Opt_err,
 };
 
@@ -206,7 +205,6 @@ static const match_table_t tokens = {
        {Opt_dir_resv_level, "dir_resv_level=%u"},
        {Opt_journal_async_commit, "journal_async_commit"},
        {Opt_err_cont, "errors=continue"},
-       {Opt_nocluster, "nocluster"},
        {Opt_err, NULL}
 };
 
@@ -618,13 +616,6 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
                goto out;
        }
 
-       tmp = OCFS2_MOUNT_NOCLUSTER;
-       if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
-               ret = -EINVAL;
-               mlog(ML_ERROR, "Cannot change nocluster option on remount\n");
-               goto out;
-       }
-
        tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
                OCFS2_MOUNT_HB_NONE;
        if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
@@ -865,7 +856,6 @@ static int ocfs2_verify_userspace_stack(struct ocfs2_super *osb,
        }
 
        if (ocfs2_userspace_stack(osb) &&
-           !(osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) &&
            strncmp(osb->osb_cluster_stack, mopt->cluster_stack,
                    OCFS2_STACK_LABEL_LEN)) {
                mlog(ML_ERROR,
@@ -1137,11 +1127,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
               osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" :
               "ordered");
 
-       if ((osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) &&
-          !(osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT))
-               printk(KERN_NOTICE "ocfs2: The shared device (%s) is mounted "
-                      "without cluster aware mode.\n", osb->dev_str);
-
        atomic_set(&osb->vol_state, VOLUME_MOUNTED);
        wake_up(&osb->osb_mount_event);
 
@@ -1452,9 +1437,6 @@ static int ocfs2_parse_options(struct super_block *sb,
                case Opt_journal_async_commit:
                        mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT;
                        break;
-               case Opt_nocluster:
-                       mopt->mount_opt |= OCFS2_MOUNT_NOCLUSTER;
-                       break;
                default:
                        mlog(ML_ERROR,
                             "Unrecognized mount option \"%s\" "
@@ -1566,9 +1548,6 @@ static int ocfs2_show_options(struct seq_file *s, struct dentry *root)
        if (opts & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT)
                seq_printf(s, ",journal_async_commit");
 
-       if (opts & OCFS2_MOUNT_NOCLUSTER)
-               seq_printf(s, ",nocluster");
-
        return 0;
 }
 
index 1d57fbd..2790aac 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -663,6 +663,42 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode)
        return do_fchmodat(AT_FDCWD, filename, mode);
 }
 
+/**
+ * setattr_vfsuid - check and set ia_fsuid attribute
+ * @kuid: new inode owner
+ *
+ * Check whether @kuid is valid and if so generate and set vfsuid_t in
+ * ia_vfsuid.
+ *
+ * Return: true if @kuid is valid, false if not.
+ */
+static inline bool setattr_vfsuid(struct iattr *attr, kuid_t kuid)
+{
+       if (!uid_valid(kuid))
+               return false;
+       attr->ia_valid |= ATTR_UID;
+       attr->ia_vfsuid = VFSUIDT_INIT(kuid);
+       return true;
+}
+
+/**
+ * setattr_vfsgid - check and set ia_fsgid attribute
+ * @kgid: new inode owner
+ *
+ * Check whether @kgid is valid and if so generate and set vfsgid_t in
+ * ia_vfsgid.
+ *
+ * Return: true if @kgid is valid, false if not.
+ */
+static inline bool setattr_vfsgid(struct iattr *attr, kgid_t kgid)
+{
+       if (!gid_valid(kgid))
+               return false;
+       attr->ia_valid |= ATTR_GID;
+       attr->ia_vfsgid = VFSGIDT_INIT(kgid);
+       return true;
+}
+
 int chown_common(const struct path *path, uid_t user, gid_t group)
 {
        struct user_namespace *mnt_userns, *fs_userns;
@@ -678,28 +714,22 @@ int chown_common(const struct path *path, uid_t user, gid_t group)
 
        mnt_userns = mnt_user_ns(path->mnt);
        fs_userns = i_user_ns(inode);
-       uid = mapped_kuid_user(mnt_userns, fs_userns, uid);
-       gid = mapped_kgid_user(mnt_userns, fs_userns, gid);
 
 retry_deleg:
        newattrs.ia_valid =  ATTR_CTIME;
-       if (user != (uid_t) -1) {
-               if (!uid_valid(uid))
-                       return -EINVAL;
-               newattrs.ia_valid |= ATTR_UID;
-               newattrs.ia_uid = uid;
-       }
-       if (group != (gid_t) -1) {
-               if (!gid_valid(gid))
-                       return -EINVAL;
-               newattrs.ia_valid |= ATTR_GID;
-               newattrs.ia_gid = gid;
-       }
+       if ((user != (uid_t)-1) && !setattr_vfsuid(&newattrs, uid))
+               return -EINVAL;
+       if ((group != (gid_t)-1) && !setattr_vfsgid(&newattrs, gid))
+               return -EINVAL;
        if (!S_ISDIR(inode->i_mode))
                newattrs.ia_valid |=
                        ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
        inode_lock(inode);
-       error = security_path_chown(path, uid, gid);
+       /* Continue to send actual fs values, not the mount values. */
+       error = security_path_chown(
+               path,
+               from_vfsuid(mnt_userns, fs_userns, newattrs.ia_vfsuid),
+               from_vfsgid(mnt_userns, fs_userns, newattrs.ia_vfsgid));
        if (!error)
                error = notify_change(mnt_userns, path->dentry, &newattrs,
                                      &delegated_inode);
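chown_common() now defers the mount translation: the caller-supplied ids are stored as vfsuid_t/vfsgid_t in the iattr, and the security hook is handed filesystem-wide values recovered with from_vfsuid()/from_vfsgid(). A deliberately simplified toy model of that round trip (the kernel walks full idmapping ranges, not a single offset):

#include <stdio.h>

/* Toy model: pretend an idmapped mount shifts filesystem ids by an
 * offset.  This only shows the round trip that make_vfsuid() and
 * from_vfsuid() perform, nothing more. */
struct toy_mnt { unsigned int offset; };

static unsigned int make_vfsuid_toy(struct toy_mnt *m, unsigned int kuid)
{
        return kuid + m->offset;        /* id as seen on the mount */
}

static unsigned int from_vfsuid_toy(struct toy_mnt *m, unsigned int vfsuid)
{
        return vfsuid - m->offset;      /* back to the filesystem id */
}

int main(void)
{
        struct toy_mnt m = { .offset = 100000 };
        unsigned int vfsuid = make_vfsuid_toy(&m, 1000);

        printf("mount view %u -> fs id %u\n",
               vfsuid, from_vfsuid_toy(&m, vfsuid));
        return 0;
}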
index 714ec56..245e2cb 100644 (file)
@@ -331,8 +331,8 @@ int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upperdentry,
        if (!err) {
                struct iattr attr = {
                        .ia_valid = ATTR_UID | ATTR_GID,
-                       .ia_uid = stat->uid,
-                       .ia_gid = stat->gid,
+                       .ia_vfsuid = VFSUIDT_INIT(stat->uid),
+                       .ia_vfsgid = VFSGIDT_INIT(stat->gid),
                };
                err = ovl_do_notify_change(ofs, upperdentry, &attr);
        }
index 492edde..7922b61 100644 (file)
@@ -454,23 +454,94 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
        return res;
 }
 
+/*
+ * Apply the idmapping of the layer to POSIX ACLs. The caller must pass a clone
+ * of the POSIX ACLs retrieved from the lower layer to this function to not
+ * alter the POSIX ACLs for the underlying filesystem.
+ */
+static void ovl_idmap_posix_acl(struct user_namespace *mnt_userns,
+                               struct posix_acl *acl)
+{
+       for (unsigned int i = 0; i < acl->a_count; i++) {
+               vfsuid_t vfsuid;
+               vfsgid_t vfsgid;
+
+               struct posix_acl_entry *e = &acl->a_entries[i];
+               switch (e->e_tag) {
+               case ACL_USER:
+                       vfsuid = make_vfsuid(mnt_userns, &init_user_ns, e->e_uid);
+                       e->e_uid = vfsuid_into_kuid(vfsuid);
+                       break;
+               case ACL_GROUP:
+                       vfsgid = make_vfsgid(mnt_userns, &init_user_ns, e->e_gid);
+                       e->e_gid = vfsgid_into_kgid(vfsgid);
+                       break;
+               }
+       }
+}
+
+/*
+ * When the relevant layer is an idmapped mount we need to take the idmapping
+ * of the layer into account and translate any ACL_{GROUP,USER} values
+ * according to the idmapped mount.
+ *
+ * We cannot alter the ACLs returned from the relevant layer as that would
+ * alter the cached values filesystem wide for the lower filesystem. Instead we
+ * can clone the ACLs and then apply the relevant idmapping of the layer.
+ *
+ * This is obviously only relevant when idmapped layers are used.
+ */
 struct posix_acl *ovl_get_acl(struct inode *inode, int type, bool rcu)
 {
        struct inode *realinode = ovl_inode_real(inode);
-       const struct cred *old_cred;
-       struct posix_acl *acl;
+       struct posix_acl *acl, *clone;
+       struct path realpath;
 
        if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
                return NULL;
 
-       if (rcu)
-               return get_cached_acl_rcu(realinode, type);
+       /* Careful in RCU walk mode */
+       ovl_i_path_real(inode, &realpath);
+       if (!realpath.dentry) {
+               WARN_ON(!rcu);
+               return ERR_PTR(-ECHILD);
+       }
 
-       old_cred = ovl_override_creds(inode->i_sb);
-       acl = get_acl(realinode, type);
-       revert_creds(old_cred);
+       if (rcu) {
+               acl = get_cached_acl_rcu(realinode, type);
+       } else {
+               const struct cred *old_cred;
+
+               old_cred = ovl_override_creds(inode->i_sb);
+               acl = get_acl(realinode, type);
+               revert_creds(old_cred);
+       }
+       /*
+        * If there are no POSIX ACLs, or we encountered an error,
+        * or the layer isn't idmapped we don't need to do anything.
+        */
+       if (!is_idmapped_mnt(realpath.mnt) || IS_ERR_OR_NULL(acl))
+               return acl;
 
-       return acl;
+       /*
+        * We only get here if the layer is idmapped. So drop out of RCU path
+        * walk so we can clone the ACLs. There's no need to release the ACLs
+        * since get_cached_acl_rcu() doesn't take a reference on the ACLs.
+        */
+       if (rcu)
+               return ERR_PTR(-ECHILD);
+
+       clone = posix_acl_clone(acl, GFP_KERNEL);
+       if (!clone)
+               clone = ERR_PTR(-ENOMEM);
+       else
+               ovl_idmap_posix_acl(mnt_user_ns(realpath.mnt), clone);
+       /*
+        * Since we're not in RCU path walk we always need to release the
+        * original ACLs.
+        */
+       posix_acl_release(acl);
+       return clone;
 }
 
 int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags)
index 4f34b7e..6ec815b 100644 (file)
@@ -139,17 +139,7 @@ static inline int ovl_do_notify_change(struct ovl_fs *ofs,
                                       struct dentry *upperdentry,
                                       struct iattr *attr)
 {
-       struct user_namespace *upper_mnt_userns = ovl_upper_mnt_userns(ofs);
-       struct user_namespace *fs_userns = i_user_ns(d_inode(upperdentry));
-
-       if (attr->ia_valid & ATTR_UID)
-               attr->ia_uid = mapped_kuid_user(upper_mnt_userns,
-                                               fs_userns, attr->ia_uid);
-       if (attr->ia_valid & ATTR_GID)
-               attr->ia_gid = mapped_kgid_user(upper_mnt_userns,
-                                               fs_userns, attr->ia_gid);
-
-       return notify_change(upper_mnt_userns, upperdentry, attr, NULL);
+       return notify_change(ovl_upper_mnt_userns(ofs), upperdentry, attr, NULL);
 }
 
 static inline int ovl_do_rmdir(struct ovl_fs *ofs,
@@ -259,7 +249,8 @@ static inline int ovl_do_setxattr(struct ovl_fs *ofs, struct dentry *dentry,
                                  const char *name, const void *value,
                                  size_t size, int flags)
 {
-       int err = vfs_setxattr(ovl_upper_mnt_userns(ofs), dentry, name, value, size, flags);
+       int err = vfs_setxattr(ovl_upper_mnt_userns(ofs), dentry, name,
+                              (void *)value, size, flags);
 
        pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, %d) = %i\n",
                 dentry, name, min((int)size, 48), value, size, flags, err);
index 962d324..1d17d7b 100644 (file)
@@ -199,7 +199,7 @@ EXPORT_SYMBOL(posix_acl_alloc);
 /*
  * Clone an ACL.
  */
-static struct posix_acl *
+struct posix_acl *
 posix_acl_clone(const struct posix_acl *acl, gfp_t flags)
 {
        struct posix_acl *clone = NULL;
@@ -213,6 +213,7 @@ posix_acl_clone(const struct posix_acl *acl, gfp_t flags)
        }
        return clone;
 }
+EXPORT_SYMBOL_GPL(posix_acl_clone);
 
 /*
  * Check if an acl is valid. Returns 0 if it is, or -E... otherwise.
@@ -361,8 +362,8 @@ posix_acl_permission(struct user_namespace *mnt_userns, struct inode *inode,
 {
        const struct posix_acl_entry *pa, *pe, *mask_obj;
        int found = 0;
-       kuid_t uid;
-       kgid_t gid;
+       vfsuid_t vfsuid;
+       vfsgid_t vfsgid;
 
        want &= MAY_READ | MAY_WRITE | MAY_EXEC;
 
@@ -370,30 +371,28 @@ posix_acl_permission(struct user_namespace *mnt_userns, struct inode *inode,
                 switch(pa->e_tag) {
                         case ACL_USER_OBJ:
                                /* (May have been checked already) */
-                               uid = i_uid_into_mnt(mnt_userns, inode);
-                               if (uid_eq(uid, current_fsuid()))
+                               vfsuid = i_uid_into_vfsuid(mnt_userns, inode);
+                               if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
                                         goto check_perm;
                                 break;
                         case ACL_USER:
-                               uid = mapped_kuid_fs(mnt_userns,
-                                                    i_user_ns(inode),
+                               vfsuid = make_vfsuid(mnt_userns, &init_user_ns,
                                                     pa->e_uid);
-                               if (uid_eq(uid, current_fsuid()))
+                               if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
                                         goto mask;
                                break;
                         case ACL_GROUP_OBJ:
-                               gid = i_gid_into_mnt(mnt_userns, inode);
-                               if (in_group_p(gid)) {
+                               vfsgid = i_gid_into_vfsgid(mnt_userns, inode);
+                               if (vfsgid_in_group_p(vfsgid)) {
                                        found = 1;
                                        if ((pa->e_perm & want) == want)
                                                goto mask;
                                 }
                                break;
                         case ACL_GROUP:
-                               gid = mapped_kgid_fs(mnt_userns,
-                                                    i_user_ns(inode),
+                               vfsgid = make_vfsgid(mnt_userns, &init_user_ns,
                                                     pa->e_gid);
-                               if (in_group_p(gid)) {
+                               if (vfsgid_in_group_p(vfsgid)) {
                                        found = 1;
                                        if ((pa->e_perm & want) == want)
                                                goto mask;
@@ -699,7 +698,7 @@ int posix_acl_update_mode(struct user_namespace *mnt_userns,
                return error;
        if (error == 0)
                *acl = NULL;
-       if (!in_group_p(i_gid_into_mnt(mnt_userns, inode)) &&
+       if (!vfsgid_in_group_p(i_gid_into_vfsgid(mnt_userns, inode)) &&
            !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
                mode &= ~S_ISGID;
        *mode_p = mode;
@@ -710,46 +709,127 @@ EXPORT_SYMBOL(posix_acl_update_mode);
 /*
  * Fix up the uids and gids in posix acl extended attributes in place.
  */
-static void posix_acl_fix_xattr_userns(
-       struct user_namespace *to, struct user_namespace *from,
-       struct user_namespace *mnt_userns,
-       void *value, size_t size, bool from_user)
+static int posix_acl_fix_xattr_common(void *value, size_t size)
+{
+       struct posix_acl_xattr_header *header = value;
+       int count;
+
+       if (!header)
+               return -EINVAL;
+       if (size < sizeof(struct posix_acl_xattr_header))
+               return -EINVAL;
+       if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
+               return -EINVAL;
+
+       count = posix_acl_xattr_count(size);
+       if (count < 0)
+               return -EINVAL;
+       if (count == 0)
+               return -EINVAL;
+
+       return count;
+}
+
+void posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns,
+                                    const struct inode *inode,
+                                    void *value, size_t size)
 {
        struct posix_acl_xattr_header *header = value;
        struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
        int count;
+       vfsuid_t vfsuid;
+       vfsgid_t vfsgid;
        kuid_t uid;
        kgid_t gid;
 
-       if (!value)
+       if (no_idmapping(mnt_userns, i_user_ns(inode)))
                return;
-       if (size < sizeof(struct posix_acl_xattr_header))
+
+       count = posix_acl_fix_xattr_common(value, size);
+       if (count < 0)
                return;
-       if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
+
+       for (end = entry + count; entry != end; entry++) {
+               switch (le16_to_cpu(entry->e_tag)) {
+               case ACL_USER:
+                       uid = make_kuid(&init_user_ns, le32_to_cpu(entry->e_id));
+                       vfsuid = make_vfsuid(mnt_userns, &init_user_ns, uid);
+                       entry->e_id = cpu_to_le32(from_kuid(&init_user_ns,
+                                               vfsuid_into_kuid(vfsuid)));
+                       break;
+               case ACL_GROUP:
+                       gid = make_kgid(&init_user_ns, le32_to_cpu(entry->e_id));
+                       vfsgid = make_vfsgid(mnt_userns, &init_user_ns, gid);
+                       entry->e_id = cpu_to_le32(from_kgid(&init_user_ns,
+                                               vfsgid_into_kgid(vfsgid)));
+                       break;
+               default:
+                       break;
+               }
+       }
+}
+
+void posix_acl_setxattr_idmapped_mnt(struct user_namespace *mnt_userns,
+                                    const struct inode *inode,
+                                    void *value, size_t size)
+{
+       struct posix_acl_xattr_header *header = value;
+       struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
+       int count;
+       vfsuid_t vfsuid;
+       vfsgid_t vfsgid;
+       kuid_t uid;
+       kgid_t gid;
+
+       if (no_idmapping(mnt_userns, i_user_ns(inode)))
                return;
 
-       count = posix_acl_xattr_count(size);
+       count = posix_acl_fix_xattr_common(value, size);
        if (count < 0)
                return;
-       if (count == 0)
+
+       for (end = entry + count; entry != end; entry++) {
+               switch (le16_to_cpu(entry->e_tag)) {
+               case ACL_USER:
+                       uid = make_kuid(&init_user_ns, le32_to_cpu(entry->e_id));
+                       vfsuid = VFSUIDT_INIT(uid);
+                       uid = from_vfsuid(mnt_userns, &init_user_ns, vfsuid);
+                       entry->e_id = cpu_to_le32(from_kuid(&init_user_ns, uid));
+                       break;
+               case ACL_GROUP:
+                       gid = make_kgid(&init_user_ns, le32_to_cpu(entry->e_id));
+                       vfsgid = VFSGIDT_INIT(gid);
+                       gid = from_vfsgid(mnt_userns, &init_user_ns, vfsgid);
+                       entry->e_id = cpu_to_le32(from_kgid(&init_user_ns, gid));
+                       break;
+               default:
+                       break;
+               }
+       }
+}
+
+static void posix_acl_fix_xattr_userns(
+       struct user_namespace *to, struct user_namespace *from,
+       void *value, size_t size)
+{
+       struct posix_acl_xattr_header *header = value;
+       struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
+       int count;
+       kuid_t uid;
+       kgid_t gid;
+
+       count = posix_acl_fix_xattr_common(value, size);
+       if (count < 0)
                return;
 
        for (end = entry + count; entry != end; entry++) {
                switch(le16_to_cpu(entry->e_tag)) {
                case ACL_USER:
                        uid = make_kuid(from, le32_to_cpu(entry->e_id));
-                       if (from_user)
-                               uid = mapped_kuid_user(mnt_userns, &init_user_ns, uid);
-                       else
-                               uid = mapped_kuid_fs(mnt_userns, &init_user_ns, uid);
                        entry->e_id = cpu_to_le32(from_kuid(to, uid));
                        break;
                case ACL_GROUP:
                        gid = make_kgid(from, le32_to_cpu(entry->e_id));
-                       if (from_user)
-                               gid = mapped_kgid_user(mnt_userns, &init_user_ns, gid);
-                       else
-                               gid = mapped_kgid_fs(mnt_userns, &init_user_ns, gid);
                        entry->e_id = cpu_to_le32(from_kgid(to, gid));
                        break;
                default:
@@ -758,34 +838,20 @@ static void posix_acl_fix_xattr_userns(
        }
 }
 
-void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
-                                  struct inode *inode,
-                                  void *value, size_t size)
+void posix_acl_fix_xattr_from_user(void *value, size_t size)
 {
        struct user_namespace *user_ns = current_user_ns();
-
-       /* Leave ids untouched on non-idmapped mounts. */
-       if (no_idmapping(mnt_userns, i_user_ns(inode)))
-               mnt_userns = &init_user_ns;
-       if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
+       if (user_ns == &init_user_ns)
                return;
-       posix_acl_fix_xattr_userns(&init_user_ns, user_ns, mnt_userns, value,
-                                  size, true);
+       posix_acl_fix_xattr_userns(&init_user_ns, user_ns, value, size);
 }
 
-void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
-                                struct inode *inode,
-                                void *value, size_t size)
+void posix_acl_fix_xattr_to_user(void *value, size_t size)
 {
        struct user_namespace *user_ns = current_user_ns();
-
-       /* Leave ids untouched on non-idmapped mounts. */
-       if (no_idmapping(mnt_userns, i_user_ns(inode)))
-               mnt_userns = &init_user_ns;
-       if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
+       if (user_ns == &init_user_ns)
                return;
-       posix_acl_fix_xattr_userns(user_ns, &init_user_ns, mnt_userns, value,
-                                  size, false);
+       posix_acl_fix_xattr_userns(user_ns, &init_user_ns, value, size);
 }
 
 /*
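The refactor splits the old combined fixup in two: posix_acl_fix_xattr_{from,to}_user() now only converts between the caller's user namespace and init_user_ns at the syscall boundary, while the new posix_acl_{get,set}xattr_idmapped_mnt() helpers apply the mount idmapping inside the VFS. A conceptual sketch of the getxattr direction under that split (the names here are illustrative, not kernel API):

#include <stdint.h>

/* Two independent stages for an ACL_USER id read via getxattr:
 * stage 1 (VFS, idmapped mounts only) maps the filesystem kuid
 * through the mount idmapping; stage 2 (syscall boundary) converts
 * the result into the caller's user namespace.  Either stage is the
 * identity when no mapping applies. */
typedef uint32_t (*id_map_fn)(uint32_t);

static uint32_t acl_uid_to_caller(uint32_t disk_uid,
                                  id_map_fn through_mount,  /* stage 1 */
                                  id_map_fn to_caller_ns)   /* stage 2 */
{
        return to_caller_ns(through_mount(disk_uid));
}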
index 09d1307..28966da 100644 (file)
@@ -2085,7 +2085,8 @@ EXPORT_SYMBOL(__dquot_transfer);
 /* Wrapper for transferring ownership of an inode for uid/gid only
  * Called from FSXXX_setattr()
  */
-int dquot_transfer(struct inode *inode, struct iattr *iattr)
+int dquot_transfer(struct user_namespace *mnt_userns, struct inode *inode,
+                  struct iattr *iattr)
 {
        struct dquot *transfer_to[MAXQUOTAS] = {};
        struct dquot *dquot;
@@ -2095,8 +2096,11 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
        if (!dquot_active(inode))
                return 0;
 
-       if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)){
-               dquot = dqget(sb, make_kqid_uid(iattr->ia_uid));
+       if (i_uid_needs_update(mnt_userns, iattr, inode)) {
+               kuid_t kuid = from_vfsuid(mnt_userns, i_user_ns(inode),
+                                         iattr->ia_vfsuid);
+
+               dquot = dqget(sb, make_kqid_uid(kuid));
                if (IS_ERR(dquot)) {
                        if (PTR_ERR(dquot) != -ESRCH) {
                                ret = PTR_ERR(dquot);
@@ -2106,8 +2110,11 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
                }
                transfer_to[USRQUOTA] = dquot;
        }
-       if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)){
-               dquot = dqget(sb, make_kqid_gid(iattr->ia_gid));
+       if (i_gid_needs_update(mnt_userns, iattr, inode)) {
+               kgid_t kgid = from_vfsgid(mnt_userns, i_user_ns(inode),
+                                         iattr->ia_vfsgid);
+
+               dquot = dqget(sb, make_kqid_gid(kgid));
                if (IS_ERR(dquot)) {
                        if (PTR_ERR(dquot) != -ESRCH) {
                                ret = PTR_ERR(dquot);
index b1b1cdf..397da02 100644 (file)
@@ -1263,6 +1263,9 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
                                          count, fl);
                file_end_write(out.file);
        } else {
+               if (out.file->f_flags & O_NONBLOCK)
+                       fl |= SPLICE_F_NONBLOCK;
+
                retval = splice_file_to_pipe(in.file, opipe, &pos, count, fl);
        }
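The two added lines make sendfile() honor O_NONBLOCK on the output side when the data is bounced through a pipe. A hypothetical check (error handling elided): fill a non-blocking pipe, then confirm sendfile() reports EAGAIN instead of blocking:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/sendfile.h>
#include <unistd.h>

int main(void)
{
        char junk[4096] = { 0 };
        int pfd[2];
        int in = open("/etc/hostname", O_RDONLY);

        pipe2(pfd, O_NONBLOCK);
        /* Fill the pipe buffer so further writes would block. */
        while (write(pfd[1], junk, sizeof(junk)) > 0)
                ;
        if (sendfile(pfd[1], in, NULL, 64) < 0 && errno == EAGAIN)
                puts("sendfile honored O_NONBLOCK");
        close(in);
        return 0;
}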
 
@@ -1397,28 +1400,6 @@ ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
 }
 EXPORT_SYMBOL(generic_copy_file_range);
 
-static ssize_t do_copy_file_range(struct file *file_in, loff_t pos_in,
-                                 struct file *file_out, loff_t pos_out,
-                                 size_t len, unsigned int flags)
-{
-       /*
-        * Although we now allow filesystems to handle cross sb copy, passing
-        * a file of the wrong filesystem type to filesystem driver can result
-        * in an attempt to dereference the wrong type of ->private_data, so
-        * avoid doing that until we really have a good reason.  NFS defines
-        * several different file_system_type structures, but they all end up
-        * using the same ->copy_file_range() function pointer.
-        */
-       if (file_out->f_op->copy_file_range &&
-           file_out->f_op->copy_file_range == file_in->f_op->copy_file_range)
-               return file_out->f_op->copy_file_range(file_in, pos_in,
-                                                      file_out, pos_out,
-                                                      len, flags);
-
-       return generic_copy_file_range(file_in, pos_in, file_out, pos_out, len,
-                                      flags);
-}
-
 /*
  * Performs necessary checks before doing a file copy
  *
@@ -1440,6 +1421,24 @@ static int generic_copy_file_checks(struct file *file_in, loff_t pos_in,
        if (ret)
                return ret;
 
+       /*
+        * We allow some filesystems to handle cross sb copy, but passing
+        * a file of the wrong filesystem type to filesystem driver can result
+        * in an attempt to dereference the wrong type of ->private_data, so
+        * avoid doing that until we really have a good reason.
+        *
+        * nfs and cifs define several different file_system_type structures
+        * and several different sets of file_operations, but they all end up
+        * using the same ->copy_file_range() function pointer.
+        */
+       if (file_out->f_op->copy_file_range) {
+               if (file_in->f_op->copy_file_range !=
+                   file_out->f_op->copy_file_range)
+                       return -EXDEV;
+       } else if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) {
+               return -EXDEV;
+       }
+
        /* Don't touch certain kinds of inodes */
        if (IS_IMMUTABLE(inode_out))
                return -EPERM;
@@ -1505,26 +1504,41 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
        file_start_write(file_out);
 
        /*
-        * Try cloning first, this is supported by more file systems, and
-        * more efficient if both clone and copy are supported (e.g. NFS).
+        * Cloning is supported by more file systems, so we implement copy on
+        * same sb using clone, but for filesystems where both clone and copy
+        * are supported (e.g. nfs, cifs), we only call the copy method.
         */
+       if (file_out->f_op->copy_file_range) {
+               ret = file_out->f_op->copy_file_range(file_in, pos_in,
+                                                     file_out, pos_out,
+                                                     len, flags);
+               goto done;
+       }
+
        if (file_in->f_op->remap_file_range &&
            file_inode(file_in)->i_sb == file_inode(file_out)->i_sb) {
-               loff_t cloned;
-
-               cloned = file_in->f_op->remap_file_range(file_in, pos_in,
+               ret = file_in->f_op->remap_file_range(file_in, pos_in,
                                file_out, pos_out,
                                min_t(loff_t, MAX_RW_COUNT, len),
                                REMAP_FILE_CAN_SHORTEN);
-               if (cloned > 0) {
-                       ret = cloned;
+               if (ret > 0)
                        goto done;
-               }
        }
 
-       ret = do_copy_file_range(file_in, pos_in, file_out, pos_out, len,
-                               flags);
-       WARN_ON_ONCE(ret == -EOPNOTSUPP);
+       /*
+        * We can get here for same sb copy of filesystems that do not implement
+        * ->copy_file_range() in case filesystem does not support clone or in
+        * case filesystem supports clone but rejected the clone request (e.g.
+        * because it was not block aligned).
+        *
+        * In both cases, fall back to kernel copy so we are able to maintain a
+        * consistent story about which filesystems support copy_file_range()
+        * and which filesystems do not, that will allow userspace tools to
+        * make consistent decisions w.r.t. using copy_file_range().
+        */
+       ret = generic_copy_file_range(file_in, pos_in, file_out, pos_out, len,
+                                     flags);
+
 done:
        if (ret > 0) {
                fsnotify_access(file_in);
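The comment above spells out the userspace contract this rework aims for: copy_file_range() either works or fails with a clear error, so tools can fall back deterministically. A sketch of that caller-side pattern (simplified; short writes and EINTR not handled; assumes glibc 2.27+ for the copy_file_range() wrapper):

#define _GNU_SOURCE
#include <errno.h>
#include <unistd.h>

int copy_with_fallback(int in_fd, int out_fd, size_t len)
{
        while (len > 0) {
                ssize_t n = copy_file_range(in_fd, NULL, out_fd, NULL,
                                            len, 0);
                if (n > 0) {
                        len -= (size_t)n;
                        continue;
                }
                if (n == 0)
                        return 0;               /* hit EOF early */
                if (errno != EXDEV && errno != EOPNOTSUPP &&
                    errno != ENOSYS)
                        return -1;
                break;                          /* fall back below */
        }

        /* Fallback: bounce the remaining bytes through a user buffer. */
        while (len > 0) {
                char buf[65536];
                ssize_t r = read(in_fd, buf,
                                 len < sizeof(buf) ? len : sizeof(buf));
                if (r <= 0)
                        return (int)r;
                if (write(out_fd, buf, (size_t)r) != r)
                        return -1;
                len -= (size_t)r;
        }
        return 0;
}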
index 0cffe05..0df48d1 100644 (file)
@@ -290,7 +290,7 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
        struct buffer_head *bh;
        struct item_head *ih, tmp_ih;
        b_blocknr_t blocknr;
-       char *p = NULL;
+       char *p;
        int chars;
        int ret;
        int result;
@@ -305,8 +305,6 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
        result = search_for_position_by_key(inode->i_sb, &key, &path);
        if (result != POSITION_FOUND) {
                pathrelse(&path);
-               if (p)
-                       kunmap(bh_result->b_page);
                if (result == IO_ERROR)
                        return -EIO;
                /*
@@ -352,8 +350,6 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
                }
 
                pathrelse(&path);
-               if (p)
-                       kunmap(bh_result->b_page);
                return ret;
        }
        /* requested data are in direct item(s) */
@@ -363,8 +359,6 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
                 * when it is stored in direct item(s)
                 */
                pathrelse(&path);
-               if (p)
-                       kunmap(bh_result->b_page);
                return -ENOENT;
        }
 
@@ -396,9 +390,7 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
         * sure we need to.  But, this means the item might move if
         * kmap schedules
         */
-       if (!p)
-               p = (char *)kmap(bh_result->b_page);
-
+       p = (char *)kmap(bh_result->b_page);
        p += offset;
        memset(p, 0, inode->i_sb->s_blocksize);
        do {
@@ -3284,7 +3276,7 @@ int reiserfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
        /* must be turned off for recursive notify_change calls */
        ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
 
-       if (is_quota_modification(inode, attr)) {
+       if (is_quota_modification(mnt_userns, inode, attr)) {
                error = dquot_initialize(inode);
                if (error)
                        return error;
@@ -3367,7 +3359,7 @@ int reiserfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                reiserfs_write_unlock(inode->i_sb);
                if (error)
                        goto out;
-               error = dquot_transfer(inode, attr);
+               error = dquot_transfer(mnt_userns, inode, attr);
                reiserfs_write_lock(inode->i_sb);
                if (error) {
                        journal_end(&th);
index e112b54..881a306 100644 (file)
@@ -71,7 +71,8 @@ static int generic_remap_checks(struct file *file_in, loff_t pos_in,
         * Otherwise, make sure the count is also block-aligned, having
         * already confirmed the starting offsets' block alignment.
         */
-       if (pos_in + count == size_in) {
+       if (pos_in + count == size_in &&
+           (!(remap_flags & REMAP_FILE_DEDUP) || pos_out + count == size_out)) {
                bcount = ALIGN(size_in, bs) - pos_in;
        } else {
                if (!IS_ALIGNED(count, bs))
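
The effect of the check above: a dedupe request with a block-unaligned length
is now accepted only when it runs to EOF in both files. A hedged userspace
sketch using the FIDEDUPERANGE ioctl (error handling trimmed):

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	static int dedupe_whole(int src_fd, int dst_fd, __u64 len)
	{
		struct file_dedupe_range *r;
		int ret;

		r = calloc(1, sizeof(*r) +
			      sizeof(struct file_dedupe_range_info));
		if (!r)
			return -1;
		r->src_offset = 0;
		/* an unaligned len must reach EOF of src AND dst */
		r->src_length = len;
		r->dest_count = 1;
		r->info[0].dest_fd = dst_fd;
		r->info[0].dest_offset = 0;

		ret = ioctl(src_fd, FIDEDUPERANGE, r);
		/* on success r->info[0].status == FILE_DEDUPE_RANGE_SAME */
		free(r);
		return ret;
	}
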
index de72527..81d26ab 100644 (file)
@@ -553,7 +553,7 @@ struct dentry *tracefs_create_dir(const char *name, struct dentry *parent)
  *
  * Only one instances directory is allowed.
  *
- * The instances directory is special as it allows for mkdir and rmdir to
+ * The instances directory is special as it allows for mkdir and rmdir
  * to be done by userspace. When a mkdir or rmdir is performed, the inode
  * locks are released and the methods passed in (@mkdir and @rmdir) are
  * called without locks and with the name of the directory being created
index e943370..de86f5b 100644 (file)
@@ -192,17 +192,19 @@ static inline void msg_init(struct uffd_msg *msg)
 }
 
 static inline struct uffd_msg userfault_msg(unsigned long address,
+                                           unsigned long real_address,
                                            unsigned int flags,
                                            unsigned long reason,
                                            unsigned int features)
 {
        struct uffd_msg msg;
+
        msg_init(&msg);
        msg.event = UFFD_EVENT_PAGEFAULT;
 
-       if (!(features & UFFD_FEATURE_EXACT_ADDRESS))
-               address &= PAGE_MASK;
-       msg.arg.pagefault.address = address;
+       msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
+                                   real_address : address;
+
        /*
         * These flags indicate why the userfault occurred:
         * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
@@ -488,8 +490,8 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 
        init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
        uwq.wq.private = current;
-       uwq.msg = userfault_msg(vmf->real_address, vmf->flags, reason,
-                       ctx->features);
+       uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
+                               reason, ctx->features);
        uwq.ctx = ctx;
        uwq.waken = false;
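
With the hunk above, the reported address is vmf->address (already masked to
the correct page size, huge pages included) when the feature is off, and the
untouched real_address when it is on. Userspace opts in via the UFFDIO_API
handshake; a minimal sketch, assuming a uapi that defines
UFFD_FEATURE_EXACT_ADDRESS:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <linux/userfaultfd.h>

	static int open_uffd_exact(void)
	{
		int fd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
		struct uffdio_api api = {
			.api = UFFD_API,
			.features = UFFD_FEATURE_EXACT_ADDRESS,
		};

		if (fd < 0 || ioctl(fd, UFFDIO_API, &api) < 0)
			return -1;
		/* msg.arg.pagefault.address now carries the exact fault
		 * address rather than one rounded down to a page boundary */
		return fd;
	}
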
 
index e8dd03e..a1f4998 100644 (file)
@@ -282,9 +282,15 @@ out:
 }
 EXPORT_SYMBOL_GPL(__vfs_setxattr_locked);
 
+static inline bool is_posix_acl_xattr(const char *name)
+{
+       return (strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
+              (strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT) == 0);
+}
+
 int
 vfs_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry,
-            const char *name, const void *value, size_t size, int flags)
+            const char *name, void *value, size_t size, int flags)
 {
        struct inode *inode = dentry->d_inode;
        struct inode *delegated_inode = NULL;
@@ -292,12 +298,16 @@ vfs_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry,
        int error;
 
        if (size && strcmp(name, XATTR_NAME_CAPS) == 0) {
-               error = cap_convert_nscap(mnt_userns, dentry, &value, size);
+               error = cap_convert_nscap(mnt_userns, dentry,
+                                         (const void **)&value, size);
                if (error < 0)
                        return error;
                size = error;
        }
 
+       if (size && is_posix_acl_xattr(name))
+               posix_acl_setxattr_idmapped_mnt(mnt_userns, inode, value, size);
+
 retry_deleg:
        inode_lock(inode);
        error = __vfs_setxattr_locked(mnt_userns, dentry, name, value, size,
@@ -431,7 +441,10 @@ vfs_getxattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                return ret;
        }
 nolsm:
-       return __vfs_getxattr(dentry, inode, name, value, size);
+       error = __vfs_getxattr(dentry, inode, name, value, size);
+       if (error > 0 && is_posix_acl_xattr(name))
+               posix_acl_getxattr_idmapped_mnt(mnt_userns, inode, value, size);
+       return error;
 }
 EXPORT_SYMBOL_GPL(vfs_getxattr);
 
@@ -577,8 +590,7 @@ static void setxattr_convert(struct user_namespace *mnt_userns,
        if (ctx->size &&
                ((strcmp(ctx->kname->name, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
                (strcmp(ctx->kname->name, XATTR_NAME_POSIX_ACL_DEFAULT) == 0)))
-               posix_acl_fix_xattr_from_user(mnt_userns, d_inode(d),
-                                               ctx->kvalue, ctx->size);
+               posix_acl_fix_xattr_from_user(ctx->kvalue, ctx->size);
 }
 
 int do_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry,
@@ -695,8 +707,7 @@ do_getxattr(struct user_namespace *mnt_userns, struct dentry *d,
        if (error > 0) {
                if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
                    (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
-                       posix_acl_fix_xattr_to_user(mnt_userns, d_inode(d),
-                                                       ctx->kvalue, error);
+                       posix_acl_fix_xattr_to_user(ctx->kvalue, error);
                if (ctx->size && copy_to_user(ctx->value, ctx->kvalue, error))
                        error = -EFAULT;
        } else if (error == -ERANGE && ctx->size >= XATTR_SIZE_MAX) {
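
With posix_acl_{set,get}xattr_idmapped_mnt() hooked into vfs_setxattr() and
vfs_getxattr(), uid/gid translation for ACL xattrs on idmapped mounts happens
in the VFS rather than in the userspace-facing fixup helpers. Userspace keeps
using the plain xattr interface; a small sketch, assuming the usual
system.posix_acl_access name:

	#include <stdio.h>
	#include <sys/xattr.h>

	static int dump_access_acl(const char *path)
	{
		char buf[512];
		ssize_t n = getxattr(path, "system.posix_acl_access",
				     buf, sizeof(buf));

		if (n < 0) {
			perror("getxattr");
			return -1;
		}
		/* buf holds the binary posix_acl blob, n bytes long */
		printf("%zd bytes of ACL data\n", n);
		return 0;
	}
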
index 836ab1b..224649a 100644 (file)
@@ -50,7 +50,7 @@ STATIC int xfs_attr_shortform_addname(xfs_da_args_t *args);
 STATIC int xfs_attr_leaf_get(xfs_da_args_t *args);
 STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args);
 STATIC int xfs_attr_leaf_hasname(struct xfs_da_args *args, struct xfs_buf **bp);
-STATIC int xfs_attr_leaf_try_add(struct xfs_da_args *args, struct xfs_buf *bp);
+STATIC int xfs_attr_leaf_try_add(struct xfs_da_args *args);
 
 /*
  * Internal routines when attribute list is more than one block.
@@ -393,16 +393,10 @@ xfs_attr_sf_addname(
         * It won't fit in the shortform, transform to a leaf block.  GROT:
         * another possible req'mt for a double-split btree op.
         */
-       error = xfs_attr_shortform_to_leaf(args, &attr->xattri_leaf_bp);
+       error = xfs_attr_shortform_to_leaf(args);
        if (error)
                return error;
 
-       /*
-        * Prevent the leaf buffer from being unlocked so that a concurrent AIL
-        * push cannot grab the half-baked leaf buffer and run into problems
-        * with the write verifier.
-        */
-       xfs_trans_bhold(args->trans, attr->xattri_leaf_bp);
        attr->xattri_dela_state = XFS_DAS_LEAF_ADD;
 out:
        trace_xfs_attr_sf_addname_return(attr->xattri_dela_state, args->dp);
@@ -447,11 +441,9 @@ xfs_attr_leaf_addname(
 
        /*
         * Use the leaf buffer we may already hold locked as a result of
-        * a sf-to-leaf conversion. The held buffer is no longer valid
-        * after this call, regardless of the result.
+        * a sf-to-leaf conversion.
         */
-       error = xfs_attr_leaf_try_add(args, attr->xattri_leaf_bp);
-       attr->xattri_leaf_bp = NULL;
+       error = xfs_attr_leaf_try_add(args);
 
        if (error == -ENOSPC) {
                error = xfs_attr3_leaf_to_node(args);
@@ -497,8 +489,6 @@ xfs_attr_node_addname(
        struct xfs_da_args      *args = attr->xattri_da_args;
        int                     error;
 
-       ASSERT(!attr->xattri_leaf_bp);
-
        error = xfs_attr_node_addname_find_attr(attr);
        if (error)
                return error;
@@ -997,9 +987,11 @@ xfs_attr_set(
        /*
         * We have no control over the attribute names that userspace passes us
         * to remove, so we have to allow the name lookup prior to attribute
-        * removal to fail as well.
+        * removal to fail as well.  Preserve the logged flag, since we need
+        * to pass that through to the logging code.
         */
-       args->op_flags = XFS_DA_OP_OKNOENT;
+       args->op_flags = XFS_DA_OP_OKNOENT |
+                                       (args->op_flags & XFS_DA_OP_LOGGED);
 
        if (args->value) {
                XFS_STATS_INC(mp, xs_attr_set);
@@ -1213,24 +1205,14 @@ xfs_attr_restore_rmt_blk(
  */
 STATIC int
 xfs_attr_leaf_try_add(
-       struct xfs_da_args      *args,
-       struct xfs_buf          *bp)
+       struct xfs_da_args      *args)
 {
+       struct xfs_buf          *bp;
        int                     error;
 
-       /*
-        * If the caller provided a buffer to us, it is locked and held in
-        * the transaction because it just did a shortform to leaf conversion.
-        * Hence we don't need to read it again. Otherwise read in the leaf
-        * buffer.
-        */
-       if (bp) {
-               xfs_trans_bhold_release(args->trans, bp);
-       } else {
-               error = xfs_attr3_leaf_read(args->trans, args->dp, 0, &bp);
-               if (error)
-                       return error;
-       }
+       error = xfs_attr3_leaf_read(args->trans, args->dp, 0, &bp);
+       if (error)
+               return error;
 
        /*
         * Look up the xattr name to set the insertion point for the new xattr.
@@ -1439,12 +1421,11 @@ static int
 xfs_attr_node_try_addname(
        struct xfs_attr_intent          *attr)
 {
-       struct xfs_da_args              *args = attr->xattri_da_args;
        struct xfs_da_state             *state = attr->xattri_da_state;
        struct xfs_da_state_blk         *blk;
        int                             error;
 
-       trace_xfs_attr_node_addname(args);
+       trace_xfs_attr_node_addname(state->args);
 
        blk = &state->path.blk[state->path.active-1];
        ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
index e329da3..dfb47fa 100644 (file)
@@ -28,16 +28,6 @@ struct xfs_attr_list_context;
  */
 #define        ATTR_MAX_VALUELEN       (64*1024)       /* max length of a value */
 
-static inline bool xfs_has_larp(struct xfs_mount *mp)
-{
-#ifdef DEBUG
-       /* Logged xattrs require a V5 super for log_incompat */
-       return xfs_has_crc(mp) && xfs_globals.larp;
-#else
-       return false;
-#endif
-}
-
 /*
  * Kernel-internal version of the attrlist cursor.
  */
@@ -525,11 +515,6 @@ struct xfs_attr_intent {
         */
        struct xfs_attri_log_nameval    *xattri_nameval;
 
-       /*
-        * Used by xfs_attr_set to hold a leaf buffer across a transaction roll
-        */
-       struct xfs_buf                  *xattri_leaf_bp;
-
        /* Used to keep track of current state of delayed operation */
        enum xfs_delattr_state          xattri_dela_state;
 
@@ -624,7 +609,7 @@ static inline enum xfs_delattr_state
 xfs_attr_init_replace_state(struct xfs_da_args *args)
 {
        args->op_flags |= XFS_DA_OP_ADDNAME | XFS_DA_OP_REPLACE;
-       if (xfs_has_larp(args->dp->i_mount))
+       if (args->op_flags & XFS_DA_OP_LOGGED)
                return xfs_attr_init_remove_state(args);
        return xfs_attr_init_add_state(args);
 }
index 15a9904..8f47396 100644 (file)
@@ -289,6 +289,23 @@ xfs_attr3_leaf_verify_entry(
        return NULL;
 }
 
+/*
+ * Validate an attribute leaf block.
+ *
+ * Empty leaf blocks can occur under the following circumstances:
+ *
+ * 1. setxattr adds a new extended attribute to a file;
+ * 2. The file has zero existing attributes;
+ * 3. The attribute is too large to fit in the attribute fork;
+ * 4. The attribute is small enough to fit in a leaf block;
+ * 5. A log flush occurs after committing the transaction that creates
+ *    the (empty) leaf block; and
+ * 6. The filesystem goes down after the log flush but before the new
+ *    attribute can be committed to the leaf block.
+ *
+ * Hence we need to ensure that we don't fail the validation purely
+ * because the leaf is empty.
+ */
 static xfs_failaddr_t
 xfs_attr3_leaf_verify(
        struct xfs_buf                  *bp)
@@ -311,15 +328,6 @@ xfs_attr3_leaf_verify(
                return fa;
 
        /*
-        * Empty leaf blocks should never occur;  they imply the existence of a
-        * software bug that needs fixing. xfs_repair also flags them as a
-        * corruption that needs fixing, so we should never let these go to
-        * disk.
-        */
-       if (ichdr.count == 0)
-               return __this_address;
-
-       /*
         * firstused is the block offset of the first name info structure.
         * Make sure it doesn't go off the block or crash into the header.
         */
@@ -922,14 +930,10 @@ xfs_attr_shortform_getvalue(
        return -ENOATTR;
 }
 
-/*
- * Convert from using the shortform to the leaf.  On success, return the
- * buffer so that we can keep it locked until we're totally done with it.
- */
+/* Convert from using the shortform to the leaf format. */
 int
 xfs_attr_shortform_to_leaf(
-       struct xfs_da_args              *args,
-       struct xfs_buf                  **leaf_bp)
+       struct xfs_da_args              *args)
 {
        struct xfs_inode                *dp;
        struct xfs_attr_shortform       *sf;
@@ -991,7 +995,6 @@ xfs_attr_shortform_to_leaf(
                sfe = xfs_attr_sf_nextentry(sfe);
        }
        error = 0;
-       *leaf_bp = bp;
 out:
        kmem_free(tmpbuffer);
        return error;
@@ -1530,7 +1533,7 @@ xfs_attr3_leaf_add_work(
        if (tmp)
                entry->flags |= XFS_ATTR_LOCAL;
        if (args->op_flags & XFS_DA_OP_REPLACE) {
-               if (!xfs_has_larp(mp))
+               if (!(args->op_flags & XFS_DA_OP_LOGGED))
                        entry->flags |= XFS_ATTR_INCOMPLETE;
                if ((args->blkno2 == args->blkno) &&
                    (args->index2 <= args->index)) {
index efa757f..368f4d9 100644 (file)
@@ -49,8 +49,7 @@ void  xfs_attr_shortform_create(struct xfs_da_args *args);
 void   xfs_attr_shortform_add(struct xfs_da_args *args, int forkoff);
 int    xfs_attr_shortform_lookup(struct xfs_da_args *args);
 int    xfs_attr_shortform_getvalue(struct xfs_da_args *args);
-int    xfs_attr_shortform_to_leaf(struct xfs_da_args *args,
-                       struct xfs_buf **leaf_bp);
+int    xfs_attr_shortform_to_leaf(struct xfs_da_args *args);
 int    xfs_attr_sf_removename(struct xfs_da_args *args);
 int    xfs_attr_sf_findname(struct xfs_da_args *args,
                             struct xfs_attr_sf_entry **sfep,
index d33b768..ffa3df5 100644 (file)
@@ -92,6 +92,7 @@ typedef struct xfs_da_args {
 #define XFS_DA_OP_NOTIME       (1u << 5) /* don't update inode timestamps */
 #define XFS_DA_OP_REMOVE       (1u << 6) /* this is a remove operation */
 #define XFS_DA_OP_RECOVERY     (1u << 7) /* Log recovery operation */
+#define XFS_DA_OP_LOGGED       (1u << 8) /* Use intent items to track op */
 
 #define XFS_DA_OP_FLAGS \
        { XFS_DA_OP_JUSTCHECK,  "JUSTCHECK" }, \
@@ -101,7 +102,8 @@ typedef struct xfs_da_args {
        { XFS_DA_OP_CILOOKUP,   "CILOOKUP" }, \
        { XFS_DA_OP_NOTIME,     "NOTIME" }, \
        { XFS_DA_OP_REMOVE,     "REMOVE" }, \
-       { XFS_DA_OP_RECOVERY,   "RECOVERY" }
+       { XFS_DA_OP_RECOVERY,   "RECOVERY" }, \
+       { XFS_DA_OP_LOGGED,     "LOGGED" }
 
 /*
  * Storage for holding state during Btree searches and split/join ops.
index 4a28c2d..5077a7a 100644 (file)
@@ -413,18 +413,20 @@ xfs_attr_create_intent(
        struct xfs_mount                *mp = tp->t_mountp;
        struct xfs_attri_log_item       *attrip;
        struct xfs_attr_intent          *attr;
+       struct xfs_da_args              *args;
 
        ASSERT(count == 1);
 
-       if (!xfs_sb_version_haslogxattrs(&mp->m_sb))
-               return NULL;
-
        /*
         * Each attr item only performs one attribute operation at a time, so
         * this is a list of one
         */
        attr = list_first_entry_or_null(items, struct xfs_attr_intent,
                        xattri_list);
+       args = attr->xattri_da_args;
+
+       if (!(args->op_flags & XFS_DA_OP_LOGGED))
+               return NULL;
 
        /*
         * Create a buffer to store the attribute name and value.  This buffer
@@ -432,8 +434,6 @@ xfs_attr_create_intent(
         * and the lower level xattr log items.
         */
        if (!attr->xattri_nameval) {
-               struct xfs_da_args      *args = attr->xattri_da_args;
-
                /*
                 * Transfer our reference to the name/value buffer to the
                 * deferred work state structure.
@@ -576,7 +576,7 @@ xfs_attri_item_recover(
        struct xfs_trans_res            tres;
        struct xfs_attri_log_format     *attrp;
        struct xfs_attri_log_nameval    *nv = attrip->attri_nameval;
-       int                             error, ret = 0;
+       int                             error;
        int                             total;
        int                             local;
        struct xfs_attrd_log_item       *done_item = NULL;
@@ -617,7 +617,10 @@ xfs_attri_item_recover(
        args->namelen = nv->name.i_len;
        args->hashval = xfs_da_hashname(args->name, args->namelen);
        args->attr_filter = attrp->alfi_attr_filter & XFS_ATTRI_FILTER_MASK;
-       args->op_flags = XFS_DA_OP_RECOVERY | XFS_DA_OP_OKNOENT;
+       args->op_flags = XFS_DA_OP_RECOVERY | XFS_DA_OP_OKNOENT |
+                        XFS_DA_OP_LOGGED;
+
+       ASSERT(xfs_sb_version_haslogxattrs(&mp->m_sb));
 
        switch (attr->xattri_op_flags) {
        case XFS_ATTRI_OP_FLAGS_SET:
@@ -652,29 +655,32 @@ xfs_attri_item_recover(
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);
 
-       ret = xfs_xattri_finish_update(attr, done_item);
-       if (ret == -EAGAIN) {
-               /* There's more work to do, so add it to this transaction */
+       error = xfs_xattri_finish_update(attr, done_item);
+       if (error == -EAGAIN) {
+               /*
+                * There's more work to do, so add the intent item to this
+                * transaction so that we can continue it later.
+                */
                xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_ATTR, &attr->xattri_list);
-       } else
-               error = ret;
+               error = xfs_defer_ops_capture_and_commit(tp, capture_list);
+               if (error)
+                       goto out_unlock;
 
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
+               xfs_irele(ip);
+               return 0;
+       }
        if (error) {
                xfs_trans_cancel(tp);
                goto out_unlock;
        }
 
        error = xfs_defer_ops_capture_and_commit(tp, capture_list);
-
 out_unlock:
-       if (attr->xattri_leaf_bp)
-               xfs_buf_relse(attr->xattri_leaf_bp);
-
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        xfs_irele(ip);
 out:
-       if (ret != -EAGAIN)
-               xfs_attr_free_item(attr);
+       xfs_attr_free_item(attr);
        return error;
 }
 
index 52be583..85e1a26 100644 (file)
@@ -686,6 +686,8 @@ xfs_can_free_eofblocks(
         * forever.
         */
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
+       if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1)
+               end_fsb = roundup_64(end_fsb, mp->m_sb.sb_rextsize);
        last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        if (last_fsb <= end_fsb)
                return false;
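
Worked example of the rounding added above: with a realtime extent size of 4
blocks, an EOF at block 10 rounds up to 12, so the tail of the final
(partially used) rt extent is not treated as freeable eofblocks. A sketch of
the arithmetic (the kernel's roundup_64() behaves like this for these inputs):

	/* round x up to the next multiple of y; roundup64(10, 4) == 12 */
	static inline unsigned long long roundup64(unsigned long long x,
						   unsigned long long y)
	{
		return ((x + y - 1) / y) * y;
	}
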
index 5269354..2609825 100644 (file)
@@ -440,7 +440,7 @@ xfs_inodegc_queue_all(
        for_each_online_cpu(cpu) {
                gc = per_cpu_ptr(mp->m_inodegc, cpu);
                if (!llist_empty(&gc->list))
-                       queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
+                       mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
        }
 }
 
@@ -1841,8 +1841,8 @@ void
 xfs_inodegc_worker(
        struct work_struct      *work)
 {
-       struct xfs_inodegc      *gc = container_of(work, struct xfs_inodegc,
-                                                       work);
+       struct xfs_inodegc      *gc = container_of(to_delayed_work(work),
+                                               struct xfs_inodegc, work);
        struct llist_node       *node = llist_del_all(&gc->list);
        struct xfs_inode        *ip, *n;
 
@@ -1862,19 +1862,29 @@ xfs_inodegc_worker(
 }
 
 /*
- * Force all currently queued inode inactivation work to run immediately and
- * wait for the work to finish.
+ * Expedite all pending inodegc work to run immediately. This does not wait for
+ * completion of the work.
  */
 void
-xfs_inodegc_flush(
+xfs_inodegc_push(
        struct xfs_mount        *mp)
 {
        if (!xfs_is_inodegc_enabled(mp))
                return;
+       trace_xfs_inodegc_push(mp, __return_address);
+       xfs_inodegc_queue_all(mp);
+}
 
+/*
+ * Force all currently queued inode inactivation work to run immediately and
+ * wait for the work to finish.
+ */
+void
+xfs_inodegc_flush(
+       struct xfs_mount        *mp)
+{
+       xfs_inodegc_push(mp);
        trace_xfs_inodegc_flush(mp, __return_address);
-
-       xfs_inodegc_queue_all(mp);
        flush_workqueue(mp->m_inodegc_wq);
 }
 
@@ -2014,6 +2024,7 @@ xfs_inodegc_queue(
        struct xfs_inodegc      *gc;
        int                     items;
        unsigned int            shrinker_hits;
+       unsigned long           queue_delay = 1;
 
        trace_xfs_inode_set_need_inactive(ip);
        spin_lock(&ip->i_flags_lock);
@@ -2025,19 +2036,26 @@ xfs_inodegc_queue(
        items = READ_ONCE(gc->items);
        WRITE_ONCE(gc->items, items + 1);
        shrinker_hits = READ_ONCE(gc->shrinker_hits);
-       put_cpu_ptr(gc);
 
-       if (!xfs_is_inodegc_enabled(mp))
+       /*
+        * We queue the work while holding the current CPU so that the work
+        * is scheduled to run on this CPU.
+        */
+       if (!xfs_is_inodegc_enabled(mp)) {
+               put_cpu_ptr(gc);
                return;
-
-       if (xfs_inodegc_want_queue_work(ip, items)) {
-               trace_xfs_inodegc_queue(mp, __return_address);
-               queue_work(mp->m_inodegc_wq, &gc->work);
        }
 
+       if (xfs_inodegc_want_queue_work(ip, items))
+               queue_delay = 0;
+
+       trace_xfs_inodegc_queue(mp, __return_address);
+       mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
+       put_cpu_ptr(gc);
+
        if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
                trace_xfs_inodegc_throttle(mp, __return_address);
-               flush_work(&gc->work);
+               flush_delayed_work(&gc->work);
        }
 }
 
@@ -2054,7 +2072,7 @@ xfs_inodegc_cpu_dead(
        unsigned int            count = 0;
 
        dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
-       cancel_work_sync(&dead_gc->work);
+       cancel_delayed_work_sync(&dead_gc->work);
 
        if (llist_empty(&dead_gc->list))
                return;
@@ -2073,12 +2091,12 @@ xfs_inodegc_cpu_dead(
        llist_add_batch(first, last, &gc->list);
        count += READ_ONCE(gc->items);
        WRITE_ONCE(gc->items, count);
-       put_cpu_ptr(gc);
 
        if (xfs_is_inodegc_enabled(mp)) {
                trace_xfs_inodegc_queue(mp, __return_address);
-               queue_work(mp->m_inodegc_wq, &gc->work);
+               mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
        }
+       put_cpu_ptr(gc);
 }
 
 /*
@@ -2173,7 +2191,7 @@ xfs_inodegc_shrinker_scan(
                        unsigned int    h = READ_ONCE(gc->shrinker_hits);
 
                        WRITE_ONCE(gc->shrinker_hits, h + 1);
-                       queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
+                       mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
                        no_items = false;
                }
        }
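
The conversion from work_struct to delayed_work across this file follows the
standard workqueue pattern; a condensed sketch of the moving parts, using the
same API calls as above (struct frob is illustrative):

	#include <linux/workqueue.h>

	struct frob {
		struct delayed_work	work;
	};

	static void frob_worker(struct work_struct *work)
	{
		/* to_delayed_work() maps the embedded work_struct back to
		 * its delayed_work; container_of() then finds our state */
		struct frob *f = container_of(to_delayed_work(work),
					      struct frob, work);

		(void)f;	/* ... process f's queued items here ... */
	}

	static void frob_kick(struct frob *f, struct workqueue_struct *wq)
	{
		INIT_DELAYED_WORK(&f->work, frob_worker);
		/* delay 0 runs ASAP; a short delay batches queued items */
		mod_delayed_work(wq, &f->work, 1);
		flush_delayed_work(&f->work);	/* throttle: wait for it */
	}
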
index 2e4cfdd..6cd1807 100644 (file)
@@ -76,6 +76,7 @@ void xfs_blockgc_stop(struct xfs_mount *mp);
 void xfs_blockgc_start(struct xfs_mount *mp);
 
 void xfs_inodegc_worker(struct work_struct *work);
+void xfs_inodegc_push(struct xfs_mount *mp);
 void xfs_inodegc_flush(struct xfs_mount *mp);
 void xfs_inodegc_stop(struct xfs_mount *mp);
 void xfs_inodegc_start(struct xfs_mount *mp);
index 52d6f2c..3e1c62f 100644 (file)
@@ -132,6 +132,26 @@ xfs_ilock_attr_map_shared(
 }
 
 /*
+ * You can't set both SHARED and EXCL for the same lock,
+ * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
+ * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
+ * to set in lock_flags.
+ */
+static inline void
+xfs_lock_flags_assert(
+       uint            lock_flags)
+{
+       ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
+               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
+       ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
+               (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
+       ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
+               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+       ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
+       ASSERT(lock_flags != 0);
+}
+
+/*
  * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
  * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
  * various combinations of the locks to be obtained.
@@ -168,18 +188,7 @@ xfs_ilock(
 {
        trace_xfs_ilock(ip, lock_flags, _RET_IP_);
 
-       /*
-        * You can't set both SHARED and EXCL for the same lock,
-        * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
-        * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
-        */
-       ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
-              (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
-       ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
-              (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
-       ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
-              (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-       ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
+       xfs_lock_flags_assert(lock_flags);
 
        if (lock_flags & XFS_IOLOCK_EXCL) {
                down_write_nested(&VFS_I(ip)->i_rwsem,
@@ -222,18 +231,7 @@ xfs_ilock_nowait(
 {
        trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
 
-       /*
-        * You can't set both SHARED and EXCL for the same lock,
-        * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
-        * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
-        */
-       ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
-              (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
-       ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
-              (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
-       ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
-              (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-       ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
+       xfs_lock_flags_assert(lock_flags);
 
        if (lock_flags & XFS_IOLOCK_EXCL) {
                if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
@@ -291,19 +289,7 @@ xfs_iunlock(
        xfs_inode_t             *ip,
        uint                    lock_flags)
 {
-       /*
-        * You can't set both SHARED and EXCL for the same lock,
-        * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
-        * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
-        */
-       ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
-              (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
-       ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
-              (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
-       ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
-              (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-       ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
-       ASSERT(lock_flags != 0);
+       xfs_lock_flags_assert(lock_flags);
 
        if (lock_flags & XFS_IOLOCK_EXCL)
                up_write(&VFS_I(ip)->i_rwsem);
@@ -379,8 +365,8 @@ xfs_isilocked(
        }
 
        if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
-               return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
-                               (lock_flags & XFS_IOLOCK_SHARED));
+               return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
+                               (lock_flags & XFS_MMAPLOCK_SHARED));
        }
 
        if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
index 5a364a7..0d67ff8 100644 (file)
@@ -1096,7 +1096,8 @@ xfs_flags2diflags2(
 {
        uint64_t                di_flags2 =
                (ip->i_diflags2 & (XFS_DIFLAG2_REFLINK |
-                                  XFS_DIFLAG2_BIGTIME));
+                                  XFS_DIFLAG2_BIGTIME |
+                                  XFS_DIFLAG2_NREXT64));
 
        if (xflags & FS_XFLAG_DAX)
                di_flags2 |= XFS_DIFLAG2_DAX;
index 29f5b8b..a7402f6 100644 (file)
@@ -667,13 +667,15 @@ xfs_setattr_nonsize(
                uint    qflags = 0;
 
                if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
-                       uid = iattr->ia_uid;
+                       uid = from_vfsuid(mnt_userns, i_user_ns(inode),
+                                         iattr->ia_vfsuid);
                        qflags |= XFS_QMOPT_UQUOTA;
                } else {
                        uid = inode->i_uid;
                }
                if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
-                       gid = iattr->ia_gid;
+                       gid = from_vfsgid(mnt_userns, i_user_ns(inode),
+                                         iattr->ia_vfsgid);
                        qflags |= XFS_QMOPT_GQUOTA;
                }  else {
                        gid = inode->i_gid;
@@ -704,13 +706,13 @@ xfs_setattr_nonsize(
         * didn't have the inode locked, inode's dquot(s) would have changed
         * also.
         */
-       if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp) &&
-           !uid_eq(inode->i_uid, iattr->ia_uid)) {
+       if (XFS_IS_UQUOTA_ON(mp) &&
+           i_uid_needs_update(mnt_userns, iattr, inode)) {
                ASSERT(udqp);
                old_udqp = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, udqp);
        }
-       if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp) &&
-           !gid_eq(inode->i_gid, iattr->ia_gid)) {
+       if (XFS_IS_GQUOTA_ON(mp) &&
+           i_gid_needs_update(mnt_userns, iattr, inode)) {
                ASSERT(xfs_has_pquotino(mp) || !XFS_IS_PQUOTA_ON(mp));
                ASSERT(gdqp);
                old_gdqp = xfs_qm_vop_chown(tp, ip, &ip->i_gdquot, gdqp);
index 1e972f8..ae904b2 100644 (file)
@@ -2092,8 +2092,6 @@ xlog_dealloc_log(
        xlog_in_core_t  *iclog, *next_iclog;
        int             i;
 
-       xlog_cil_destroy(log);
-
        /*
         * Cycle all the iclogbuf locks to make sure all log IO completion
         * is done before we tear down these buffers.
@@ -2105,6 +2103,13 @@ xlog_dealloc_log(
                iclog = iclog->ic_next;
        }
 
+       /*
+        * Destroy the CIL after waiting for iclog IO completion because an
+        * iclog EIO error will try to shut down the log, which accesses the
+        * CIL to wake up the waiters.
+        */
+       xlog_cil_destroy(log);
+
        iclog = log->l_iclog;
        for (i = 0; i < log->l_iclog_bufs; i++) {
                next_iclog = iclog->ic_next;
index ba5d42a..d2eaebd 100644 (file)
@@ -61,7 +61,7 @@ struct xfs_error_cfg {
  */
 struct xfs_inodegc {
        struct llist_head       list;
-       struct work_struct      work;
+       struct delayed_work     work;
 
        /* approximate count of inodes in the list */
        unsigned int            items;
index 74ac9ca..392cb39 100644 (file)
@@ -454,9 +454,12 @@ xfs_qm_scall_getquota(
        struct xfs_dquot        *dqp;
        int                     error;
 
-       /* Flush inodegc work at the start of a quota reporting scan. */
+       /*
+        * Expedite pending inodegc work at the start of a quota reporting
+        * scan but don't block waiting for it to complete.
+        */
        if (id == 0)
-               xfs_inodegc_flush(mp);
+               xfs_inodegc_push(mp);
 
        /*
         * Try to get the dquot. We don't want it allocated on disk, so don't
@@ -498,7 +501,7 @@ xfs_qm_scall_getquota_next(
 
        /* Flush inodegc work at the start of a quota reporting scan. */
        if (*id == 0)
-               xfs_inodegc_flush(mp);
+               xfs_inodegc_push(mp);
 
        error = xfs_qm_dqget_next(mp, *id, type, &dqp);
        if (error)
index ed18160..aa977c7 100644 (file)
@@ -797,8 +797,11 @@ xfs_fs_statfs(
        xfs_extlen_t            lsize;
        int64_t                 ffree;
 
-       /* Wait for whatever inactivations are in progress. */
-       xfs_inodegc_flush(mp);
+       /*
+        * Expedite background inodegc but don't wait. We do not want to block
+        * here waiting hours for a billion-extent file to be truncated.
+        */
+       xfs_inodegc_push(mp);
 
        statp->f_type = XFS_SUPER_MAGIC;
        statp->f_namelen = MAXNAMELEN - 1;
@@ -1074,7 +1077,7 @@ xfs_inodegc_init_percpu(
                gc = per_cpu_ptr(mp->m_inodegc, cpu);
                init_llist_head(&gc->list);
                gc->items = 0;
-               INIT_WORK(&gc->work, xfs_inodegc_worker);
+               INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
        }
        return 0;
 }
index d320265..0fa1b7a 100644 (file)
@@ -240,6 +240,7 @@ DEFINE_EVENT(xfs_fs_class, name,                                    \
        TP_PROTO(struct xfs_mount *mp, void *caller_ip), \
        TP_ARGS(mp, caller_ip))
 DEFINE_FS_EVENT(xfs_inodegc_flush);
+DEFINE_FS_EVENT(xfs_inodegc_push);
 DEFINE_FS_EVENT(xfs_inodegc_start);
 DEFINE_FS_EVENT(xfs_inodegc_stop);
 DEFINE_FS_EVENT(xfs_inodegc_queue);
index 35e13e1..c325a28 100644 (file)
@@ -68,6 +68,18 @@ xfs_attr_rele_log_assist(
        xlog_drop_incompat_feat(mp->m_log);
 }
 
+static inline bool
+xfs_attr_want_log_assist(
+       struct xfs_mount        *mp)
+{
+#ifdef DEBUG
+       /* Logged xattrs require a V5 super for log_incompat */
+       return xfs_has_crc(mp) && xfs_globals.larp;
+#else
+       return false;
+#endif
+}
+
 /*
  * Set or remove an xattr, having grabbed the appropriate logging resources
  * prior to calling libxfs.
@@ -80,11 +92,14 @@ xfs_attr_change(
        bool                    use_logging = false;
        int                     error;
 
-       if (xfs_has_larp(mp)) {
+       ASSERT(!(args->op_flags & XFS_DA_OP_LOGGED));
+
+       if (xfs_attr_want_log_assist(mp)) {
                error = xfs_attr_grab_log_assist(mp);
                if (error)
                        return error;
 
+               args->op_flags |= XFS_DA_OP_LOGGED;
                use_logging = true;
        }
 
index 0532997..f5d8338 100644 (file)
@@ -616,7 +616,7 @@ static int zonefs_inode_setattr(struct user_namespace *mnt_userns,
             !uid_eq(iattr->ia_uid, inode->i_uid)) ||
            ((iattr->ia_valid & ATTR_GID) &&
             !gid_eq(iattr->ia_gid, inode->i_gid))) {
-               ret = dquot_transfer(inode, iattr);
+               ret = dquot_transfer(mnt_userns, inode, iattr);
                if (ret)
                        return ret;
        }
index c610858..d389bab 100644 (file)
@@ -145,6 +145,7 @@ extern bool cppc_allow_fast_switch(void);
 extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
 extern unsigned int cppc_get_transition_latency(int cpu);
 extern bool cpc_ffh_supported(void);
+extern bool cpc_supported_by_cpu(void);
 extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
 extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val);
 #else /* !CONFIG_ACPI_CPPC_LIB */
index fd7e8fb..961f4d8 100644 (file)
 #define wmb()  do { kcsan_wmb(); __wmb(); } while (0)
 #endif
 
+#ifdef __dma_mb
+#define dma_mb()       do { kcsan_mb(); __dma_mb(); } while (0)
+#endif
+
 #ifdef __dma_rmb
 #define dma_rmb()      do { kcsan_rmb(); __dma_rmb(); } while (0)
 #endif
 #define wmb()  mb()
 #endif
 
+#ifndef dma_mb
+#define dma_mb()       mb()
+#endif
+
 #ifndef dma_rmb
 #define dma_rmb()      rmb()
 #endif
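
dma_mb() is the full-strength sibling of dma_rmb()/dma_wmb(): it orders a
prior access to coherent DMA memory against a later access of the opposite
kind, which neither one-way barrier covers. A hedged driver-style sketch
(struct my_ring and its fields are illustrative):

	struct my_ring {
		__le64	*db_rec;	/* in dma_alloc_coherent() memory */
		__le32	*cq_status;	/* ditto, written by the device */
	};

	static u32 post_and_peek(struct my_ring *r, u64 db_val)
	{
		r->db_rec[0] = cpu_to_le64(db_val);	/* CPU write */
		dma_mb();	/* order the write above before the read below */
		return le32_to_cpu(*r->cq_status);
	}
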
index 7ce93aa..72974cb 100644 (file)
@@ -964,7 +964,34 @@ static inline void iounmap(volatile void __iomem *addr)
 #elif defined(CONFIG_GENERIC_IOREMAP)
 #include <linux/pgtable.h>
 
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
+/*
+ * Arch code can implement the following two hooks when using GENERIC_IOREMAP:
+ * ioremap_allowed() returns a bool,
+ *   - true means continue to remap
+ *   - false means skip remap and return directly
+ * iounmap_allowed() returns a bool,
+ *   - true means continue to vunmap
+ *   - false means skip vunmap and return directly
+ */
+#ifndef ioremap_allowed
+#define ioremap_allowed ioremap_allowed
+static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
+                                  unsigned long prot)
+{
+       return true;
+}
+#endif
+
+#ifndef iounmap_allowed
+#define iounmap_allowed iounmap_allowed
+static inline bool iounmap_allowed(void *addr)
+{
+       return true;
+}
+#endif
+
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+                          unsigned long prot);
 void iounmap(volatile void __iomem *addr);
 
 static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
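
An architecture using GENERIC_IOREMAP can veto individual mappings by
supplying its own ioremap_allowed() before this header is included. A
hypothetical arch fragment (arch_addr_is_ram() is made up for illustration):

	/* arch/<arch>/include/asm/io.h */
	#define ioremap_allowed ioremap_allowed
	static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
					   unsigned long prot)
	{
		/* e.g. refuse to remap normal RAM as device memory */
		return !arch_addr_is_ram(phys_addr, size);
	}
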
@@ -1125,9 +1152,7 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
 }
 #endif
 
-#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED
 extern int devmem_is_allowed(unsigned long pfn);
-#endif
 
 #endif /* __KERNEL__ */
 
index ff3e825..492dce4 100644 (file)
  *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
  *  and therefore doesn't naturally serialize with software page-table walkers.
  *
+ *  MMU_GATHER_NO_FLUSH_CACHE
+ *
+ *  Indicates the architecture has flush_cache_range() but it need *NOT* be called
+ *  before unmapping a VMA.
+ *
+ *  NOTE: strictly speaking we shouldn't have this knob and instead rely on
+ *       flush_cache_range() being a NOP, except Sparc64 seems to be
+ *       different here.
+ *
+ *  MMU_GATHER_MERGE_VMAS
+ *
+ *  Indicates the architecture wants to merge ranges over VMAs; typical when
+ *  multiple range invalidates are more expensive than a full invalidate.
+ *
  *  MMU_GATHER_NO_RANGE
  *
- *  Use this if your architecture lacks an efficient flush_tlb_range().
+ *  Use this if your architecture lacks an efficient flush_tlb_range(). This
+ *  option implies MMU_GATHER_MERGE_VMAS above.
  *
  *  MMU_GATHER_NO_GATHER
  *
@@ -288,6 +303,7 @@ struct mmu_gather {
         */
        unsigned int            vma_exec : 1;
        unsigned int            vma_huge : 1;
+       unsigned int            vma_pfn  : 1;
 
        unsigned int            batch_count;
 
@@ -334,8 +350,8 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 
 #ifdef CONFIG_MMU_GATHER_NO_RANGE
 
-#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
-#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
+#if defined(tlb_flush)
+#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
 #endif
 
 /*
@@ -352,20 +368,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
                flush_tlb_mm(tlb->mm);
 }
 
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#define tlb_end_vma tlb_end_vma
-static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
 #else /* CONFIG_MMU_GATHER_NO_RANGE */
 
 #ifndef tlb_flush
-
-#if defined(tlb_start_vma) || defined(tlb_end_vma)
-#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
-#endif
-
 /*
  * When an architecture does not provide its own tlb_flush() implementation
  * but does have a reasonably efficient flush_vma_range() implementation
@@ -385,6 +390,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
                flush_tlb_range(&vma, tlb->start, tlb->end);
        }
 }
+#endif
+
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */
 
 static inline void
 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
@@ -402,17 +410,9 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
         */
        tlb->vma_huge = is_vm_hugetlb_page(vma);
        tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
+       tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
 }
 
-#else
-
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#endif
-
-#endif /* CONFIG_MMU_GATHER_NO_RANGE */
-
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
        /*
@@ -486,32 +486,36 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
  * case where we're doing a full MM flush.  When we're doing a munmap,
  * the vmas are adjusted to only cover the region to be torn down.
  */
-#ifndef tlb_start_vma
 static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
        if (tlb->fullmm)
                return;
 
        tlb_update_vma_flags(tlb, vma);
+#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
        flush_cache_range(vma, vma->vm_start, vma->vm_end);
-}
 #endif
+}
 
-#ifndef tlb_end_vma
 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
        if (tlb->fullmm)
                return;
 
        /*
-        * Do a TLB flush and reset the range at VMA boundaries; this avoids
-        * the ranges growing with the unused space between consecutive VMAs,
-        * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
-        * this.
+        * VM_PFNMAP is more fragile because the core mm will not track the
+        * page mapcount -- there might not be page-frames for these PFNs after
+        * all. Force flush TLBs for such ranges to avoid munmap() vs
+        * unmap_mapping_range() races.
         */
-       tlb_flush_mmu_tlbonly(tlb);
+       if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
+               /*
+                * Do a TLB flush and reset the range at VMA boundaries; this avoids
+                * the ranges growing with the unused space between consecutive VMAs.
+                */
+               tlb_flush_mmu_tlbonly(tlb);
+       }
 }
-#endif
 
 /*
  * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
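
An architecture that opts into MMU_GATHER_MERGE_VMAS pairs it with a
tlb_flush() that is cheap for large merged ranges. A hypothetical minimal
hook, for illustration only:

	/* arch/<arch>/include/asm/tlb.h */
	#define tlb_flush tlb_flush
	static inline void tlb_flush(struct mmu_gather *tlb)
	{
		/* one full-MM invalidate per gather beats per-VMA range
		 * flushes on this (hypothetical) architecture */
		flush_tlb_mm(tlb->mm);
	}
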
index 0777725..10b1990 100644 (file)
@@ -1022,6 +1022,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
        for ((__i) = 0; \
             (__i) < (__state)->num_private_objs && \
                     ((obj) = (__state)->private_objs[__i].ptr, \
+                     (void)(obj) /* Only to avoid unused-but-set-variable warning */, \
                      (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
             (__i)++)
 
index 0fca8f3..addb135 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/dma-fence.h>
 #include <linux/completion.h>
 #include <linux/xarray.h>
-#include <linux/irq_work.h>
+#include <linux/workqueue.h>
 
 #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
 
@@ -295,7 +295,7 @@ struct drm_sched_job {
         */
        union {
                struct dma_fence_cb             finish_cb;
-               struct irq_work                 work;
+               struct work_struct              work;
        };
 
        uint64_t                        id;
index 4416536..ca89a48 100644 (file)
@@ -311,12 +311,12 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
 }
 
 void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
-void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
-                          struct ttm_resource *res);
-void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
-                          struct ttm_resource *res);
 void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
 
+void ttm_resource_add_bulk_move(struct ttm_resource *res,
+                               struct ttm_buffer_object *bo);
+void ttm_resource_del_bulk_move(struct ttm_resource *res,
+                               struct ttm_buffer_object *bo);
 void ttm_resource_move_to_lru_tail(struct ttm_resource *res);
 
 void ttm_resource_init(struct ttm_buffer_object *bo,
index 6c5d496..69a13e1 100644 (file)
@@ -84,6 +84,9 @@ extern struct key *find_asymmetric_key(struct key *keyring,
                                       const struct asymmetric_key_id *id_2,
                                       bool partial);
 
+int x509_load_certificate_list(const u8 cert_list[], const unsigned long list_size,
+                              const struct key *keyring);
+
 /*
  * The payload is at the discretion of the subtype.
  */
index 4f82a5b..44975c1 100644 (file)
@@ -584,7 +584,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
 extern bool osc_sb_apei_support_acked;
 extern bool osc_pc_lpi_support_confirmed;
 extern bool osc_sb_native_usb4_support_confirmed;
-extern bool osc_sb_cppc_not_supported;
+extern bool osc_sb_cppc2_support_acked;
 extern bool osc_cpc_flexible_adr_space_confirmed;
 
 /* USB4 Capabilities */
index 2bd073f..d452071 100644 (file)
@@ -119,6 +119,8 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
 
 extern struct backing_dev_info noop_backing_dev_info;
 
+int bdi_init(struct backing_dev_info *bdi);
+
 /**
  * writeback_in_progress - determine whether there is writeback in progress
  * @wb: bdi_writeback of interest
index 608d577..2f7b434 100644 (file)
@@ -342,7 +342,6 @@ static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
  */
 struct blk_independent_access_range {
        struct kobject          kobj;
-       struct request_queue    *queue;
        sector_t                sector;
        sector_t                nr_sectors;
 };
@@ -482,7 +481,6 @@ struct request_queue {
 #endif /* CONFIG_BLK_DEV_ZONED */
 
        int                     node;
-       struct mutex            debugfs_mutex;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
        struct blk_trace __rcu  *blk_trace;
 #endif
@@ -526,11 +524,12 @@ struct request_queue {
        struct bio_set          bio_split;
 
        struct dentry           *debugfs_dir;
-
-#ifdef CONFIG_BLK_DEBUG_FS
        struct dentry           *sched_debugfs_dir;
        struct dentry           *rqos_debugfs_dir;
-#endif
+       /*
+        * Serializes all debugfs metadata operations using the above dentries.
+        */
+       struct mutex            debugfs_mutex;
 
        bool                    mq_sysfs_init_done;
 
@@ -575,6 +574,7 @@ struct request_queue {
 #define QUEUE_FLAG_RQ_ALLOC_TIME 27    /* record rq->alloc_time_ns */
 #define QUEUE_FLAG_HCTX_ACTIVE 28      /* at least one blk-mq hctx is active */
 #define QUEUE_FLAG_NOWAIT       29     /* device supports NOWAIT */
+#define QUEUE_FLAG_SQ_SCHED     30     /* single queue style io dispatch */
 
 #define QUEUE_FLAG_MQ_DEFAULT  ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_SAME_COMP) |          \
@@ -616,6 +616,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_pm_only(q)   atomic_read(&(q)->pm_only)
 #define blk_queue_registered(q)        test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
 #define blk_queue_nowait(q)    test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
+#define blk_queue_sq_sched(q)  test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
 
 extern void blk_set_pm_only(struct request_queue *q);
 extern void blk_clear_pm_only(struct request_queue *q);
@@ -1006,8 +1007,6 @@ void disk_set_independent_access_ranges(struct gendisk *disk,
  */
 /* Supports zoned block devices sequential write constraint */
 #define ELEVATOR_F_ZBD_SEQ_WRITE       (1U << 0)
-/* Supports scheduling on multiple hardware queues */
-#define ELEVATOR_F_MQ_AWARE            (1U << 1)
 
 extern void blk_queue_required_elevator_features(struct request_queue *q,
                                                 unsigned int features);
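
With ELEVATOR_F_MQ_AWARE gone, a single-queue-style I/O scheduler marks the
queue directly instead of advertising an elevator feature. A hedged sketch of
how a scheduler's init hook might opt in (the surrounding elevator wiring is
elided):

	static int myelv_init_sched(struct request_queue *q,
				    struct elevator_type *e)
	{
		/* ... allocate and attach scheduler data ... */
		blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
		return 0;
	}
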
index 025fd0e..187b54a 100644 (file)
@@ -264,7 +264,8 @@ struct css_set {
         * List of csets participating in the on-going migration either as
         * source or destination.  Protected by cgroup_mutex.
         */
-       struct list_head mg_preload_node;
+       struct list_head mg_src_preload_node;
+       struct list_head mg_dst_preload_node;
        struct list_head mg_node;
 
        /*
index d08dfcb..4f2a819 100644 (file)
@@ -24,6 +24,7 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
 /* context/locking */
 # define __must_hold(x)        __attribute__((context(x,1,1)))
 # define __acquires(x) __attribute__((context(x,0,1)))
+# define __cond_acquires(x) __attribute__((context(x,0,-1)))
 # define __releases(x) __attribute__((context(x,1,0)))
 # define __acquire(x)  __context__(x,1)
 # define __release(x)  __context__(x,-1)
@@ -50,6 +51,7 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
 /* context/locking */
 # define __must_hold(x)
 # define __acquires(x)
+# define __cond_acquires(x)
 # define __releases(x)
 # define __acquire(x)  (void)0
 # define __release(x)  (void)0
index 1436530..8c1686e 100644 (file)
@@ -16,7 +16,6 @@
 
 #include <linux/atomic.h>
 #include <linux/types.h>
-#include <linux/mutex.h>
 
 struct vc_data;
 struct console_font_op;
@@ -154,22 +153,6 @@ struct console {
        uint    ospeed;
        u64     seq;
        unsigned long dropped;
-       struct task_struct *thread;
-       bool    blocked;
-
-       /*
-        * The per-console lock is used by printing kthreads to synchronize
-        * this console with callers of console_lock(). This is necessary in
-        * order to allow printing kthreads to run in parallel to each other,
-        * while each safely accessing the @blocked field and synchronizing
-        * against direct printing via console_lock/console_unlock.
-        *
-        * Note: For synchronizing against direct printing via
-        *       console_trylock/console_unlock, see the static global
-        *       variable @console_kthreads_active.
-        */
-       struct mutex lock;
-
        void    *data;
        struct   console *next;
 };
index 54dc2f9..314802f 100644 (file)
@@ -65,6 +65,11 @@ extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
 extern ssize_t cpu_show_itlb_multihit(struct device *dev,
                                      struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf);
+extern ssize_t cpu_show_retbleed(struct device *dev,
+                                struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
index 19f0dbf..154daff 100644 (file)
@@ -130,7 +130,6 @@ enum cpuhp_state {
        CPUHP_ZCOMP_PREPARE,
        CPUHP_TIMERS_PREPARE,
        CPUHP_MIPS_SOC_PREPARE,
-       CPUHP_LOONGARCH_SOC_PREPARE,
        CPUHP_BP_PREPARE_DYN,
        CPUHP_BP_PREPARE_DYN_END                = CPUHP_BP_PREPARE_DYN + 20,
        CPUHP_BRINGUP_CPU,
@@ -230,6 +229,7 @@ enum cpuhp_state {
        CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
        CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
        CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE,
+       CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
        CPUHP_AP_PERF_ARM_L2X0_ONLINE,
        CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
        CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
index dc10bee..34aab4d 100644 (file)
@@ -148,6 +148,8 @@ struct devfreq_stats {
  *             reevaluate operable frequencies. Devfreq users may use
  *             devfreq.nb to the corresponding register notifier call chain.
  * @work:      delayed work for load monitoring.
+ * @freq_table:                current frequency table used by the devfreq driver.
+ * @max_state:         count of entries present in the frequency table.
  * @previous_freq:     previously configured frequency value.
  * @last_status:       devfreq user device info, performance statistics
  * @data:      Private data of the governor. The devfreq framework does not
@@ -185,6 +187,9 @@ struct devfreq {
        struct notifier_block nb;
        struct delayed_work work;
 
+       unsigned long *freq_table;
+       unsigned int max_state;
+
        unsigned long previous_freq;
        struct devfreq_dev_status last_status;
 
index b698266..6c57339 100644 (file)
@@ -21,7 +21,7 @@
  * We consider 10% difference as significant.
  */
 #define IS_SIGNIFICANT_DIFF(val, ref) \
-       (((100UL * abs((val) - (ref))) / (ref)) > 10)
+       ((ref) && (((100UL * abs((val) - (ref))) / (ref)) > 10))
 
 /*
  * Calculate the gap between two values.
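
Quick worked check of the guarded macro: a reference of zero now
short-circuits to "not significant" instead of dividing by zero.

	#include <assert.h>
	#include <stdlib.h>

	#define IS_SIGNIFICANT_DIFF(val, ref) \
		((ref) && (((100UL * abs((val) - (ref))) / (ref)) > 10))

	int main(void)
	{
		assert(IS_SIGNIFICANT_DIFF(115, 100));	/* 15% > 10% */
		assert(!IS_SIGNIFICANT_DIFF(105, 100));	/* 5% */
		assert(!IS_SIGNIFICANT_DIFF(42, 0));	/* ref == 0 */
		return 0;
	}
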
index 4c374be..aa63e0b 100644 (file)
@@ -21,7 +21,8 @@ extern enum integrity_status evm_verifyxattr(struct dentry *dentry,
                                             void *xattr_value,
                                             size_t xattr_value_len,
                                             struct integrity_iint_cache *iint);
-extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr);
+extern int evm_inode_setattr(struct user_namespace *mnt_userns,
+                            struct dentry *dentry, struct iattr *attr);
 extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid);
 extern int evm_inode_setxattr(struct user_namespace *mnt_userns,
                              struct dentry *dentry, const char *name,
@@ -68,7 +69,8 @@ static inline enum integrity_status evm_verifyxattr(struct dentry *dentry,
 }
 #endif
 
-static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+static inline int evm_inode_setattr(struct user_namespace *mnt_userns,
+                                   struct dentry *dentry, struct iattr *attr)
 {
        return 0;
 }
index edc2855..8ad743d 100644 (file)
 #define FANOTIFY_MARK_TYPE_BITS        (FAN_MARK_INODE | FAN_MARK_MOUNT | \
                                 FAN_MARK_FILESYSTEM)
 
+#define FANOTIFY_MARK_CMD_BITS (FAN_MARK_ADD | FAN_MARK_REMOVE | \
+                                FAN_MARK_FLUSH)
+
+#define FANOTIFY_MARK_IGNORE_BITS (FAN_MARK_IGNORED_MASK | \
+                                  FAN_MARK_IGNORE)
+
 #define FANOTIFY_MARK_FLAGS    (FANOTIFY_MARK_TYPE_BITS | \
-                                FAN_MARK_ADD | \
-                                FAN_MARK_REMOVE | \
+                                FANOTIFY_MARK_CMD_BITS | \
+                                FANOTIFY_MARK_IGNORE_BITS | \
                                 FAN_MARK_DONT_FOLLOW | \
                                 FAN_MARK_ONLYDIR | \
-                                FAN_MARK_IGNORED_MASK | \
                                 FAN_MARK_IGNORED_SURV_MODIFY | \
-                                FAN_MARK_EVICTABLE | \
-                                FAN_MARK_FLUSH)
+                                FAN_MARK_EVICTABLE)
 
 /*
  * Events that can be reported with data type FSNOTIFY_EVENT_PATH.
                                         FANOTIFY_PERM_EVENTS | \
                                         FAN_Q_OVERFLOW | FAN_ONDIR)
 
+/* Events and flags relevant only for directories */
+#define FANOTIFY_DIRONLY_EVENT_BITS    (FANOTIFY_DIRENT_EVENTS | \
+                                        FAN_EVENT_ON_CHILD | FAN_ONDIR)
+
 #define ALL_FANOTIFY_EVENT_BITS                (FANOTIFY_OUTGOING_EVENTS | \
                                         FANOTIFY_EVENT_FLAGS)
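
With the mark flags split out, FAN_MARK_IGNORE (grouped above with the legacy
FAN_MARK_IGNORED_MASK) lets userspace install ignore marks whose event flags
are honored. A hedged userspace sketch of the intended use; the path and mask
are illustrative only, and FAN_MARK_IGNORE requires kernel headers from this
release:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/fanotify.h>

    int main(void)
    {
            int fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);

            if (fd < 0) {
                    perror("fanotify_init");
                    return 1;
            }
            /*
             * Unlike the legacy FAN_MARK_IGNORED_MASK, FAN_MARK_IGNORE
             * honors FAN_ONDIR in the ignore mask: ignoring FAN_OPEN on
             * directories must now be requested explicitly.
             */
            if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_IGNORE |
                              FAN_MARK_IGNORED_SURV_MODIFY,
                              FAN_OPEN | FAN_ONDIR, AT_FDCWD, "/tmp") < 0)
                    perror("fanotify_mark");
            return 0;
    }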
 
index ff5596d..2382dec 100644 (file)
@@ -15,6 +15,8 @@ void fbcon_new_modelist(struct fb_info *info);
 void fbcon_get_requirement(struct fb_info *info,
                           struct fb_blit_caps *caps);
 void fbcon_fb_blanked(struct fb_info *info, int blank);
+int  fbcon_modechange_possible(struct fb_info *info,
+                              struct fb_var_screeninfo *var);
 void fbcon_update_vcs(struct fb_info *info, bool all);
 void fbcon_remap_all(struct fb_info *info);
 int fbcon_set_con2fb_map_ioctl(void __user *argp);
@@ -33,6 +35,8 @@ static inline void fbcon_new_modelist(struct fb_info *info) {}
 static inline void fbcon_get_requirement(struct fb_info *info,
                                         struct fb_blit_caps *caps) {}
 static inline void fbcon_fb_blanked(struct fb_info *info, int blank) {}
+static inline int  fbcon_modechange_possible(struct fb_info *info,
+                               struct fb_var_screeninfo *var) { return 0; }
 static inline void fbcon_update_vcs(struct fb_info *info, bool all) {}
 static inline void fbcon_remap_all(struct fb_info *info) {}
 static inline int fbcon_set_con2fb_map_ioctl(void __user *argp) { return 0; }
index 9ad5e35..ec2e358 100644 (file)
@@ -221,8 +221,26 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 struct iattr {
        unsigned int    ia_valid;
        umode_t         ia_mode;
-       kuid_t          ia_uid;
-       kgid_t          ia_gid;
+       /*
+        * The two anonymous unions wrap structures with the same member.
+        *
+        * Filesystems raising FS_ALLOW_IDMAP need to use ia_vfs{g,u}id, which
+        * are a dedicated type requiring the filesystem to use the dedicated
+        * helpers. Other filesystems can continue to use ia_{g,u}id until they
+        * have been ported.
+        *
+        * They always contain the same value. In other words, the VFS passes
+        * down the same value to FS_ALLOW_IDMAP filesystems on idmapped mounts
+        * as it would on regular mounts.
+        */
+       union {
+               kuid_t          ia_uid;
+               vfsuid_t        ia_vfsuid;
+       };
+       union {
+               kgid_t          ia_gid;
+               vfsgid_t        ia_vfsgid;
+       };
        loff_t          ia_size;
        struct timespec64 ia_atime;
        struct timespec64 ia_mtime;
@@ -1600,13 +1618,68 @@ static inline void i_gid_write(struct inode *inode, gid_t gid)
  * @mnt_userns: user namespace of the mount the inode was found from
  * @inode: inode to map
  *
+ * Note, this will eventually be removed completely in favor of the type-safe
+ * i_uid_into_vfsuid().
+ *
  * Return: the inode's i_uid mapped down according to @mnt_userns.
  * If the inode's i_uid has no mapping INVALID_UID is returned.
  */
 static inline kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns,
                                    const struct inode *inode)
 {
-       return mapped_kuid_fs(mnt_userns, i_user_ns(inode), inode->i_uid);
+       return AS_KUIDT(make_vfsuid(mnt_userns, i_user_ns(inode), inode->i_uid));
+}
+
+/**
+ * i_uid_into_vfsuid - map an inode's i_uid down into a mnt_userns
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: inode to map
+ *
+ * Return: the inode's i_uid mapped down according to @mnt_userns.
+ * If the inode's i_uid has no mapping INVALID_VFSUID is returned.
+ */
+static inline vfsuid_t i_uid_into_vfsuid(struct user_namespace *mnt_userns,
+                                        const struct inode *inode)
+{
+       return make_vfsuid(mnt_userns, i_user_ns(inode), inode->i_uid);
+}
+
+/**
+ * i_uid_needs_update - check whether inode's i_uid needs to be updated
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Check whether the @inode's i_uid field needs to be updated taking idmapped
+ * mounts into account if the filesystem supports it.
+ *
+ * Return: true if @inode's i_uid field needs to be updated, false if not.
+ */
+static inline bool i_uid_needs_update(struct user_namespace *mnt_userns,
+                                     const struct iattr *attr,
+                                     const struct inode *inode)
+{
+       return ((attr->ia_valid & ATTR_UID) &&
+               !vfsuid_eq(attr->ia_vfsuid,
+                          i_uid_into_vfsuid(mnt_userns, inode)));
+}
+
+/**
+ * i_uid_update - update @inode's i_uid field
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Safely update @inode's i_uid field translating the vfsuid of any idmapped
+ * mount into the filesystem kuid.
+ */
+static inline void i_uid_update(struct user_namespace *mnt_userns,
+                               const struct iattr *attr,
+                               struct inode *inode)
+{
+       if (attr->ia_valid & ATTR_UID)
+               inode->i_uid = from_vfsuid(mnt_userns, i_user_ns(inode),
+                                          attr->ia_vfsuid);
 }
 
 /**
@@ -1614,13 +1687,68 @@ static inline kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns,
  * @mnt_userns: user namespace of the mount the inode was found from
  * @inode: inode to map
  *
+ * Note, this will eventually be removed completely in favor of the type-safe
+ * i_gid_into_vfsgid().
+ *
  * Return: the inode's i_gid mapped down according to @mnt_userns.
  * If the inode's i_gid has no mapping INVALID_GID is returned.
  */
 static inline kgid_t i_gid_into_mnt(struct user_namespace *mnt_userns,
                                    const struct inode *inode)
 {
-       return mapped_kgid_fs(mnt_userns, i_user_ns(inode), inode->i_gid);
+       return AS_KGIDT(make_vfsgid(mnt_userns, i_user_ns(inode), inode->i_gid));
+}
+
+/**
+ * i_gid_into_vfsgid - map an inode's i_gid down into a mnt_userns
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: inode to map
+ *
+ * Return: the inode's i_gid mapped down according to @mnt_userns.
+ * If the inode's i_gid has no mapping INVALID_VFSGID is returned.
+ */
+static inline vfsgid_t i_gid_into_vfsgid(struct user_namespace *mnt_userns,
+                                        const struct inode *inode)
+{
+       return make_vfsgid(mnt_userns, i_user_ns(inode), inode->i_gid);
+}
+
+/**
+ * i_gid_needs_update - check whether inode's i_gid needs to be updated
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Check whether the @inode's i_gid field needs to be updated taking idmapped
+ * mounts into account if the filesystem supports it.
+ *
+ * Return: true if @inode's i_gid field needs to be updated, false if not.
+ */
+static inline bool i_gid_needs_update(struct user_namespace *mnt_userns,
+                                     const struct iattr *attr,
+                                     const struct inode *inode)
+{
+       return ((attr->ia_valid & ATTR_GID) &&
+               !vfsgid_eq(attr->ia_vfsgid,
+                          i_gid_into_vfsgid(mnt_userns, inode)));
+}
+
+/**
+ * i_gid_update - update @inode's i_gid field
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Safely update @inode's i_gid field translating the vfsgid of any idmapped
+ * mount into the filesystem kgid.
+ */
+static inline void i_gid_update(struct user_namespace *mnt_userns,
+                               const struct iattr *attr,
+                               struct inode *inode)
+{
+       if (attr->ia_valid & ATTR_GID)
+               inode->i_gid = from_vfsgid(mnt_userns, i_user_ns(inode),
+                                          attr->ia_vfsgid);
 }
 
 /**
@@ -2195,8 +2323,8 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
 static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns,
                                   struct inode *inode)
 {
-       return !uid_valid(i_uid_into_mnt(mnt_userns, inode)) ||
-              !gid_valid(i_gid_into_mnt(mnt_userns, inode));
+       return !vfsuid_valid(i_uid_into_vfsuid(mnt_userns, inode)) ||
+              !vfsgid_valid(i_gid_into_vfsgid(mnt_userns, inode));
 }
 
 static inline int iocb_flags(struct file *file);
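
Taken together these helpers keep a filesystem's setattr path idmapping-aware
without it ever touching raw k{u,g}ids. A minimal sketch, assuming a
hypothetical filesystem that has opted into vfsuids (error handling elided):

    static int example_setattr(struct user_namespace *mnt_userns,
                               struct dentry *dentry, struct iattr *attr)
    {
            struct inode *inode = d_inode(dentry);

            /* Only do ownership work when something actually changes. */
            if (i_uid_needs_update(mnt_userns, attr, inode) ||
                i_gid_needs_update(mnt_userns, attr, inode)) {
                    /*
                     * The helpers call from_vfsuid()/from_vfsgid()
                     * internally; attr->ia_vfs{u,g}id is never written
                     * to the inode directly.
                     */
                    i_uid_update(mnt_userns, attr, inode);
                    i_gid_update(mnt_userns, attr, inode);
            }
            mark_inode_dirty(inode);
            return 0;
    }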
index 72585c9..b862656 100644 (file)
@@ -130,6 +130,7 @@ struct fscache_cookie {
 #define FSCACHE_COOKIE_DO_PREP_TO_WRITE        12              /* T if cookie needs write preparation */
 #define FSCACHE_COOKIE_HAVE_DATA       13              /* T if this cookie has data stored */
 #define FSCACHE_COOKIE_IS_HASHED       14              /* T if this cookie is hashed */
+#define FSCACHE_COOKIE_DO_INVALIDATE   15              /* T if cookie needs invalidation */
 
        enum fscache_cookie_state       state;
        u8                              advice;         /* FSCACHE_ADV_* */
index 9560734..d7d96c8 100644 (file)
@@ -518,8 +518,8 @@ struct fsnotify_mark {
        struct hlist_node obj_list;
        /* Head of list of marks for an object [mark ref] */
        struct fsnotify_mark_connector *connector;
-       /* Events types to ignore [mark->lock, group->mark_mutex] */
-       __u32 ignored_mask;
+       /* Event types and flags to ignore [mark->lock, group->mark_mutex] */
+       __u32 ignore_mask;
        /* General fsnotify mark flags */
 #define FSNOTIFY_MARK_FLAG_ALIVE               0x0001
 #define FSNOTIFY_MARK_FLAG_ATTACHED            0x0002
@@ -529,6 +529,7 @@ struct fsnotify_mark {
        /* fanotify mark flags */
 #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x0100
 #define FSNOTIFY_MARK_FLAG_NO_IREF             0x0200
+#define FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS    0x0400
        unsigned int flags;             /* flags [mark->lock] */
 };
 
@@ -655,15 +656,91 @@ extern void fsnotify_remove_queued_event(struct fsnotify_group *group,
 
 /* functions used to manipulate the marks attached to inodes */
 
-/* Get mask for calculating object interest taking ignored mask into account */
+/*
+ * Canonical "ignore mask" including event flags.
+ *
+ * Note the subtle semantic difference from the legacy ->ignored_mask.
+ * ->ignored_mask traditionally only meant which events should be ignored,
+ * while ->ignore_mask also includes flags regarding the type of objects on
+ * which events should be ignored.
+ */
+static inline __u32 fsnotify_ignore_mask(struct fsnotify_mark *mark)
+{
+       __u32 ignore_mask = mark->ignore_mask;
+
+       /* The event flags in ignore mask take effect */
+       if (mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS)
+               return ignore_mask;
+
+       /*
+        * Legacy behavior:
+        * - Always ignore events on dir
+        * - Ignore events on child if parent is watching children
+        */
+       ignore_mask |= FS_ISDIR;
+       ignore_mask &= ~FS_EVENT_ON_CHILD;
+       ignore_mask |= mark->mask & FS_EVENT_ON_CHILD;
+
+       return ignore_mask;
+}
+
+/* Legacy ignored_mask - only event types to ignore */
+static inline __u32 fsnotify_ignored_events(struct fsnotify_mark *mark)
+{
+       return mark->ignore_mask & ALL_FSNOTIFY_EVENTS;
+}
+
+/*
+ * Check if mask (or ignore mask) should be applied depending if victim is a
+ * directory and whether it is reported to a watching parent.
+ */
+static inline bool fsnotify_mask_applicable(__u32 mask, bool is_dir,
+                                           int iter_type)
+{
+       /* Should mask be applied to a directory? */
+       if (is_dir && !(mask & FS_ISDIR))
+               return false;
+
+       /* Should mask be applied to a child? */
+       if (iter_type == FSNOTIFY_ITER_TYPE_PARENT &&
+           !(mask & FS_EVENT_ON_CHILD))
+               return false;
+
+       return true;
+}
+
+/*
+ * Effective ignore mask, taking into account whether the event victim is a
+ * directory and whether it is reported to a watching parent.
+ */
+static inline __u32 fsnotify_effective_ignore_mask(struct fsnotify_mark *mark,
+                                                  bool is_dir, int iter_type)
+{
+       __u32 ignore_mask = fsnotify_ignored_events(mark);
+
+       if (!ignore_mask)
+               return 0;
+
+       /* For non-dir and non-child, no need to consult the event flags */
+       if (!is_dir && iter_type != FSNOTIFY_ITER_TYPE_PARENT)
+               return ignore_mask;
+
+       ignore_mask = fsnotify_ignore_mask(mark);
+       if (!fsnotify_mask_applicable(ignore_mask, is_dir, iter_type))
+               return 0;
+
+       return ignore_mask & ALL_FSNOTIFY_EVENTS;
+}
+
+/* Get mask for calculating object interest taking ignore mask into account */
 static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark)
 {
        __u32 mask = mark->mask;
 
-       if (!mark->ignored_mask)
+       if (!fsnotify_ignored_events(mark))
                return mask;
 
-       /* Interest in FS_MODIFY may be needed for clearing ignored mask */
+       /* Interest in FS_MODIFY may be needed for clearing ignore mask */
        if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
                mask |= FS_MODIFY;
 
@@ -671,7 +748,7 @@ static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark)
         * If mark is interested in ignoring events on children, the object must
         * show interest in those events for fsnotify_parent() to notice it.
         */
-       return mask | (mark->ignored_mask & ALL_FSNOTIFY_EVENTS);
+       return mask | mark->ignore_mask;
 }
 
 /* Get mask of events for a list of marks */
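
For a legacy mark, fsnotify_ignore_mask() synthesizes the historical
semantics. A worked example with invented values:

    /*
     * Legacy mark (FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS not set) with
     * ignore_mask = FS_OPEN and mark->mask containing FS_EVENT_ON_CHILD:
     *
     *   fsnotify_ignore_mask() returns
     *       FS_OPEN | FS_ISDIR            (dirs were always ignored)
     *               | FS_EVENT_ON_CHILD   (copied from mark->mask)
     *
     * A new-style mark with the flag set returns ignore_mask verbatim,
     * so FAN_MARK_IGNORE users control FS_ISDIR and FS_EVENT_ON_CHILD
     * themselves.
     */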
index 2d2ccae..0ace775 100644 (file)
@@ -348,7 +348,7 @@ struct vm_area_struct;
 #define GFP_DMA32      __GFP_DMA32
 #define GFP_HIGHUSER   (GFP_USER | __GFP_HIGHMEM)
 #define GFP_HIGHUSER_MOVABLE   (GFP_HIGHUSER | __GFP_MOVABLE | \
-                        __GFP_SKIP_KASAN_POISON)
+                        __GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON)
 #define GFP_TRANSHUGE_LIGHT    ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
                         __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
 #define GFP_TRANSHUGE  (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
index b1e0f1f..54c3c65 100644 (file)
@@ -167,21 +167,24 @@ struct gpio_irq_chip {
         */
        irq_flow_handler_t parent_handler;
 
-       /**
-        * @parent_handler_data:
-        *
-        * If @per_parent_data is false, @parent_handler_data is a single
-        * pointer used as the data associated with every parent interrupt.
-        *
-        * @parent_handler_data_array:
-        *
-        * If @per_parent_data is true, @parent_handler_data_array is
-        * an array of @num_parents pointers, and is used to associate
-        * different data for each parent. This cannot be NULL if
-        * @per_parent_data is true.
-        */
        union {
+               /**
+                * @parent_handler_data:
+                *
+                * If @per_parent_data is false, @parent_handler_data is a
+                * single pointer used as the data associated with every
+                * parent interrupt.
+                */
                void *parent_handler_data;
+
+               /**
+                * @parent_handler_data_array:
+                *
+                * If @per_parent_data is true, @parent_handler_data_array is
+                * an array of @num_parents pointers, and is used to associate
+                * different data for each parent. This cannot be NULL if
+                * @per_parent_data is true.
+                */
                void **parent_handler_data_array;
        };
 
index 3af34de..56d6a01 100644 (file)
@@ -149,19 +149,19 @@ static inline void *kmap_local_folio(struct folio *folio, size_t offset);
  * It is used in atomic context when code wants to access the contents of a
  * page that might be allocated from high memory (see __GFP_HIGHMEM), for
  * example a page in the pagecache.  The API has two functions, and they
- * can be used in a manner similar to the following:
+ * can be used in a manner similar to the following::
  *
- * -- Find the page of interest. --
- * struct page *page = find_get_page(mapping, offset);
+ *   // Find the page of interest.
+ *   struct page *page = find_get_page(mapping, offset);
  *
- * -- Gain access to the contents of that page. --
- * void *vaddr = kmap_atomic(page);
+ *   // Gain access to the contents of that page.
+ *   void *vaddr = kmap_atomic(page);
  *
- * -- Do something to the contents of that page. --
- * memset(vaddr, 0, PAGE_SIZE);
+ *   // Do something to the contents of that page.
+ *   memset(vaddr, 0, PAGE_SIZE);
  *
- * -- Unmap that page. --
- * kunmap_atomic(vaddr);
+ *   // Unmap that page.
+ *   kunmap_atomic(vaddr);
  *
  * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
  * call, not the argument.
index de29821..4ddaf6a 100644 (file)
@@ -461,4 +461,16 @@ static inline int split_folio_to_list(struct folio *folio,
        return split_huge_page_to_list(&folio->page, list);
 }
 
+/*
+ * Archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to
+ * implementation limitations (such as arm64 MTE) can override this to
+ * false.
+ */
+#ifndef arch_thp_swp_supported
+static inline bool arch_thp_swp_supported(void)
+{
+       return true;
+}
+#endif
+
 #endif /* _LINUX_HUGE_MM_H */
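
An architecture opts out by providing its own definition before this header
is included. arm64's MTE-based override from the same series looks roughly
like this (sketch, abridged):

    /* e.g. in arch/arm64/include/asm/pgtable.h */
    #define arch_thp_swp_supported arch_thp_swp_supported
    static inline bool arch_thp_swp_supported(void)
    {
            /* MTE tags would be lost across THP swapout, so opt out. */
            return !system_supports_mte();
    }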
index 426b174..81708ca 100644 (file)
@@ -140,6 +140,11 @@ static inline int ima_measure_critical_data(const char *event_label,
 
 #endif /* CONFIG_IMA */
 
+#ifdef CONFIG_HAVE_IMA_KEXEC
+int __init ima_free_kexec_buffer(void);
+int __init ima_get_kexec_buffer(void **addr, size_t *size);
+#endif
+
 #ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT
 extern bool arch_ima_get_secureboot(void);
 extern const char * const *arch_get_ima_policy(void);
index 4f29139..5fcf89f 100644 (file)
@@ -612,7 +612,6 @@ struct intel_iommu {
 struct device_domain_info {
        struct list_head link;  /* link to domain siblings */
        struct list_head global; /* link to global list */
-       struct list_head table; /* link to pasid table */
        u32 segment;            /* PCI segment number */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
@@ -729,8 +728,6 @@ extern int dmar_ir_support(void);
 void *alloc_pgtable_page(int node);
 void free_pgtable_page(void *vaddr);
 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
-int for_each_device_domain(int (*fn)(struct device_domain_info *info,
-                                    void *data), void *data);
 void iommu_flush_write_buffer(struct intel_iommu *iommu);
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
 struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
index ce6536f..475683c 100644 (file)
@@ -452,6 +452,12 @@ static inline int kexec_crash_loaded(void) { return 0; }
 #define kexec_in_progress false
 #endif /* CONFIG_KEXEC_CORE */
 
+#ifdef CONFIG_KEXEC_SIG
+void set_kexec_sig_enforced(void);
+#else
+static inline void set_kexec_sig_enforced(void) {}
+#endif
+
 #endif /* !defined(__ASSEMBLY__) */
 
 #endif /* LINUX_KEXEC_H */
index c20f2d5..90a45ef 100644 (file)
@@ -1513,7 +1513,7 @@ static inline void kvm_arch_end_assignment(struct kvm *kvm)
 {
 }
 
-static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
 {
        return false;
 }
@@ -1822,6 +1822,15 @@ struct _kvm_stats_desc {
        STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE,                      \
                KVM_STATS_BASE_POW10, 0)
 
+/* Instantaneous boolean value, read only */
+#define STATS_DESC_IBOOLEAN(SCOPE, name)                                      \
+       STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN,                \
+               KVM_STATS_BASE_POW10, 0)
+/* Peak (sticky) boolean value, read/write */
+#define STATS_DESC_PBOOLEAN(SCOPE, name)                                      \
+       STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN,                   \
+               KVM_STATS_BASE_POW10, 0)
+
 /* Cumulative time in nanosecond */
 #define STATS_DESC_TIME_NSEC(SCOPE, name)                                     \
        STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS,             \
@@ -1853,7 +1862,7 @@ struct _kvm_stats_desc {
                        HALT_POLL_HIST_COUNT),                                 \
        STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist,             \
                        HALT_POLL_HIST_COUNT),                                 \
-       STATS_DESC_ICOUNTER(VCPU_GENERIC, blocking)
+       STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
 
 extern struct dentry *kvm_debugfs_dir;
 
index 99f17cc..c3a1f78 100644 (file)
@@ -38,7 +38,6 @@ extern void lockref_get(struct lockref *);
 extern int lockref_put_return(struct lockref *);
 extern int lockref_get_not_zero(struct lockref *);
 extern int lockref_put_not_zero(struct lockref *);
-extern int lockref_get_or_lock(struct lockref *);
 extern int lockref_put_or_lock(struct lockref *);
 
 extern void lockref_mark_dead(struct lockref *);
index e115952..c04c4fd 100644 (file)
@@ -16,7 +16,7 @@ static inline int memregion_alloc(gfp_t gfp)
 {
        return -ENOMEM;
 }
-void memregion_free(int id)
+static inline void memregion_free(int id)
 {
 }
 #endif
index bc8f326..7898e29 100644 (file)
@@ -1130,23 +1130,27 @@ static inline bool is_zone_movable_page(const struct page *page)
 #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
 
-bool __put_devmap_managed_page(struct page *page);
-static inline bool put_devmap_managed_page(struct page *page)
+bool __put_devmap_managed_page_refs(struct page *page, int refs);
+static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
 {
        if (!static_branch_unlikely(&devmap_managed_key))
                return false;
        if (!is_zone_device_page(page))
                return false;
-       return __put_devmap_managed_page(page);
+       return __put_devmap_managed_page_refs(page, refs);
 }
-
 #else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
-static inline bool put_devmap_managed_page(struct page *page)
+static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
 {
        return false;
 }
 #endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
 
+static inline bool put_devmap_managed_page(struct page *page)
+{
+       return put_devmap_managed_page_refs(page, 1);
+}
+
 /* 127: arbitrary random number, small enough to assemble well */
 #define folio_ref_zero_or_close_to_overflow(folio) \
        ((unsigned int) folio_ref_count(folio) + 127u <= 127u)
@@ -1600,7 +1604,7 @@ static inline bool is_pinnable_page(struct page *page)
        if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
                return false;
 #endif
-       return !(is_zone_movable_page(page) || is_zero_pfn(page_to_pfn(page)));
+       return !is_zone_movable_page(page) || is_zero_pfn(page_to_pfn(page));
 }
 #else
 static inline bool is_pinnable_page(struct page *page)
@@ -3232,6 +3236,7 @@ enum mf_flags {
        MF_MUST_KILL = 1 << 2,
        MF_SOFT_OFFLINE = 1 << 3,
        MF_UNPOISON = 1 << 4,
+       MF_SW_SIMULATED = 1 << 5,
 };
 extern int memory_failure(unsigned long pfn, int flags);
 extern void memory_failure_queue(unsigned long pfn, int flags);
index ee5a217..f6e5369 100644 (file)
@@ -13,6 +13,129 @@ struct user_namespace;
  */
 extern struct user_namespace init_user_ns;
 
+typedef struct {
+       uid_t val;
+} vfsuid_t;
+
+typedef struct {
+       gid_t val;
+} vfsgid_t;
+
+static_assert(sizeof(vfsuid_t) == sizeof(kuid_t));
+static_assert(sizeof(vfsgid_t) == sizeof(kgid_t));
+static_assert(offsetof(vfsuid_t, val) == offsetof(kuid_t, val));
+static_assert(offsetof(vfsgid_t, val) == offsetof(kgid_t, val));
+
+#ifdef CONFIG_MULTIUSER
+static inline uid_t __vfsuid_val(vfsuid_t uid)
+{
+       return uid.val;
+}
+
+static inline gid_t __vfsgid_val(vfsgid_t gid)
+{
+       return gid.val;
+}
+#else
+static inline uid_t __vfsuid_val(vfsuid_t uid)
+{
+       return 0;
+}
+
+static inline gid_t __vfsgid_val(vfsgid_t gid)
+{
+       return 0;
+}
+#endif
+
+static inline bool vfsuid_valid(vfsuid_t uid)
+{
+       return __vfsuid_val(uid) != (uid_t)-1;
+}
+
+static inline bool vfsgid_valid(vfsgid_t gid)
+{
+       return __vfsgid_val(gid) != (gid_t)-1;
+}
+
+static inline bool vfsuid_eq(vfsuid_t left, vfsuid_t right)
+{
+       return vfsuid_valid(left) && __vfsuid_val(left) == __vfsuid_val(right);
+}
+
+static inline bool vfsgid_eq(vfsgid_t left, vfsgid_t right)
+{
+       return vfsgid_valid(left) && __vfsgid_val(left) == __vfsgid_val(right);
+}
+
+/**
+ * vfsuid_eq_kuid - check whether kuid and vfsuid have the same value
+ * @vfsuid: the vfsuid to compare
+ * @kuid: the kuid to compare
+ *
+ * Check whether @vfsuid and @kuid have the same values.
+ *
+ * Return: true if @vfsuid and @kuid have the same value, false if not.
+ * Comparison between two invalid uids returns false.
+ */
+static inline bool vfsuid_eq_kuid(vfsuid_t vfsuid, kuid_t kuid)
+{
+       return vfsuid_valid(vfsuid) && __vfsuid_val(vfsuid) == __kuid_val(kuid);
+}
+
+/**
+ * vfsgid_eq_kgid - check whether kgid and vfsgid have the same value
+ * @vfsgid: the vfsgid to compare
+ * @kgid: the kgid to compare
+ *
+ * Check whether @vfsgid and @kgid have the same values.
+ *
+ * Return: true if @vfsgid and @kgid have the same value, false if not.
+ * Comparison between two invalid gids returns false.
+ */
+static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid)
+{
+       return vfsgid_valid(vfsgid) && __vfsgid_val(vfsgid) == __kgid_val(kgid);
+}
+
+/*
+ * vfs{g,u}ids are created from k{g,u}ids.
+ * We don't allow them to be created from regular {u,g}id.
+ */
+#define VFSUIDT_INIT(val) (vfsuid_t){ __kuid_val(val) }
+#define VFSGIDT_INIT(val) (vfsgid_t){ __kgid_val(val) }
+
+#define INVALID_VFSUID VFSUIDT_INIT(INVALID_UID)
+#define INVALID_VFSGID VFSGIDT_INIT(INVALID_GID)
+
+/*
+ * Allow a vfs{g,u}id to be used as a k{g,u}id where we want to compare
+ * whether the mapped value is identical to value of a k{g,u}id.
+ */
+#define AS_KUIDT(val) (kuid_t){ __vfsuid_val(val) }
+#define AS_KGIDT(val) (kgid_t){ __vfsgid_val(val) }
+
+#ifdef CONFIG_MULTIUSER
+/**
+ * vfsgid_in_group_p() - check whether a vfsgid matches the caller's groups
+ * @vfsgid: the mnt gid to match
+ *
+ * This function can be used to determine whether @vfsgid matches any of the
+ * caller's groups.
+ *
+ * Return: 1 if @vfsgid matches the caller's groups, 0 if not.
+ */
+static inline int vfsgid_in_group_p(vfsgid_t vfsgid)
+{
+       return in_group_p(AS_KGIDT(vfsgid));
+}
+#else
+static inline int vfsgid_in_group_p(vfsgid_t vfsgid)
+{
+       return 1;
+}
+#endif
+
 /**
  * initial_idmapping - check whether this is the initial mapping
  * @ns: idmapping to check
@@ -48,7 +171,7 @@ static inline bool no_idmapping(const struct user_namespace *mnt_userns,
 }
 
 /**
- * mapped_kuid_fs - map a filesystem kuid into a mnt_userns
+ * make_vfsuid - map a filesystem kuid into a mnt_userns
  * @mnt_userns: the mount's idmapping
  * @fs_userns: the filesystem's idmapping
  * @kuid : kuid to be mapped
@@ -67,25 +190,33 @@ static inline bool no_idmapping(const struct user_namespace *mnt_userns,
  * If @kuid has no mapping in either @mnt_userns or @fs_userns INVALID_UID is
  * returned.
  */
-static inline kuid_t mapped_kuid_fs(struct user_namespace *mnt_userns,
-                                   struct user_namespace *fs_userns,
-                                   kuid_t kuid)
+
+static inline vfsuid_t make_vfsuid(struct user_namespace *mnt_userns,
+                                  struct user_namespace *fs_userns,
+                                  kuid_t kuid)
 {
        uid_t uid;
 
        if (no_idmapping(mnt_userns, fs_userns))
-               return kuid;
+               return VFSUIDT_INIT(kuid);
        if (initial_idmapping(fs_userns))
                uid = __kuid_val(kuid);
        else
                uid = from_kuid(fs_userns, kuid);
        if (uid == (uid_t)-1)
-               return INVALID_UID;
-       return make_kuid(mnt_userns, uid);
+               return INVALID_VFSUID;
+       return VFSUIDT_INIT(make_kuid(mnt_userns, uid));
+}
+
+static inline kuid_t mapped_kuid_fs(struct user_namespace *mnt_userns,
+                                   struct user_namespace *fs_userns,
+                                   kuid_t kuid)
+{
+       return AS_KUIDT(make_vfsuid(mnt_userns, fs_userns, kuid));
 }
 
 /**
- * mapped_kgid_fs - map a filesystem kgid into a mnt_userns
+ * make_vfsgid - map a filesystem kgid into a mnt_userns
  * @mnt_userns: the mount's idmapping
  * @fs_userns: the filesystem's idmapping
  * @kgid : kgid to be mapped
@@ -104,21 +235,56 @@ static inline kuid_t mapped_kuid_fs(struct user_namespace *mnt_userns,
  * If @kgid has no mapping in either @mnt_userns or @fs_userns INVALID_GID is
  * returned.
  */
-static inline kgid_t mapped_kgid_fs(struct user_namespace *mnt_userns,
-                                   struct user_namespace *fs_userns,
-                                   kgid_t kgid)
+
+static inline vfsgid_t make_vfsgid(struct user_namespace *mnt_userns,
+                                  struct user_namespace *fs_userns,
+                                  kgid_t kgid)
 {
        gid_t gid;
 
        if (no_idmapping(mnt_userns, fs_userns))
-               return kgid;
+               return VFSGIDT_INIT(kgid);
        if (initial_idmapping(fs_userns))
                gid = __kgid_val(kgid);
        else
                gid = from_kgid(fs_userns, kgid);
        if (gid == (gid_t)-1)
-               return INVALID_GID;
-       return make_kgid(mnt_userns, gid);
+               return INVALID_VFSGID;
+       return VFSGIDT_INIT(make_kgid(mnt_userns, gid));
+}
+
+static inline kgid_t mapped_kgid_fs(struct user_namespace *mnt_userns,
+                                   struct user_namespace *fs_userns,
+                                   kgid_t kgid)
+{
+       return AS_KGIDT(make_vfsgid(mnt_userns, fs_userns, kgid));
+}
+
+/**
+ * from_vfsuid - map a vfsuid into the filesystem idmapping
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsuid : vfsuid to be mapped
+ *
+ * Map @vfsuid into the filesystem idmapping. This function has to be used in
+ * order to e.g. write @vfsuid to inode->i_uid.
+ *
+ * Return: @vfsuid mapped into the filesystem idmapping
+ */
+static inline kuid_t from_vfsuid(struct user_namespace *mnt_userns,
+                                struct user_namespace *fs_userns,
+                                vfsuid_t vfsuid)
+{
+       uid_t uid;
+
+       if (no_idmapping(mnt_userns, fs_userns))
+               return AS_KUIDT(vfsuid);
+       uid = from_kuid(mnt_userns, AS_KUIDT(vfsuid));
+       if (uid == (uid_t)-1)
+               return INVALID_UID;
+       if (initial_idmapping(fs_userns))
+               return KUIDT_INIT(uid);
+       return make_kuid(fs_userns, uid);
 }
 
 /**
@@ -145,16 +311,66 @@ static inline kuid_t mapped_kuid_user(struct user_namespace *mnt_userns,
                                      struct user_namespace *fs_userns,
                                      kuid_t kuid)
 {
-       uid_t uid;
+       return from_vfsuid(mnt_userns, fs_userns, VFSUIDT_INIT(kuid));
+}
+
+/**
+ * vfsuid_has_fsmapping - check whether a vfsuid maps into the filesystem
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsuid: vfsuid to be mapped
+ *
+ * Check whether @vfsuid has a mapping in the filesystem idmapping. Use this
+ * function to check whether the filesystem idmapping has a mapping for
+ * @vfsuid.
+ *
+ * Return: true if @vfsuid has a mapping in the filesystem, false if not.
+ */
+static inline bool vfsuid_has_fsmapping(struct user_namespace *mnt_userns,
+                                       struct user_namespace *fs_userns,
+                                       vfsuid_t vfsuid)
+{
+       return uid_valid(from_vfsuid(mnt_userns, fs_userns, vfsuid));
+}
+
+/**
+ * vfsuid_into_kuid - convert vfsuid into kuid
+ * @vfsuid: the vfsuid to convert
+ *
+ * This can be used when a vfsuid is committed as a kuid.
+ *
+ * Return: a kuid with the value of @vfsuid
+ */
+static inline kuid_t vfsuid_into_kuid(vfsuid_t vfsuid)
+{
+       return AS_KUIDT(vfsuid);
+}
+
+/**
+ * from_vfsgid - map a vfsgid into the filesystem idmapping
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsgid : vfsgid to be mapped
+ *
+ * Map @vfsgid into the filesystem idmapping. This function has to be used in
+ * order to e.g. write @vfsgid to inode->i_gid.
+ *
+ * Return: @vfsgid mapped into the filesystem idmapping
+ */
+static inline kgid_t from_vfsgid(struct user_namespace *mnt_userns,
+                                struct user_namespace *fs_userns,
+                                vfsgid_t vfsgid)
+{
+       gid_t gid;
 
        if (no_idmapping(mnt_userns, fs_userns))
-               return kuid;
-       uid = from_kuid(mnt_userns, kuid);
-       if (uid == (uid_t)-1)
-               return INVALID_UID;
+               return AS_KGIDT(vfsgid);
+       gid = from_kgid(mnt_userns, AS_KGIDT(vfsgid));
+       if (gid == (gid_t)-1)
+               return INVALID_GID;
        if (initial_idmapping(fs_userns))
-               return KUIDT_INIT(uid);
-       return make_kuid(fs_userns, uid);
+               return KGIDT_INIT(gid);
+       return make_kgid(fs_userns, gid);
 }
 
 /**
@@ -181,16 +397,39 @@ static inline kgid_t mapped_kgid_user(struct user_namespace *mnt_userns,
                                      struct user_namespace *fs_userns,
                                      kgid_t kgid)
 {
-       gid_t gid;
+       return from_vfsgid(mnt_userns, fs_userns, VFSGIDT_INIT(kgid));
+}
 
-       if (no_idmapping(mnt_userns, fs_userns))
-               return kgid;
-       gid = from_kgid(mnt_userns, kgid);
-       if (gid == (gid_t)-1)
-               return INVALID_GID;
-       if (initial_idmapping(fs_userns))
-               return KGIDT_INIT(gid);
-       return make_kgid(fs_userns, gid);
+/**
+ * vfsgid_has_fsmapping - check whether a vfsgid maps into the filesystem
+ * @mnt_userns: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsgid: vfsgid to be mapped
+ *
+ * Check whether @vfsgid has a mapping in the filesystem idmapping. Use this
+ * function to check whether the filesystem idmapping has a mapping for
+ * @vfsgid.
+ *
+ * Return: true if @vfsgid has a mapping in the filesystem, false if not.
+ */
+static inline bool vfsgid_has_fsmapping(struct user_namespace *mnt_userns,
+                                       struct user_namespace *fs_userns,
+                                       vfsgid_t vfsgid)
+{
+       return gid_valid(from_vfsgid(mnt_userns, fs_userns, vfsgid));
+}
+
+/**
+ * vfsgid_into_kgid - convert vfsgid into kgid
+ * @vfsgid: the vfsgid to convert
+ *
+ * This can be used when a vfsgid is committed as a kgid.
+ *
+ * Return: a kgid with the value of @vfsgid
+ */
+static inline kgid_t vfsgid_into_kgid(vfsgid_t vfsgid)
+{
+       return AS_KGIDT(vfsgid);
 }
 
 /**
@@ -209,7 +448,8 @@ static inline kgid_t mapped_kgid_user(struct user_namespace *mnt_userns,
 static inline kuid_t mapped_fsuid(struct user_namespace *mnt_userns,
                                  struct user_namespace *fs_userns)
 {
-       return mapped_kuid_user(mnt_userns, fs_userns, current_fsuid());
+       return from_vfsuid(mnt_userns, fs_userns,
+                          VFSUIDT_INIT(current_fsuid()));
 }
 
 /**
@@ -228,7 +468,8 @@ static inline kuid_t mapped_fsuid(struct user_namespace *mnt_userns,
 static inline kgid_t mapped_fsgid(struct user_namespace *mnt_userns,
                                  struct user_namespace *fs_userns)
 {
-       return mapped_kgid_user(mnt_userns, fs_userns, current_fsgid());
+       return from_vfsgid(mnt_userns, fs_userns,
+                          VFSGIDT_INIT(current_fsgid()));
 }
 
 #endif /* _LINUX_MNT_IDMAPPING_H */
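
The two directions compose into a simple round trip: make_vfsuid() maps a
filesystem kuid up into the mount's view, from_vfsuid() maps a vfsuid back
down for writing. A minimal sketch with a hypothetical caller (not real VFS
code):

    static bool example_chown_needed(struct user_namespace *mnt_userns,
                                     struct inode *inode, vfsuid_t new_uid)
    {
            vfsuid_t cur = make_vfsuid(mnt_userns, i_user_ns(inode),
                                       inode->i_uid);

            /* vfsuid_eq() treats two invalid vfsuids as unequal. */
            return !vfsuid_eq(cur, new_uid);
    }

    static void example_chown_commit(struct user_namespace *mnt_userns,
                                     struct inode *inode, vfsuid_t new_uid)
    {
            /* Map back into the filesystem's idmapping before writing. */
            inode->i_uid = from_vfsuid(mnt_userns, i_user_ns(inode),
                                       new_uid);
    }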
index f615a66..2563d30 100644 (file)
@@ -1671,7 +1671,7 @@ enum netdev_priv_flags {
        IFF_FAILOVER_SLAVE              = 1<<28,
        IFF_L3MDEV_RX_HANDLER           = 1<<29,
        IFF_LIVE_RENAME_OK              = 1<<30,
-       IFF_TX_SKB_NO_LINEAR            = 1<<31,
+       IFF_TX_SKB_NO_LINEAR            = BIT_ULL(31),
        IFF_CHANGE_PROTO_DOWN           = BIT_ULL(32),
 };
 
index 097cdd6..1b18dfa 100644 (file)
@@ -214,7 +214,7 @@ struct netfs_request_ops {
        void (*issue_read)(struct netfs_io_subrequest *subreq);
        bool (*is_still_valid)(struct netfs_io_request *rreq);
        int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
-                                struct folio *folio, void **_fsdata);
+                                struct folio **foliop, void **_fsdata);
        void (*done)(struct netfs_io_request *rreq);
 };
 
@@ -304,7 +304,7 @@ static inline struct netfs_inode *netfs_inode(struct inode *inode)
 
 /**
  * netfs_inode_init - Initialise a netfslib inode context
- * @inode: The netfs inode to initialise
+ * @ctx: The netfs inode to initialise
  * @ops: The netfs's operations list
  *
  * Initialise the netfs library context struct.  This is expected to follow on
index 29ec3e3..07cfc92 100644 (file)
@@ -233,8 +233,8 @@ enum {
 };
 
 enum {
-       NVME_CAP_CRMS_CRIMS     = 1ULL << 59,
-       NVME_CAP_CRMS_CRWMS     = 1ULL << 60,
+       NVME_CAP_CRMS_CRWMS     = 1ULL << 59,
+       NVME_CAP_CRMS_CRIMS     = 1ULL << 60,
 };
 
 struct nvme_id_power_state {
@@ -906,12 +906,14 @@ struct nvme_common_command {
        __le32                  cdw2[2];
        __le64                  metadata;
        union nvme_data_ptr     dptr;
+       struct_group(cdws,
        __le32                  cdw10;
        __le32                  cdw11;
        __le32                  cdw12;
        __le32                  cdw13;
        __le32                  cdw14;
        __le32                  cdw15;
+       );
 };
 
 struct nvme_rw_command {
index 6491fa8..10bc88c 100644 (file)
@@ -32,11 +32,16 @@ struct unwind_hint {
  *
  * UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function.
  * Useful for code which doesn't have an ELF function annotation.
+ *
+ * UNWIND_HINT_ENTRY: machine entry without stack, SYSCALL/SYSENTER etc.
  */
 #define UNWIND_HINT_TYPE_CALL          0
 #define UNWIND_HINT_TYPE_REGS          1
 #define UNWIND_HINT_TYPE_REGS_PARTIAL  2
 #define UNWIND_HINT_TYPE_FUNC          3
+#define UNWIND_HINT_TYPE_ENTRY         4
+#define UNWIND_HINT_TYPE_SAVE          5
+#define UNWIND_HINT_TYPE_RESTORE       6
 
 #ifdef CONFIG_OBJTOOL
 
@@ -124,7 +129,7 @@ struct unwind_hint {
  * the debuginfo as necessary.  It will also warn if it sees any
  * inconsistencies.
  */
-.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
 .Lunwind_hint_ip_\@:
        .pushsection .discard.unwind_hints
                /* struct unwind_hint */
@@ -143,6 +148,12 @@ struct unwind_hint {
        .popsection
 .endm
 
+.macro STACK_FRAME_NON_STANDARD_FP func:req
+#ifdef CONFIG_FRAME_POINTER
+       STACK_FRAME_NON_STANDARD \func
+#endif
+.endm
+
 .macro ANNOTATE_NOENDBR
 .Lhere_\@:
        .pushsection .discard.noendbr
@@ -171,7 +182,7 @@ struct unwind_hint {
 #define ASM_REACHABLE
 #else
 #define ANNOTATE_INTRA_FUNCTION_CALL
-.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
 .endm
 .macro STACK_FRAME_NON_STANDARD func:req
 .endm
index f0a5d6b..20a4e7c 100644 (file)
@@ -441,8 +441,6 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
                                   unsigned long initrd_load_addr,
                                   unsigned long initrd_len,
                                   const char *cmdline, size_t extra_fdt_size);
-int ima_get_kexec_buffer(void **addr, size_t *size);
-int ima_free_kexec_buffer(void);
 #else /* CONFIG_OF */
 
 static inline void of_core_init(void)
index 861e606..b7bce49 100644 (file)
@@ -9,15 +9,27 @@
  */
 #define DO_ONCE_LITE(func, ...)                                                \
        DO_ONCE_LITE_IF(true, func, ##__VA_ARGS__)
-#define DO_ONCE_LITE_IF(condition, func, ...)                          \
+
+#define __ONCE_LITE_IF(condition)                                      \
        ({                                                              \
                static bool __section(".data.once") __already_done;     \
-               bool __ret_do_once = !!(condition);                     \
+               bool __ret_cond = !!(condition);                        \
+               bool __ret_once = false;                                \
                                                                        \
-               if (unlikely(__ret_do_once && !__already_done)) {       \
+               if (unlikely(__ret_cond && !__already_done)) {          \
                        __already_done = true;                          \
-                       func(__VA_ARGS__);                              \
+                       __ret_once = true;                              \
                }                                                       \
+               unlikely(__ret_once);                                   \
+       })
+
+#define DO_ONCE_LITE_IF(condition, func, ...)                          \
+       ({                                                              \
+               bool __ret_do_once = !!(condition);                     \
+                                                                       \
+               if (__ONCE_LITE_IF(__ret_do_once))                      \
+                       func(__VA_ARGS__);                              \
+                                                                       \
                unlikely(__ret_do_once);                                \
        })
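
The split lets other macros reuse the once-only test, while DO_ONCE_LITE_IF()
keeps its old contract: func runs at most once, but the macro still evaluates
to the condition on every call. A hedged sketch:

    /* pr_warn() fires only on the first failure ever seen. */
    static void example_check(int err)
    {
            if (DO_ONCE_LITE_IF(err, pr_warn, "failed with %d\n", err))
                    return; /* taken on every call where err != 0 */
    }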
 
index 0178823..7fa460c 100644 (file)
 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
 #define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
+#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3 0x1727
 #define PCI_DEVICE_ID_AMD_19H_DF_F3    0x1653
 #define PCI_DEVICE_ID_AMD_19H_M10H_DF_F3 0x14b0
 #define PCI_DEVICE_ID_AMD_19H_M40H_DF_F3 0x167c
 #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d
+#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3
+#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3
 #define PCI_DEVICE_ID_AMD_CNB17H_F3    0x1703
 #define PCI_DEVICE_ID_AMD_LANCE                0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME   0x2001
index 46f9b6f..bf66fe0 100644 (file)
@@ -56,9 +56,13 @@ struct riscv_pmu {
 
        struct cpu_hw_events    __percpu *hw_events;
        struct hlist_node       node;
+       struct notifier_block   riscv_pm_nb;
 };
 
 #define to_riscv_pmu(p) (container_of(p, struct riscv_pmu, pmu))
+
+void riscv_pmu_start(struct perf_event *event, int flags);
+void riscv_pmu_stop(struct perf_event *event, int flags);
 unsigned long riscv_pmu_ctr_read_csr(unsigned long csr);
 int riscv_pmu_event_set_period(struct perf_event *event);
 uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event);
index 508f114..b09f7d3 100644 (file)
@@ -572,6 +572,10 @@ struct macsec_ops;
  * @mdix_ctrl: User setting of crossover
  * @pma_extable: Cached value of PMA/PMD Extended Abilities Register
  * @interrupts: Flag interrupts have been enabled
+ * @irq_suspended: Flag indicating PHY is suspended and therefore interrupt
+ *                 handling shall be postponed until PHY has resumed
+ * @irq_rerun: Flag indicating interrupts occurred while PHY was suspended,
+ *             requiring a rerun of the interrupt handler after resume
  * @interface: enum phy_interface_t value
  * @skb: Netlink message for cable diagnostics
  * @nest: Netlink nest used for cable diagnostics
@@ -626,6 +630,8 @@ struct phy_device {
 
        /* Interrupts are enabled */
        unsigned interrupts:1;
+       unsigned irq_suspended:1;
+       unsigned irq_rerun:1;
 
        enum phy_state state;
 
index 9e4d056..0a41b2d 100644 (file)
@@ -88,7 +88,7 @@ extern void pm_runtime_get_suppliers(struct device *dev);
 extern void pm_runtime_put_suppliers(struct device *dev);
 extern void pm_runtime_new_link(struct device *dev);
 extern void pm_runtime_drop_link(struct device_link *link);
-extern void pm_runtime_release_supplier(struct device_link *link, bool check_idle);
+extern void pm_runtime_release_supplier(struct device_link *link);
 
 extern int devm_pm_runtime_enable(struct device *dev);
 
@@ -314,8 +314,7 @@ static inline void pm_runtime_get_suppliers(struct device *dev) {}
 static inline void pm_runtime_put_suppliers(struct device *dev) {}
 static inline void pm_runtime_new_link(struct device *dev) {}
 static inline void pm_runtime_drop_link(struct device_link *link) {}
-static inline void pm_runtime_release_supplier(struct device_link *link,
-                                              bool check_idle) {}
+static inline void pm_runtime_release_supplier(struct device_link *link) {}
 
 #endif /* !CONFIG_PM */
 
index b65c877..7d1e604 100644 (file)
@@ -73,6 +73,7 @@ extern int set_posix_acl(struct user_namespace *, struct inode *, int,
                         struct posix_acl *);
 
 struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
+struct posix_acl *posix_acl_clone(const struct posix_acl *acl, gfp_t flags);
 
 #ifdef CONFIG_FS_POSIX_ACL
 int posix_acl_chmod(struct user_namespace *, struct inode *, umode_t);
index 1766e1d..b6bd3ea 100644 (file)
@@ -33,21 +33,31 @@ posix_acl_xattr_count(size_t size)
 }
 
 #ifdef CONFIG_FS_POSIX_ACL
-void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
-                                  struct inode *inode,
-                                  void *value, size_t size);
-void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
-                                  struct inode *inode,
-                                void *value, size_t size);
+void posix_acl_fix_xattr_from_user(void *value, size_t size);
+void posix_acl_fix_xattr_to_user(void *value, size_t size);
+void posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns,
+                                    const struct inode *inode,
+                                    void *value, size_t size);
+void posix_acl_setxattr_idmapped_mnt(struct user_namespace *mnt_userns,
+                                    const struct inode *inode,
+                                    void *value, size_t size);
 #else
-static inline void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
-                                                struct inode *inode,
-                                                void *value, size_t size)
+static inline void posix_acl_fix_xattr_from_user(void *value, size_t size)
 {
 }
-static inline void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
-                                              struct inode *inode,
-                                              void *value, size_t size)
+static inline void posix_acl_fix_xattr_to_user(void *value, size_t size)
+{
+}
+static inline void
+posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns,
+                               const struct inode *inode, void *value,
+                               size_t size)
+{
+}
+static inline void
+posix_acl_setxattr_idmapped_mnt(struct user_namespace *mnt_userns,
+                               const struct inode *inode, void *value,
+                               size_t size)
 {
 }
 #endif
index 10ec29b..cf7d666 100644 (file)
@@ -169,9 +169,6 @@ extern void __printk_safe_exit(void);
 #define printk_deferred_enter __printk_safe_enter
 #define printk_deferred_exit __printk_safe_exit
 
-extern void printk_prefer_direct_enter(void);
-extern void printk_prefer_direct_exit(void);
-
 extern bool pr_flush(int timeout_ms, bool reset_on_progress);
 
 /*
@@ -224,14 +221,6 @@ static inline void printk_deferred_exit(void)
 {
 }
 
-static inline void printk_prefer_direct_enter(void)
-{
-}
-
-static inline void printk_prefer_direct_exit(void)
-{
-}
-
 static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
 {
        return true;
index a0f6668..0d8625d 100644 (file)
@@ -20,11 +20,12 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
 }
 
 /* i_mutex must being held */
-static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
+static inline bool is_quota_modification(struct user_namespace *mnt_userns,
+                                        struct inode *inode, struct iattr *ia)
 {
-       return (ia->ia_valid & ATTR_SIZE) ||
-               (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
-               (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
+       return ((ia->ia_valid & ATTR_SIZE) ||
+               i_uid_needs_update(mnt_userns, ia, inode) ||
+               i_gid_needs_update(mnt_userns, ia, inode));
 }
 
 #if defined(CONFIG_QUOTA)
@@ -115,7 +116,8 @@ int dquot_set_dqblk(struct super_block *sb, struct kqid id,
                struct qc_dqblk *di);
 
 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
-int dquot_transfer(struct inode *inode, struct iattr *iattr);
+int dquot_transfer(struct user_namespace *mnt_userns, struct inode *inode,
+                  struct iattr *iattr);
 
 static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type)
 {
@@ -234,7 +236,8 @@ static inline void dquot_free_inode(struct inode *inode)
 {
 }
 
-static inline int dquot_transfer(struct inode *inode, struct iattr *iattr)
+static inline int dquot_transfer(struct user_namespace *mnt_userns,
+                                struct inode *inode, struct iattr *iattr)
 {
        return 0;
 }
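
Callers now thread the mount's idmapping through both helpers. A sketch of a
typical setattr-time sequence (simplified from how filesystems use these;
names hypothetical):

    static int example_attr_quota(struct user_namespace *mnt_userns,
                                  struct inode *inode, struct iattr *ia)
    {
            int err = 0;

            if (is_quota_modification(mnt_userns, inode, ia)) {
                    err = dquot_initialize(inode);
                    if (err)
                            return err;
            }
            if (i_uid_needs_update(mnt_userns, ia, inode) ||
                i_gid_needs_update(mnt_userns, ia, inode))
                    err = dquot_transfer(mnt_userns, inode, ia);
            return err;
    }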
index c21c7f8..0022666 100644 (file)
@@ -23,12 +23,16 @@ struct ratelimit_state {
        unsigned long   flags;
 };
 
-#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) {                \
-               .lock           = __RAW_SPIN_LOCK_UNLOCKED(name.lock),  \
-               .interval       = interval_init,                        \
-               .burst          = burst_init,                           \
+#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \
+               .lock           = __RAW_SPIN_LOCK_UNLOCKED(name.lock),            \
+               .interval       = interval_init,                                  \
+               .burst          = burst_init,                                     \
+               .flags          = flags_init,                                     \
        }
 
+#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
+       RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, 0)
+
 #define RATELIMIT_STATE_INIT_DISABLED                                  \
        RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
 
index b8a6e38..a62fcca 100644 (file)
@@ -361,9 +361,9 @@ static inline void refcount_dec(refcount_t *r)
 
 extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
-extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
-extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
+extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
 extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
                                                       spinlock_t *lock,
-                                                      unsigned long *flags);
+                                                      unsigned long *flags) __cond_acquires(lock);
 #endif /* _LINUX_REFCOUNT_H */
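
The __cond_acquires() annotations teach sparse that the lock is held exactly
when these return true, which matches the canonical caller pattern (struct
and fields hypothetical):

    static void example_put(struct example_obj *obj)
    {
            /* On true: ref hit zero and obj->lock is now held. */
            if (refcount_dec_and_lock(&obj->ref, &obj->lock)) {
                    list_del(&obj->node);
                    spin_unlock(&obj->lock);
                    kfree(obj);
            }
    }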
index 8a21b57..514ddf0 100644 (file)
@@ -731,7 +731,7 @@ static inline int __must_check
 devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
                                               struct reset_control_bulk_data *rstcs)
 {
-       return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, true);
+       return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true);
 }
 
 /**
index 159729c..3247ed8 100644 (file)
@@ -54,8 +54,6 @@ struct rtsx_ucr {
        struct usb_device       *pusb_dev;
        struct usb_interface    *pusb_intf;
        struct usb_sg_request   current_sg;
-       unsigned char           *iobuf;
-       dma_addr_t              iobuf_dma;
 
        struct timer_list       sg_timer;
        struct mutex            dev_mutex;
index 505aaf9..81cab4b 100644 (file)
@@ -85,7 +85,7 @@ static inline void exit_thread(struct task_struct *tsk)
 extern __noreturn void do_group_exit(int);
 
 extern void exit_files(struct task_struct *);
-extern void exit_itimers(struct signal_struct *);
+extern void exit_itimers(struct task_struct *);
 
 extern pid_t kernel_clone(struct kernel_clone_args *kargs);
 struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
index 1c58646..704111f 100644 (file)
@@ -13,8 +13,9 @@
 #include <linux/notifier.h>
 #include <linux/types.h>
 
-#define SCMI_MAX_STR_SIZE      64
-#define SCMI_MAX_NUM_RATES     16
+#define SCMI_MAX_STR_SIZE              64
+#define SCMI_SHORT_NAME_MAX_SIZE       16
+#define SCMI_MAX_NUM_RATES             16
 
 /**
  * struct scmi_revision_info - version information structure
@@ -36,8 +37,8 @@ struct scmi_revision_info {
        u8 num_protocols;
        u8 num_agents;
        u32 impl_ver;
-       char vendor_id[SCMI_MAX_STR_SIZE];
-       char sub_vendor_id[SCMI_MAX_STR_SIZE];
+       char vendor_id[SCMI_SHORT_NAME_MAX_SIZE];
+       char sub_vendor_id[SCMI_SHORT_NAME_MAX_SIZE];
 };
 
 struct scmi_clock_info {
index 7fc4e9f..4d0baf3 100644 (file)
@@ -353,7 +353,8 @@ int security_inode_readlink(struct dentry *dentry);
 int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
                               bool rcu);
 int security_inode_permission(struct inode *inode, int mask);
-int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
+int security_inode_setattr(struct user_namespace *mnt_userns,
+                          struct dentry *dentry, struct iattr *attr);
 int security_inode_getattr(const struct path *path);
 int security_inode_setxattr(struct user_namespace *mnt_userns,
                            struct dentry *dentry, const char *name,
@@ -848,8 +849,9 @@ static inline int security_inode_permission(struct inode *inode, int mask)
        return 0;
 }
 
-static inline int security_inode_setattr(struct dentry *dentry,
-                                         struct iattr *attr)
+static inline int security_inode_setattr(struct user_namespace *mnt_userns,
+                                        struct dentry *dentry,
+                                        struct iattr *attr)
 {
        return 0;
 }
index cbd5070..fde258b 100644 (file)
@@ -45,6 +45,7 @@ struct uart_ops {
        void            (*unthrottle)(struct uart_port *);
        void            (*send_xchar)(struct uart_port *, char ch);
        void            (*stop_rx)(struct uart_port *);
+       void            (*start_rx)(struct uart_port *);
        void            (*enable_ms)(struct uart_port *);
        void            (*break_ctl)(struct uart_port *, int ctl);
        int             (*startup)(struct uart_port *);
@@ -389,6 +390,11 @@ static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED;
 static inline int setup_earlycon(char *buf) { return 0; }
 #endif
 
+static inline bool uart_console_enabled(struct uart_port *port)
+{
+       return uart_console(port) && (port->cons->flags & CON_ENABLED);
+}
+
 struct uart_port *uart_get_console(struct uart_port *ports, int nr,
                                   struct console *c);
 int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
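
The new start_rx hook and the uart_console_enabled() helper let drivers keep the receive path alive when the port is the active system console. A hedged sketch of a possible driver-side use (example_stop_rx is hypothetical; the real policy is driver specific):

        /*
         * Sketch: skip disabling RX when the port doubles as the
         * enabled console, so console input (e.g. sysrq) keeps working.
         */
        static void example_stop_rx(struct uart_port *port)
        {
                if (uart_console_enabled(port))
                        return;

                /* ... otherwise mask RX interrupts in hardware ... */
        }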
index 2991785..8df475d 100644 (file)
@@ -260,6 +260,7 @@ struct plat_stmmacenet_data {
        bool has_crossts;
        int int_snapshot_num;
        int ext_snapshot_num;
+       bool int_snapshot_en;
        bool ext_snapshot_en;
        bool multi_msi_en;
        int msi_mac_vec;
index b0dcfa2..8ba8b5b 100644 (file)
@@ -55,6 +55,18 @@ struct efifb_dmi_info {
        int flags;
 };
 
+#ifdef CONFIG_SYSFB
+
+void sysfb_disable(void);
+
+#else /* CONFIG_SYSFB */
+
+static inline void sysfb_disable(void)
+{
+}
+
+#endif /* CONFIG_SYSFB */
+
 #ifdef CONFIG_EFI
 
 extern struct efifb_dmi_info efifb_dmi_list[];
@@ -72,8 +84,8 @@ static inline void sysfb_apply_efi_quirks(struct platform_device *pd)
 
 bool sysfb_parse_mode(const struct screen_info *si,
                      struct simplefb_platform_data *mode);
-int sysfb_create_simplefb(const struct screen_info *si,
-                         const struct simplefb_platform_data *mode);
+struct platform_device *sysfb_create_simplefb(const struct screen_info *si,
+                                             const struct simplefb_platform_data *mode);
 
 #else /* CONFIG_SYSFB_SIMPLE */
 
@@ -83,10 +95,10 @@ static inline bool sysfb_parse_mode(const struct screen_info *si,
        return false;
 }
 
-static inline int sysfb_create_simplefb(const struct screen_info *si,
-                                        const struct simplefb_platform_data *mode)
+static inline struct platform_device *sysfb_create_simplefb(const struct screen_info *si,
+                                                           const struct simplefb_platform_data *mode)
 {
-       return -EINVAL;
+       return ERR_PTR(-EINVAL);
 }
 
 #endif /* CONFIG_SYSFB_SIMPLE */
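
Returning the platform device (or an ERR_PTR()) instead of an int lets callers keep a handle for later unregistration. A hedged sketch of the new calling convention (example_probe is hypothetical):

        /*
         * Sketch: errors now travel via ERR_PTR(); the stub for
         * !CONFIG_SYSFB_SIMPLE yields ERR_PTR(-EINVAL), so the call
         * site needs no ifdefs.
         */
        static int example_probe(const struct screen_info *si,
                                 const struct simplefb_platform_data *mode)
        {
                struct platform_device *pd;

                pd = sysfb_create_simplefb(si, mode);
                if (IS_ERR(pd))
                        return PTR_ERR(pd);

                /* keep pd so it can be unregistered on conflict later */
                return 0;
        }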
index 49c7c32..b47c2e7 100644 (file)
@@ -257,6 +257,7 @@ void virtio_device_ready(struct virtio_device *dev)
 
        WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
 
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
        /*
         * The virtio_synchronize_cbs() makes sure vring_interrupt()
         * will see the driver specific setup if it sees vq->broken
@@ -264,6 +265,7 @@ void virtio_device_ready(struct virtio_device *dev)
         */
        virtio_synchronize_cbs(dev);
        __virtio_unbreak_device(dev);
+#endif
        /*
         * The transport should ensure the visibility of vq->broken
         * before setting DRIVER_OK. See the comments for the transport
diff --git a/include/linux/visorbus.h b/include/linux/visorbus.h
deleted file mode 100644 (file)
index 0d8bd67..0000000
+++ /dev/null
@@ -1,344 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
- * All rights reserved.
- */
-
-/*
- *  This header file is to be included by other kernel mode components that
- *  implement a particular kind of visor_device.  Each of these other kernel
- *  mode components is called a visor device driver.  Refer to visortemplate
- *  for a minimal sample visor device driver.
- *
- *  There should be nothing in this file that is private to the visorbus
- *  bus implementation itself.
- */
-
-#ifndef __VISORBUS_H__
-#define __VISORBUS_H__
-
-#include <linux/device.h>
-
-#define VISOR_CHANNEL_SIGNATURE ('L' << 24 | 'N' << 16 | 'C' << 8 | 'E')
-
-/*
- * enum channel_serverstate
- * @CHANNELSRV_UNINITIALIZED: Channel is in an undefined state.
- * @CHANNELSRV_READY:        Channel has been initialized by server.
- */
-enum channel_serverstate {
-       CHANNELSRV_UNINITIALIZED = 0,
-       CHANNELSRV_READY = 1
-};
-
-/*
- * enum channel_clientstate
- * @CHANNELCLI_DETACHED:
- * @CHANNELCLI_DISABLED:  Client can see channel but is NOT allowed to use it
- *                       unless given TBD* explicit request
- *                       (should actually be < DETACHED).
- * @CHANNELCLI_ATTACHING: Legacy EFI client request for EFI server to attach.
- * @CHANNELCLI_ATTACHED:  Idle, but client may want to use channel any time.
- * @CHANNELCLI_BUSY:     Client either wants to use or is using channel.
- * @CHANNELCLI_OWNED:    "No worries" state - client can access channel
- *                       anytime.
- */
-enum channel_clientstate {
-       CHANNELCLI_DETACHED = 0,
-       CHANNELCLI_DISABLED = 1,
-       CHANNELCLI_ATTACHING = 2,
-       CHANNELCLI_ATTACHED = 3,
-       CHANNELCLI_BUSY = 4,
-       CHANNELCLI_OWNED = 5
-};
-
-/*
- * Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so that
- * a guest can look at the FeatureFlags in the io channel, and configure the
- * driver to use interrupts or not based on this setting. All feature bits for
- * all channels should be defined here. The io channel feature bits are defined
- * below.
- */
-#define VISOR_DRIVER_ENABLES_INTS (0x1ULL << 1)
-#define VISOR_CHANNEL_IS_POLLING (0x1ULL << 3)
-#define VISOR_IOVM_OK_DRIVER_DISABLING_INTS (0x1ULL << 4)
-#define VISOR_DRIVER_DISABLES_INTS (0x1ULL << 5)
-#define VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6)
-
-/*
- * struct channel_header - Common Channel Header
- * @signature:        Signature.
- * @legacy_state:      DEPRECATED - being replaced by.
- * @header_size:       sizeof(struct channel_header).
- * @size:             Total size of this channel in bytes.
- * @features:         Flags to modify behavior.
- * @chtype:           Channel type: data, bus, control, etc..
- * @partition_handle:  ID of guest partition.
- * @handle:           Device number of this channel in client.
- * @ch_space_offset:   Offset in bytes to channel specific area.
- * @version_id:               Struct channel_header Version ID.
- * @partition_index:   Index of guest partition.
- * @zone_guid:        Guid of Channel's zone.
- * @cli_str_offset:    Offset from channel header to null-terminated
- *                    ClientString (0 if ClientString not present).
- * @cli_state_boot:    CHANNEL_CLIENTSTATE of pre-boot EFI client of this
- *                    channel.
- * @cmd_state_cli:     CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
- *                    ServerStateUp, ServerStateDown, etc).
- * @cli_state_os:      CHANNEL_CLIENTSTATE of Guest OS client of this channel.
- * @ch_characteristic: CHANNEL_CHARACTERISTIC_<xxx>.
- * @cmd_state_srv:     CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
- *                    ServerStateUp, ServerStateDown, etc).
- * @srv_state:        CHANNEL_SERVERSTATE.
- * @cli_error_boot:    Bits to indicate err states for boot clients, so err
- *                    messages can be throttled.
- * @cli_error_os:      Bits to indicate err states for OS clients, so err
- *                    messages can be throttled.
- * @filler:           Pad out to 128 byte cacheline.
- * @recover_channel:   Please add all new single-byte values below here.
- */
-struct channel_header {
-       u64 signature;
-       u32 legacy_state;
-       /* SrvState, CliStateBoot, and CliStateOS below */
-       u32 header_size;
-       u64 size;
-       u64 features;
-       guid_t chtype;
-       u64 partition_handle;
-       u64 handle;
-       u64 ch_space_offset;
-       u32 version_id;
-       u32 partition_index;
-       guid_t zone_guid;
-       u32 cli_str_offset;
-       u32 cli_state_boot;
-       u32 cmd_state_cli;
-       u32 cli_state_os;
-       u32 ch_characteristic;
-       u32 cmd_state_srv;
-       u32 srv_state;
-       u8 cli_error_boot;
-       u8 cli_error_os;
-       u8 filler[1];
-       u8 recover_channel;
-} __packed;
-
-#define VISOR_CHANNEL_ENABLE_INTS (0x1ULL << 0)
-
-/*
- * struct signal_queue_header - Subheader for the Signal Type variation of the
- *                              Common Channel.
- * @version:         SIGNAL_QUEUE_HEADER Version ID.
- * @chtype:          Queue type: storage, network.
- * @size:            Total size of this queue in bytes.
- * @sig_base_offset:  Offset to signal queue area.
- * @features:        Flags to modify behavior.
- * @num_sent:        Total # of signals placed in this queue.
- * @num_overflows:    Total # of inserts failed due to full queue.
- * @signal_size:      Total size of a signal for this queue.
- * @max_slots:        Max # of slots in queue, 1 slot is always empty.
- * @max_signals:      Max # of signals in queue (MaxSignalSlots-1).
- * @head:            Queue head signal #.
- * @num_received:     Total # of signals removed from this queue.
- * @tail:            Queue tail signal.
- * @reserved1:       Reserved field.
- * @reserved2:       Reserved field.
- * @client_queue:
- * @num_irq_received: Total # of interrupts received. This is incremented by the
- *                   ISR in the guest Windows driver.
- * @num_empty:       Number of times that visor_signal_remove is called and
- *                   returned Empty Status.
- * @errorflags:              Error bits set during SignalReinit to denote trouble with
- *                   client's fields.
- * @filler:          Pad out to 64 byte cacheline.
- */
-struct signal_queue_header {
-       /* 1st cache line */
-       u32 version;
-       u32 chtype;
-       u64 size;
-       u64 sig_base_offset;
-       u64 features;
-       u64 num_sent;
-       u64 num_overflows;
-       u32 signal_size;
-       u32 max_slots;
-       u32 max_signals;
-       u32 head;
-       /* 2nd cache line */
-       u64 num_received;
-       u32 tail;
-       u32 reserved1;
-       u64 reserved2;
-       u64 client_queue;
-       u64 num_irq_received;
-       u64 num_empty;
-       u32 errorflags;
-       u8 filler[12];
-} __packed;
-
-/* VISORCHANNEL Guids */
-/* {414815ed-c58c-11da-95a9-00e08161165f} */
-#define VISOR_VHBA_CHANNEL_GUID \
-       GUID_INIT(0x414815ed, 0xc58c, 0x11da, \
-                 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-#define VISOR_VHBA_CHANNEL_GUID_STR \
-       "414815ed-c58c-11da-95a9-00e08161165f"
-struct visorchipset_state {
-       u32 created:1;
-       u32 attached:1;
-       u32 configured:1;
-       u32 running:1;
-       /* Remaining bits in this 32-bit word are reserved. */
-};
-
-/**
- * struct visor_device - A device type for things "plugged" into the visorbus
- *                       bus
- * @visorchannel:              Points to the channel that the device is
- *                             associated with.
- * @channel_type_guid:         Identifies the channel type to the bus driver.
- * @device:                    Device struct meant for use by the bus driver
- *                             only.
- * @list_all:                  Used by the bus driver to enumerate devices.
- * @timer:                     Timer fired periodically to do interrupt-type
- *                             activity.
- * @being_removed:             Indicates that the device is being removed from
- *                             the bus. Private bus driver use only.
- * @visordriver_callback_lock: Used by the bus driver to lock when adding and
- *                             removing devices.
- * @pausing:                   Indicates that a change towards a paused state
- *                             is in progress. Only modified by the bus driver.
- * @resuming:                  Indicates that a change towards a running state
- *                             is in progress. Only modified by the bus driver.
- * @chipset_bus_no:            Private field used by the bus driver.
- * @chipset_dev_no:            Private field used by the bus driver.
- * @state:                     Used to indicate the current state of the
- *                             device.
- * @inst:                      Unique GUID for this instance of the device.
- * @name:                      Name of the device.
- * @pending_msg_hdr:           For private use by bus driver to respond to
- *                             hypervisor requests.
- * @vbus_hdr_info:             A pointer to header info. Private use by bus
- *                             driver.
- * @partition_guid:            Indicates client partition id. This should be the
- *                             same across all visor_devices in the current
- *                             guest. Private use by bus driver only.
- */
-struct visor_device {
-       struct visorchannel *visorchannel;
-       guid_t channel_type_guid;
-       /* These fields are for private use by the bus driver only. */
-       struct device device;
-       struct list_head list_all;
-       struct timer_list timer;
-       bool timer_active;
-       bool being_removed;
-       struct mutex visordriver_callback_lock; /* synchronize probe/remove */
-       bool pausing;
-       bool resuming;
-       u32 chipset_bus_no;
-       u32 chipset_dev_no;
-       struct visorchipset_state state;
-       guid_t inst;
-       u8 *name;
-       struct controlvm_message_header *pending_msg_hdr;
-       void *vbus_hdr_info;
-       guid_t partition_guid;
-       struct dentry *debugfs_dir;
-       struct dentry *debugfs_bus_info;
-};
-
-#define to_visor_device(x) container_of(x, struct visor_device, device)
-
-typedef void (*visorbus_state_complete_func) (struct visor_device *dev,
-                                             int status);
-
-/*
- * This struct describes a specific visor channel by providing its GUID, name,
- * and sizes.
- */
-struct visor_channeltype_descriptor {
-       const guid_t guid;
-       const char *name;
-       u64 min_bytes;
-       u32 version;
-};
-
-/**
- * struct visor_driver - Information provided by each visor driver when it
- *                       registers with the visorbus driver
- * @name:              Name of the visor driver.
- * @owner:             The module owner.
- * @channel_types:     Types of channels handled by this driver, ending with
- *                     a zero GUID. Our specialized BUS.match() method knows
- *                     about this list, and uses it to determine whether this
- *                     driver will in fact handle a new device that it has
- *                     detected.
- * @probe:             Called when a new device comes online, by our probe()
- *                     function specified by driver.probe() (triggered
- *                     ultimately by some call to driver_register(),
- *                     bus_add_driver(), or driver_attach()).
- * @remove:            Called when a new device is removed, by our remove()
- *                     function specified by driver.remove() (triggered
- *                     ultimately by some call to device_release_driver()).
- * @channel_interrupt: Called periodically, whenever there is a possibility
- *                     that "something interesting" may have happened to the
- *                     channel.
- * @pause:             Called to initiate a change of the device's state.  If
- *                     the return value is < 0, there was an error and the
- *                     state transition will NOT occur.  If the return value
- *                     is >= 0, then the state transition was INITIATED
- *                     successfully, and complete_func() will be called (or
- *                     was just called) with the final status when either the
- *                     state transition fails or completes successfully.
- * @resume:            Behaves similarly to pause.
- * @driver:            Private reference to the device driver. For use by bus
- *                     driver only.
- */
-struct visor_driver {
-       const char *name;
-       struct module *owner;
-       struct visor_channeltype_descriptor *channel_types;
-       int (*probe)(struct visor_device *dev);
-       void (*remove)(struct visor_device *dev);
-       void (*channel_interrupt)(struct visor_device *dev);
-       int (*pause)(struct visor_device *dev,
-                    visorbus_state_complete_func complete_func);
-       int (*resume)(struct visor_device *dev,
-                     visorbus_state_complete_func complete_func);
-
-       /* These fields are for private use by the bus driver only. */
-       struct device_driver driver;
-};
-
-#define to_visor_driver(x) (container_of(x, struct visor_driver, driver))
-
-int visor_check_channel(struct channel_header *ch, struct device *dev,
-                       const guid_t *expected_uuid, char *chname,
-                       u64 expected_min_bytes, u32 expected_version,
-                       u64 expected_signature);
-
-int visorbus_register_visor_driver(struct visor_driver *drv);
-void visorbus_unregister_visor_driver(struct visor_driver *drv);
-int visorbus_read_channel(struct visor_device *dev,
-                         unsigned long offset, void *dest,
-                         unsigned long nbytes);
-int visorbus_write_channel(struct visor_device *dev,
-                          unsigned long offset, void *src,
-                          unsigned long nbytes);
-int visorbus_enable_channel_interrupts(struct visor_device *dev);
-void visorbus_disable_channel_interrupts(struct visor_device *dev);
-
-int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
-                             void *msg);
-int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
-                             void *msg);
-bool visorchannel_signalempty(struct visorchannel *channel, u32 queue);
-const guid_t *visorchannel_get_guid(struct visorchannel *channel);
-
-#define BUS_ROOT_DEVICE UINT_MAX
-struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
-                                              struct visor_device *from);
-#endif
index b159c27..096d48a 100644 (file)
@@ -215,6 +215,7 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size,
 void free_vm_area(struct vm_struct *area);
 extern struct vm_struct *remove_vm_area(const void *addr);
 extern struct vm_struct *find_vm_area(const void *addr);
+struct vmap_area *find_vmap_area(unsigned long addr);
 
 static inline bool is_vm_area_hugepages(const void *addr)
 {
index 4c379d2..979a9d3 100644 (file)
@@ -61,7 +61,7 @@ int __vfs_setxattr_locked(struct user_namespace *, struct dentry *,
                          const char *, const void *, size_t, int,
                          struct inode **);
 int vfs_setxattr(struct user_namespace *, struct dentry *, const char *,
-                const void *, size_t, int);
+                void *, size_t, int);
 int __vfs_removexattr(struct user_namespace *, struct dentry *, const char *);
 int __vfs_removexattr_locked(struct user_namespace *, struct dentry *,
                             const char *, struct inode **);
index f7506f0..c04f359 100644 (file)
@@ -405,6 +405,9 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev)
 {
        const struct inet6_dev *idev = __in6_dev_get(dev);
 
+       if (unlikely(!idev))
+               return true;
+
        return !!idev->cnf.ignore_routes_with_linkdown;
 }
 
index 0e40c3d..08fc30c 100644 (file)
@@ -78,6 +78,15 @@ enum amt_status {
 
 #define AMT_STATUS_MAX (__AMT_STATUS_MAX - 1)
 
+/* Gateway events only */
+enum amt_event {
+       AMT_EVENT_NONE,
+       AMT_EVENT_RECEIVE,
+       AMT_EVENT_SEND_DISCOVERY,
+       AMT_EVENT_SEND_REQUEST,
+       __AMT_EVENT_MAX,
+};
+
 struct amt_header {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
        u8 type:4,
@@ -292,6 +301,12 @@ struct amt_group_node {
        struct hlist_head       sources[];
 };
 
+#define AMT_MAX_EVENTS 16
+struct amt_events {
+       enum amt_event event;
+       struct sk_buff *skb;
+};
+
 struct amt_dev {
        struct net_device       *dev;
        struct net_device       *stream_dev;
@@ -308,6 +323,7 @@ struct amt_dev {
        struct delayed_work     req_wq;
        /* Protected by RTNL */
        struct delayed_work     secret_wq;
+       struct work_struct      event_wq;
        /* AMT status */
        enum amt_status         status;
        /* Generated key */
@@ -345,6 +361,10 @@ struct amt_dev {
        /* Used only in gateway mode */
        u64                     mac:48,
                                reserved:16;
+       /* AMT gateway side message handler queue */
+       struct amt_events       events[AMT_MAX_EVENTS];
+       u8                      event_idx;
+       u8                      nr_events;
 };
 
 #define AMT_TOS                        0xc0
index 3c4f550..2f766e3 100644 (file)
@@ -847,6 +847,7 @@ enum {
 };
 
 void l2cap_chan_hold(struct l2cap_chan *c);
+struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c);
 void l2cap_chan_put(struct l2cap_chan *c);
 
 static inline void l2cap_chan_lock(struct l2cap_chan *chan)
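
l2cap_chan_hold_unless_zero() brings the usual "get unless already dying" refcount idiom to L2CAP channels: a lookup must not take a new reference once the count has reached zero and teardown has begun. A hedged, generic sketch of the idiom using kref (struct obj and obj_lookup are hypothetical):

        #include <linux/kref.h>
        #include <linux/list.h>
        #include <linux/types.h>

        struct obj {
                struct list_head node;
                struct kref ref;
                u32 id;
        };

        /*
         * Sketch: only return the object if a reference could still be
         * taken; a zero count means a free is already in flight.
         */
        static struct obj *obj_lookup(struct list_head *table, u32 id)
        {
                struct obj *o;

                list_for_each_entry(o, table, node) {
                        if (o->id == id && kref_get_unless_zero(&o->ref))
                                return o;
                }
                return NULL;
        }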
index 6d02e12..80f4144 100644 (file)
@@ -8462,11 +8462,12 @@ int cfg80211_bss_color_notify(struct net_device *dev, gfp_t gfp,
  * cfg80211_obss_color_collision_notify - notify about bss color collision
  * @dev: network device
  * @color_bitmap: representations of the colors that the local BSS is aware of
+ * @gfp: allocation flags
  */
 static inline int cfg80211_obss_color_collision_notify(struct net_device *dev,
-                                                      u64 color_bitmap)
+                                                      u64 color_bitmap, gfp_t gfp)
 {
-       return cfg80211_bss_color_notify(dev, GFP_KERNEL,
+       return cfg80211_bss_color_notify(dev, gfp,
                                         NL80211_CMD_OBSS_COLOR_COLLISION,
                                         0, color_bitmap);
 }
index 6484095..7ac3138 100644 (file)
@@ -152,6 +152,7 @@ enum flow_action_id {
        FLOW_ACTION_PIPE,
        FLOW_ACTION_VLAN_PUSH_ETH,
        FLOW_ACTION_VLAN_POP_ETH,
+       FLOW_ACTION_CONTINUE,
        NUM_FLOW_ACTIONS,
 };
 
index 077cd73..ee88f0f 100644 (file)
@@ -25,7 +25,6 @@
 #undef INET_CSK_CLEAR_TIMERS
 
 struct inet_bind_bucket;
-struct inet_bind2_bucket;
 struct tcp_congestion_ops;
 
 /*
@@ -58,7 +57,6 @@ struct inet_connection_sock_af_ops {
  *
  * @icsk_accept_queue:    FIFO of established children
  * @icsk_bind_hash:       Bind node
- * @icsk_bind2_hash:      Bind node in the bhash2 table
  * @icsk_timeout:         Timeout
  * @icsk_retransmit_timer: Resend (no ack)
  * @icsk_rto:             Retransmit timeout
@@ -85,7 +83,6 @@ struct inet_connection_sock {
        struct inet_sock          icsk_inet;
        struct request_sock_queue icsk_accept_queue;
        struct inet_bind_bucket   *icsk_bind_hash;
-       struct inet_bind2_bucket  *icsk_bind2_hash;
        unsigned long             icsk_timeout;
        struct timer_list         icsk_retransmit_timer;
        struct timer_list         icsk_delack_timer;
@@ -324,7 +321,7 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
 
 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
 
-#define TCP_PINGPONG_THRESH    3
+#define TCP_PINGPONG_THRESH    1
 
 static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
 {
@@ -341,14 +338,6 @@ static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
        return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
 }
 
-static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
-{
-       struct inet_connection_sock *icsk = inet_csk(sk);
-
-       if (icsk->icsk_ack.pingpong < U8_MAX)
-               icsk->icsk_ack.pingpong++;
-}
-
 static inline bool inet_csk_has_ulp(struct sock *sk)
 {
        return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
index a0887b7..fd6b510 100644 (file)
@@ -90,32 +90,11 @@ struct inet_bind_bucket {
        struct hlist_head       owners;
 };
 
-struct inet_bind2_bucket {
-       possible_net_t          ib_net;
-       int                     l3mdev;
-       unsigned short          port;
-       union {
-#if IS_ENABLED(CONFIG_IPV6)
-               struct in6_addr         v6_rcv_saddr;
-#endif
-               __be32                  rcv_saddr;
-       };
-       /* Node in the inet2_bind_hashbucket chain */
-       struct hlist_node       node;
-       /* List of sockets hashed to this bucket */
-       struct hlist_head       owners;
-};
-
 static inline struct net *ib_net(struct inet_bind_bucket *ib)
 {
        return read_pnet(&ib->ib_net);
 }
 
-static inline struct net *ib2_net(struct inet_bind2_bucket *ib)
-{
-       return read_pnet(&ib->ib_net);
-}
-
 #define inet_bind_bucket_for_each(tb, head) \
        hlist_for_each_entry(tb, head, node)
 
@@ -124,15 +103,6 @@ struct inet_bind_hashbucket {
        struct hlist_head       chain;
 };
 
-/* This is synchronized using the inet_bind_hashbucket's spinlock.
- * Instead of having separate spinlocks, the inet_bind2_hashbucket can share
- * the inet_bind_hashbucket's given that in every case where the bhash2 table
- * is useful, a lookup in the bhash table also occurs.
- */
-struct inet_bind2_hashbucket {
-       struct hlist_head       chain;
-};
-
 /* Sockets can be hashed in established or listening table.
  * We must use different 'nulls' end-of-chain value for all hash buckets :
  * A socket might transition from ESTABLISH to LISTEN state without
@@ -164,12 +134,6 @@ struct inet_hashinfo {
         */
        struct kmem_cache               *bind_bucket_cachep;
        struct inet_bind_hashbucket     *bhash;
-       /* The 2nd binding table hashed by port and address.
-        * This is used primarily for expediting the resolution of bind
-        * conflicts.
-        */
-       struct kmem_cache               *bind2_bucket_cachep;
-       struct inet_bind2_hashbucket    *bhash2;
        unsigned int                    bhash_size;
 
        /* The 2nd listener table hashed by local port and address */
@@ -215,7 +179,7 @@ static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
                                        int dif, int sdif)
 {
 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
-       return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
+       return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept),
                                 bound_dev_if, dif, sdif);
 #else
        return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
@@ -229,36 +193,6 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
 void inet_bind_bucket_destroy(struct kmem_cache *cachep,
                              struct inet_bind_bucket *tb);
 
-static inline bool check_bind_bucket_match(struct inet_bind_bucket *tb,
-                                          struct net *net,
-                                          const unsigned short port,
-                                          int l3mdev)
-{
-       return net_eq(ib_net(tb), net) && tb->port == port &&
-               tb->l3mdev == l3mdev;
-}
-
-struct inet_bind2_bucket *
-inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net,
-                        struct inet_bind2_hashbucket *head,
-                        const unsigned short port, int l3mdev,
-                        const struct sock *sk);
-
-void inet_bind2_bucket_destroy(struct kmem_cache *cachep,
-                              struct inet_bind2_bucket *tb);
-
-struct inet_bind2_bucket *
-inet_bind2_bucket_find(struct inet_hashinfo *hinfo, struct net *net,
-                      const unsigned short port, int l3mdev,
-                      struct sock *sk,
-                      struct inet_bind2_hashbucket **head);
-
-bool check_bind2_bucket_match_nulladdr(struct inet_bind2_bucket *tb,
-                                      struct net *net,
-                                      const unsigned short port,
-                                      int l3mdev,
-                                      const struct sock *sk);
-
 static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
                               const u32 bhash_size)
 {
@@ -266,7 +200,7 @@ static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
 }
 
 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
-                   struct inet_bind2_bucket *tb2, const unsigned short snum);
+                   const unsigned short snum);
 
 /* Caller must disable local BH processing. */
 int __inet_inherit_port(const struct sock *sk, struct sock *child);
index c1b5dcd..6395f6b 100644 (file)
@@ -107,7 +107,8 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
 
 static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
 {
-       if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
+       if (!sk->sk_mark &&
+           READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept))
                return skb->mark;
 
        return sk->sk_mark;
@@ -120,7 +121,7 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
 #ifdef CONFIG_NET_L3_MASTER_DEV
        struct net *net = sock_net(sk);
 
-       if (!bound_dev_if && net->ipv4.sysctl_tcp_l3mdev_accept)
+       if (!bound_dev_if && READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept))
                return l3mdev_master_ifindex_by_index(net, skb->skb_iif);
 #endif
 
@@ -132,7 +133,7 @@ static inline int inet_sk_bound_l3mdev(const struct sock *sk)
 #ifdef CONFIG_NET_L3_MASTER_DEV
        struct net *net = sock_net(sk);
 
-       if (!net->ipv4.sysctl_tcp_l3mdev_accept)
+       if (!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept))
                return l3mdev_master_ifindex_by_index(net,
                                                      sk->sk_bound_dev_if);
 #endif
@@ -253,6 +254,11 @@ struct inet_sock {
 #define IP_CMSG_CHECKSUM       BIT(7)
 #define IP_CMSG_RECVFRAGSIZE   BIT(8)
 
+static inline bool sk_is_inet(struct sock *sk)
+{
+       return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
+}
+
 /**
  * sk_to_full_sk - Access to a full socket
  * @sk: pointer to a socket
@@ -369,7 +375,7 @@ static inline bool inet_get_convert_csum(struct sock *sk)
 static inline bool inet_can_nonlocal_bind(struct net *net,
                                          struct inet_sock *inet)
 {
-       return net->ipv4.sysctl_ip_nonlocal_bind ||
+       return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) ||
                inet->freebind || inet->transparent;
 }
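
This read (and the many below) is annotated because the sysctl value is stored locklessly from the proc handler while fast paths read it; the READ_ONCE()/WRITE_ONCE() pair stops the compiler from tearing or re-reading the access and keeps KCSAN quiet. A minimal, generic sketch of the pairing (names are hypothetical):

        /* Sketch: both sides of a lockless int must be marked. */
        static int example_knob;

        static void example_store(int val)      /* sysctl handler side */
        {
                WRITE_ONCE(example_knob, val);
        }

        static bool example_load(void)          /* fast-path reader */
        {
                return READ_ONCE(example_knob) > 0;
        }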
 
index 26fffda..1c979fd 100644 (file)
@@ -357,7 +357,7 @@ static inline bool sysctl_dev_name_is_allowed(const char *name)
 
 static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
 {
-       return port < net->ipv4.sysctl_ip_prot_sock;
+       return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
 }
 
 #else
@@ -384,7 +384,7 @@ void ipfrag_init(void);
 void ip_static_sysctl_init(void);
 
 #define IP4_REPLY_MARK(net, mark) \
-       ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
+       (READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)
 
 static inline bool ip_is_fragment(const struct iphdr *iph)
 {
@@ -446,7 +446,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
        struct net *net = dev_net(dst->dev);
        unsigned int mtu;
 
-       if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
+       if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
            ip_mtu_locked(dst) ||
            !forwarding) {
                mtu = rt->rt_pmtu;
index ebadb21..47642b0 100644 (file)
@@ -6960,10 +6960,11 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
  * @color_bitmap: a 64 bit bitmap representing the colors that the local BSS is
  *     aware of.
+ * @gfp: allocation flags
  */
 void
 ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
-                                      u64 color_bitmap);
+                                      u64 color_bitmap, gfp_t gfp);
 
 /**
  * ieee80211_is_tx_data - check if frame is a data frame
index 279ae0f..64cf655 100644 (file)
@@ -657,18 +657,22 @@ static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
        tmpl->len = sizeof(struct nft_set_ext);
 }
 
-static inline void nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
-                                         unsigned int len)
+static inline int nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
+                                        unsigned int len)
 {
        tmpl->len        = ALIGN(tmpl->len, nft_set_ext_types[id].align);
-       BUG_ON(tmpl->len > U8_MAX);
+       if (tmpl->len > U8_MAX)
+               return -EINVAL;
+
        tmpl->offset[id] = tmpl->len;
        tmpl->len       += nft_set_ext_types[id].len + len;
+
+       return 0;
 }
 
-static inline void nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)
+static inline int nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)
 {
-       nft_set_ext_add_length(tmpl, id, 0);
+       return nft_set_ext_add_length(tmpl, id, 0);
 }
 
 static inline void nft_set_ext_init(struct nft_set_ext *ext,
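
With the BUG_ON() gone, template population can fail, so every caller must propagate the error. A hedged sketch of the resulting caller pattern (example_tmpl_init and klen are hypothetical):

        /*
         * Sketch: each add may now return -EINVAL once the template
         * length would exceed U8_MAX, so check every step.
         */
        static int example_tmpl_init(struct nft_set_ext_tmpl *tmpl,
                                     unsigned int klen)
        {
                int err;

                nft_set_ext_prepare(tmpl);

                err = nft_set_ext_add_length(tmpl, NFT_SET_EXT_KEY, klen);
                if (err < 0)
                        return err;

                return nft_set_ext_add(tmpl, NFT_SET_EXT_FLAGS);
        }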
@@ -1338,24 +1342,28 @@ void nft_unregister_flowtable_type(struct nf_flowtable_type *type);
 /**
  *     struct nft_traceinfo - nft tracing information and state
  *
+ *     @trace: other struct members are initialised
+ *     @nf_trace: copy of skb->nf_trace before rule evaluation
+ *     @type: event type (enum nft_trace_types)
+ *     @skbid: hash of skb to be used as trace id
+ *     @packet_dumped: packet headers sent in a previous traceinfo message
  *     @pkt: pktinfo currently processed
  *     @basechain: base chain currently processed
  *     @chain: chain currently processed
  *     @rule:  rule that was evaluated
  *     @verdict: verdict given by rule
- *     @type: event type (enum nft_trace_types)
- *     @packet_dumped: packet headers sent in a previous traceinfo message
- *     @trace: other struct members are initialised
  */
 struct nft_traceinfo {
+       bool                            trace;
+       bool                            nf_trace;
+       bool                            packet_dumped;
+       enum nft_trace_types            type:8;
+       u32                             skbid;
        const struct nft_pktinfo        *pkt;
        const struct nft_base_chain     *basechain;
        const struct nft_chain          *chain;
        const struct nft_rule_dp        *rule;
        const struct nft_verdict        *verdict;
-       enum nft_trace_types            type;
-       bool                            packet_dumped;
-       bool                            trace;
 };
 
 void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
index f51c06a..6aef8cb 100644 (file)
@@ -35,8 +35,6 @@
 
 /* This is used to register protocols. */
 struct net_protocol {
-       int                     (*early_demux)(struct sk_buff *skb);
-       int                     (*early_demux_handler)(struct sk_buff *skb);
        int                     (*handler)(struct sk_buff *skb);
 
        /* This returns an error if we weren't able to handle the error. */
@@ -52,8 +50,6 @@ struct net_protocol {
 
 #if IS_ENABLED(CONFIG_IPV6)
 struct inet6_protocol {
-       void    (*early_demux)(struct sk_buff *skb);
-       void    (*early_demux_handler)(struct sk_buff *skb);
        int     (*handler)(struct sk_buff *skb);
 
        /* This returns an error if we weren't able to handle the error. */
index 8ad8df5..c51a635 100644 (file)
@@ -75,7 +75,7 @@ static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if,
                                       int dif, int sdif)
 {
 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
-       return inet_bound_dev_eq(!!net->ipv4.sysctl_raw_l3mdev_accept,
+       return inet_bound_dev_eq(READ_ONCE(net->ipv4.sysctl_raw_l3mdev_accept),
                                 bound_dev_if, dif, sdif);
 #else
        return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
index 991a398..bbcf2ab 100644 (file)
@@ -373,7 +373,7 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
        struct net *net = dev_net(dst->dev);
 
        if (hoplimit == 0)
-               hoplimit = net->ipv4.sysctl_ip_default_ttl;
+               hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
        return hoplimit;
 }
 
index c585ef6..7a48991 100644 (file)
@@ -348,7 +348,6 @@ struct sk_filter;
   *    @sk_txtime_report_errors: set report errors mode for SO_TXTIME
   *    @sk_txtime_unused: unused txtime flags
   *    @ns_tracker: tracker for netns reference
-  *    @sk_bind2_node: bind node in the bhash2 table
   */
 struct sock {
        /*
@@ -538,7 +537,6 @@ struct sock {
 #endif
        struct rcu_head         sk_rcu;
        netns_tracker           ns_tracker;
-       struct hlist_node       sk_bind2_node;
 };
 
 enum sk_pacing {
@@ -819,16 +817,6 @@ static inline void sk_add_bind_node(struct sock *sk,
        hlist_add_head(&sk->sk_bind_node, list);
 }
 
-static inline void __sk_del_bind2_node(struct sock *sk)
-{
-       __hlist_del(&sk->sk_bind2_node);
-}
-
-static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
-{
-       hlist_add_head(&sk->sk_bind2_node, list);
-}
-
 #define sk_for_each(__sk, list) \
        hlist_for_each_entry(__sk, list, sk_node)
 #define sk_for_each_rcu(__sk, list) \
@@ -846,8 +834,6 @@ static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
        hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
 #define sk_for_each_bound(__sk, list) \
        hlist_for_each_entry(__sk, list, sk_bind_node)
-#define sk_for_each_bound_bhash2(__sk, list) \
-       hlist_for_each_entry(__sk, list, sk_bind2_node)
 
 /**
  * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
@@ -1543,7 +1529,7 @@ void __sk_mem_reclaim(struct sock *sk, int amount);
 /* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */
 static inline long sk_prot_mem_limits(const struct sock *sk, int index)
 {
-       long val = sk->sk_prot->sysctl_mem[index];
+       long val = READ_ONCE(sk->sk_prot->sysctl_mem[index]);
 
 #if PAGE_SIZE > SK_MEM_QUANTUM
        val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
@@ -2857,18 +2843,18 @@ static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
 {
        /* Does this proto have per netns sysctl_wmem ? */
        if (proto->sysctl_wmem_offset)
-               return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
+               return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset));
 
-       return *proto->sysctl_wmem;
+       return READ_ONCE(*proto->sysctl_wmem);
 }
 
 static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
 {
        /* Does this proto have per netns sysctl_rmem ? */
        if (proto->sysctl_rmem_offset)
-               return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
+               return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset));
 
-       return *proto->sysctl_rmem;
+       return READ_ONCE(*proto->sysctl_rmem);
 }
 
 /* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
index 1e99f5c..78a64e1 100644 (file)
@@ -932,7 +932,7 @@ extern const struct inet_connection_sock_af_ops ipv6_specific;
 
 INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
 INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
-INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb));
+void tcp_v6_early_demux(struct sk_buff *skb);
 
 #endif
 
@@ -1403,8 +1403,8 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        s32 delta;
 
-       if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
-           ca_ops->cong_control)
+       if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
+           tp->packets_out || ca_ops->cong_control)
                return;
        delta = tcp_jiffies32 - tp->lsndtime;
        if (delta > inet_csk(sk)->icsk_rto)
@@ -1419,7 +1419,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space,
 
 static inline int tcp_win_from_space(const struct sock *sk, int space)
 {
-       int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
+       int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
 
        return tcp_adv_win_scale <= 0 ?
                (space>>(-tcp_adv_win_scale)) :
@@ -1493,21 +1493,24 @@ static inline int keepalive_intvl_when(const struct tcp_sock *tp)
 {
        struct net *net = sock_net((struct sock *)tp);
 
-       return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
+       return tp->keepalive_intvl ? :
+               READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
 }
 
 static inline int keepalive_time_when(const struct tcp_sock *tp)
 {
        struct net *net = sock_net((struct sock *)tp);
 
-       return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
+       return tp->keepalive_time ? :
+               READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
 }
 
 static inline int keepalive_probes(const struct tcp_sock *tp)
 {
        struct net *net = sock_net((struct sock *)tp);
 
-       return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
+       return tp->keepalive_probes ? :
+               READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
 }
 
 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
@@ -1520,7 +1523,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
 
 static inline int tcp_fin_time(const struct sock *sk)
 {
-       int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
+       int fin_timeout = tcp_sk(sk)->linger2 ? :
+               READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
        const int rto = inet_csk(sk)->icsk_rto;
 
        if (fin_timeout < (rto << 2) - (rto >> 1))
@@ -2023,7 +2027,7 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
 {
        struct net *net = sock_net((struct sock *)tp);
-       return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
+       return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
 }
 
 bool tcp_stream_memory_free(const struct sock *sk, int wake);
index 8017f17..8bd938f 100644 (file)
@@ -704,7 +704,7 @@ int tls_sw_fallback_init(struct sock *sk,
                         struct tls_crypto_info *crypto_info);
 
 #ifdef CONFIG_TLS_DEVICE
-void tls_device_init(void);
+int tls_device_init(void);
 void tls_device_cleanup(void);
 void tls_device_sk_destruct(struct sock *sk);
 int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
@@ -724,7 +724,7 @@ static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
        return tls_get_ctx(sk)->rx_conf == TLS_HW;
 }
 #else
-static inline void tls_device_init(void) {}
+static inline int tls_device_init(void) { return 0; }
 static inline void tls_device_cleanup(void) {}
 
 static inline int
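
With tls_device_init() returning int, module initialization can unwind on failure instead of silently continuing half-initialized; the !CONFIG_TLS_DEVICE stub returns 0 so callers stay ifdef-free. A hedged sketch of the propagation (example_tls_init is hypothetical):

        /* Sketch: propagate device-offload init failure from module init. */
        static int __init example_tls_init(void)
        {
                int err;

                err = tls_device_init();
                if (err)
                        return err;

                /* ... register the rest; call tls_device_cleanup() on error ... */
                return 0;
        }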
index b83a003..8dd4aa1 100644 (file)
@@ -167,7 +167,7 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
 typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport,
                                     __be16 dport);
 
-INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *));
+void udp_v6_early_demux(struct sk_buff *skb);
 INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
 
 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
@@ -238,7 +238,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
                                       int dif, int sdif)
 {
 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
-       return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
+       return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept),
                                 bound_dev_if, dif, sdif);
 #else
        return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
index f20f5f8..b276dcb 100644 (file)
@@ -408,8 +408,6 @@ struct snd_soc_jack_pin;
 
 struct snd_soc_jack_gpio;
 
-typedef int (*hw_write_t)(void *,const char* ,int);
-
 enum snd_soc_pcm_subclass {
        SND_SOC_PCM_CLASS_PCM   = 0,
        SND_SOC_PCM_CLASS_BE    = 1,
index 32088c6..bad2122 100644 (file)
 /* note: we begin tracing dlm_lock_start() only if ls and lkb are found */
 TRACE_EVENT(dlm_lock_start,
 
-       TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, int mode,
-                __u32 flags),
+       TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, void *name,
+                unsigned int namelen, int mode, __u32 flags),
 
-       TP_ARGS(ls, lkb, mode, flags),
+       TP_ARGS(ls, lkb, name, namelen, mode, flags),
 
        TP_STRUCT__entry(
                __field(__u32, ls_id)
                __field(__u32, lkb_id)
                __field(int, mode)
                __field(__u32, flags)
+               __dynamic_array(unsigned char, res_name,
+                               lkb->lkb_resource ? lkb->lkb_resource->res_length : namelen)
        ),
 
        TP_fast_assign(
+               struct dlm_rsb *r;
+
                __entry->ls_id = ls->ls_global_id;
                __entry->lkb_id = lkb->lkb_id;
                __entry->mode = mode;
                __entry->flags = flags;
+
+               r = lkb->lkb_resource;
+               if (r)
+                       memcpy(__get_dynamic_array(res_name), r->res_name,
+                              __get_dynamic_array_len(res_name));
+               else if (name)
+                       memcpy(__get_dynamic_array(res_name), name,
+                              __get_dynamic_array_len(res_name));
        ),
 
-       TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s",
+       TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s res_name=%s",
                  __entry->ls_id, __entry->lkb_id,
                  show_lock_mode(__entry->mode),
-                 show_lock_flags(__entry->flags))
+                 show_lock_flags(__entry->flags),
+                 __print_hex_str(__get_dynamic_array(res_name),
+                                 __get_dynamic_array_len(res_name)))
 
 );
 
 TRACE_EVENT(dlm_lock_end,
 
-       TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, int mode, __u32 flags,
-                int error),
+       TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, void *name,
+                unsigned int namelen, int mode, __u32 flags, int error),
 
-       TP_ARGS(ls, lkb, mode, flags, error),
+       TP_ARGS(ls, lkb, name, namelen, mode, flags, error),
 
        TP_STRUCT__entry(
                __field(__u32, ls_id)
@@ -88,14 +102,26 @@ TRACE_EVENT(dlm_lock_end,
                __field(int, mode)
                __field(__u32, flags)
                __field(int, error)
+               __dynamic_array(unsigned char, res_name,
+                               lkb->lkb_resource ? lkb->lkb_resource->res_length : namelen)
        ),
 
        TP_fast_assign(
+               struct dlm_rsb *r;
+
                __entry->ls_id = ls->ls_global_id;
                __entry->lkb_id = lkb->lkb_id;
                __entry->mode = mode;
                __entry->flags = flags;
 
+               r = lkb->lkb_resource;
+               if (r)
+                       memcpy(__get_dynamic_array(res_name), r->res_name,
+                              __get_dynamic_array_len(res_name));
+               else if (name)
+                       memcpy(__get_dynamic_array(res_name), name,
+                              __get_dynamic_array_len(res_name));
+
                /* return value will be zeroed in those cases by dlm_lock()
                 * we do it here again to not introduce more overhead if
                 * trace isn't running and error reflects the return value.
@@ -104,12 +130,15 @@ TRACE_EVENT(dlm_lock_end,
                        __entry->error = 0;
                else
                        __entry->error = error;
+
        ),
 
-       TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s error=%d",
+       TP_printk("ls_id=%u lkb_id=%x mode=%s flags=%s error=%d res_name=%s",
                  __entry->ls_id, __entry->lkb_id,
                  show_lock_mode(__entry->mode),
-                 show_lock_flags(__entry->flags), __entry->error)
+                 show_lock_flags(__entry->flags), __entry->error,
+                 __print_hex_str(__get_dynamic_array(res_name),
+                                 __get_dynamic_array_len(res_name)))
 
 );
 
@@ -123,42 +152,65 @@ TRACE_EVENT(dlm_bast,
                __field(__u32, ls_id)
                __field(__u32, lkb_id)
                __field(int, mode)
+               __dynamic_array(unsigned char, res_name,
+                               lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
        ),
 
        TP_fast_assign(
+               struct dlm_rsb *r;
+
                __entry->ls_id = ls->ls_global_id;
                __entry->lkb_id = lkb->lkb_id;
                __entry->mode = mode;
+
+               r = lkb->lkb_resource;
+               if (r)
+                       memcpy(__get_dynamic_array(res_name), r->res_name,
+                              __get_dynamic_array_len(res_name));
        ),
 
-       TP_printk("ls_id=%u lkb_id=%x mode=%s", __entry->ls_id,
-                 __entry->lkb_id, show_lock_mode(__entry->mode))
+       TP_printk("ls_id=%u lkb_id=%x mode=%s res_name=%s",
+                 __entry->ls_id, __entry->lkb_id,
+                 show_lock_mode(__entry->mode),
+                 __print_hex_str(__get_dynamic_array(res_name),
+                                 __get_dynamic_array_len(res_name)))
 
 );
 
 TRACE_EVENT(dlm_ast,
 
-       TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_lksb *lksb),
+       TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb),
 
-       TP_ARGS(ls, lkb, lksb),
+       TP_ARGS(ls, lkb),
 
        TP_STRUCT__entry(
                __field(__u32, ls_id)
                __field(__u32, lkb_id)
                __field(u8, sb_flags)
                __field(int, sb_status)
+               __dynamic_array(unsigned char, res_name,
+                               lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
        ),
 
        TP_fast_assign(
+               struct dlm_rsb *r;
+
                __entry->ls_id = ls->ls_global_id;
                __entry->lkb_id = lkb->lkb_id;
-               __entry->sb_flags = lksb->sb_flags;
-               __entry->sb_status = lksb->sb_status;
+               __entry->sb_flags = lkb->lkb_lksb->sb_flags;
+               __entry->sb_status = lkb->lkb_lksb->sb_status;
+
+               r = lkb->lkb_resource;
+               if (r)
+                       memcpy(__get_dynamic_array(res_name), r->res_name,
+                              __get_dynamic_array_len(res_name));
        ),
 
-       TP_printk("ls_id=%u lkb_id=%x sb_flags=%s sb_status=%d",
+       TP_printk("ls_id=%u lkb_id=%x sb_flags=%s sb_status=%d res_name=%s",
                  __entry->ls_id, __entry->lkb_id,
-                 show_dlm_sb_flags(__entry->sb_flags), __entry->sb_status)
+                 show_dlm_sb_flags(__entry->sb_flags), __entry->sb_status,
+                 __print_hex_str(__get_dynamic_array(res_name),
+                                 __get_dynamic_array_len(res_name)))
 
 );
 
@@ -173,17 +225,28 @@ TRACE_EVENT(dlm_unlock_start,
                __field(__u32, ls_id)
                __field(__u32, lkb_id)
                __field(__u32, flags)
+               __dynamic_array(unsigned char, res_name,
+                               lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
        ),
 
        TP_fast_assign(
+               struct dlm_rsb *r;
+
                __entry->ls_id = ls->ls_global_id;
                __entry->lkb_id = lkb->lkb_id;
                __entry->flags = flags;
+
+               r = lkb->lkb_resource;
+               if (r)
+                       memcpy(__get_dynamic_array(res_name), r->res_name,
+                              __get_dynamic_array_len(res_name));
        ),
 
-       TP_printk("ls_id=%u lkb_id=%x flags=%s",
+       TP_printk("ls_id=%u lkb_id=%x flags=%s res_name=%s",
                  __entry->ls_id, __entry->lkb_id,
-                 show_lock_flags(__entry->flags))
+                 show_lock_flags(__entry->flags),
+                 __print_hex_str(__get_dynamic_array(res_name),
+                                 __get_dynamic_array_len(res_name)))
 
 );
 
@@ -199,18 +262,29 @@ TRACE_EVENT(dlm_unlock_end,
                __field(__u32, lkb_id)
                __field(__u32, flags)
                __field(int, error)
+               __dynamic_array(unsigned char, res_name,
+                               lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
        ),
 
        TP_fast_assign(
+               struct dlm_rsb *r;
+
                __entry->ls_id = ls->ls_global_id;
                __entry->lkb_id = lkb->lkb_id;
                __entry->flags = flags;
                __entry->error = error;
+
+               r = lkb->lkb_resource;
+               if (r)
+                       memcpy(__get_dynamic_array(res_name), r->res_name,
+                              __get_dynamic_array_len(res_name));
        ),
 
-       TP_printk("ls_id=%u lkb_id=%x flags=%s error=%d",
+       TP_printk("ls_id=%u lkb_id=%x flags=%s error=%d res_name=%s",
                  __entry->ls_id, __entry->lkb_id,
-                 show_lock_flags(__entry->flags), __entry->error)
+                 show_lock_flags(__entry->flags), __entry->error,
+                 __print_hex_str(__get_dynamic_array(res_name),
+                                 __get_dynamic_array_len(res_name)))
 
 );
 
index 66fcc5a..aa2f951 100644 (file)
@@ -158,6 +158,8 @@ TRACE_EVENT(io_uring_queue_async_work,
                __field(  unsigned int,                 flags           )
                __field(  struct io_wq_work *,          work            )
                __field(  int,                          rw              )
+
+               __string( op_str, io_uring_get_opcode(opcode)   )
        ),
 
        TP_fast_assign(
@@ -168,11 +170,13 @@ TRACE_EVENT(io_uring_queue_async_work,
                __entry->opcode         = opcode;
                __entry->work           = work;
                __entry->rw             = rw;
+
+               __assign_str(op_str, io_uring_get_opcode(opcode));
        ),
 
        TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p",
                __entry->ctx, __entry->req, __entry->user_data,
-               io_uring_get_opcode(__entry->opcode),
+               __get_str(op_str),
                __entry->flags, __entry->rw ? "hashed" : "normal", __entry->work)
 );
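
The pattern repeated across these events copies the opcode name into the ring buffer at trace time via __string()/__assign_str(), instead of calling io_uring_get_opcode() from TP_printk() at read-out, when the recorded data may outlive the code that produced it. A minimal, generic sketch of the three-step idiom (example_event is hypothetical):

        /*
         * Sketch: declare, assign, print; the string is captured into
         * the trace ring buffer while the event fires.
         */
        TRACE_EVENT(example_event,
                TP_PROTO(u8 opcode),
                TP_ARGS(opcode),
                TP_STRUCT__entry(
                        __string(op_str, io_uring_get_opcode(opcode))
                ),
                TP_fast_assign(
                        __assign_str(op_str, io_uring_get_opcode(opcode));
                ),
                TP_printk("opcode %s", __get_str(op_str))
        );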
 
@@ -198,6 +202,8 @@ TRACE_EVENT(io_uring_defer,
                __field(  void *,               req     )
                __field(  unsigned long long,   data    )
                __field(  u8,                   opcode  )
+
+               __string( op_str, io_uring_get_opcode(opcode) )
        ),
 
        TP_fast_assign(
@@ -205,11 +211,13 @@ TRACE_EVENT(io_uring_defer,
                __entry->req    = req;
                __entry->data   = user_data;
                __entry->opcode = opcode;
+
+               __assign_str(op_str, io_uring_get_opcode(opcode));
        ),
 
        TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s",
                __entry->ctx, __entry->req, __entry->data,
-               io_uring_get_opcode(__entry->opcode))
+               __get_str(op_str))
 );
 
 /**
@@ -298,6 +306,8 @@ TRACE_EVENT(io_uring_fail_link,
                __field(  unsigned long long,   user_data       )
                __field(  u8,                   opcode          )
                __field(  void *,               link            )
+
+               __string( op_str, io_uring_get_opcode(opcode) )
        ),
 
        TP_fast_assign(
@@ -306,11 +316,13 @@ TRACE_EVENT(io_uring_fail_link,
                __entry->user_data      = user_data;
                __entry->opcode         = opcode;
                __entry->link           = link;
+
+               __assign_str(op_str, io_uring_get_opcode(opcode));
        ),
 
        TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p",
                __entry->ctx, __entry->req, __entry->user_data,
-               io_uring_get_opcode(__entry->opcode), __entry->link)
+               __get_str(op_str), __entry->link)
 );
 
 /**
@@ -390,6 +402,8 @@ TRACE_EVENT(io_uring_submit_sqe,
                __field(  u32,                  flags           )
                __field(  bool,                 force_nonblock  )
                __field(  bool,                 sq_thread       )
+
+               __string( op_str, io_uring_get_opcode(opcode) )
        ),
 
        TP_fast_assign(
@@ -400,11 +414,13 @@ TRACE_EVENT(io_uring_submit_sqe,
                __entry->flags          = flags;
                __entry->force_nonblock = force_nonblock;
                __entry->sq_thread      = sq_thread;
+
+               __assign_str(op_str, io_uring_get_opcode(opcode));
        ),
 
        TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, "
                  "non block %d, sq_thread %d", __entry->ctx, __entry->req,
-                 __entry->user_data, io_uring_get_opcode(__entry->opcode),
+                 __entry->user_data, __get_str(op_str),
                  __entry->flags, __entry->force_nonblock, __entry->sq_thread)
 );
 
@@ -435,6 +451,8 @@ TRACE_EVENT(io_uring_poll_arm,
                __field(  u8,                   opcode          )
                __field(  int,                  mask            )
                __field(  int,                  events          )
+
+               __string( op_str, io_uring_get_opcode(opcode) )
        ),
 
        TP_fast_assign(
@@ -444,11 +462,13 @@ TRACE_EVENT(io_uring_poll_arm,
                __entry->opcode         = opcode;
                __entry->mask           = mask;
                __entry->events         = events;
+
+               __assign_str(op_str, io_uring_get_opcode(opcode));
        ),
 
        TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x",
                  __entry->ctx, __entry->req, __entry->user_data,
-                 io_uring_get_opcode(__entry->opcode),
+                 __get_str(op_str),
                  __entry->mask, __entry->events)
 );
 
@@ -474,6 +494,8 @@ TRACE_EVENT(io_uring_task_add,
                __field(  unsigned long long,   user_data       )
                __field(  u8,                   opcode          )
                __field(  int,                  mask            )
+
+               __string( op_str, io_uring_get_opcode(opcode) )
        ),
 
        TP_fast_assign(
@@ -482,11 +504,13 @@ TRACE_EVENT(io_uring_task_add,
                __entry->user_data      = user_data;
                __entry->opcode         = opcode;
                __entry->mask           = mask;
+
+               __assign_str(op_str, io_uring_get_opcode(opcode));
        ),
 
        TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x",
                __entry->ctx, __entry->req, __entry->user_data,
-               io_uring_get_opcode(__entry->opcode),
+               __get_str(op_str),
                __entry->mask)
 );
 
@@ -523,6 +547,8 @@ TRACE_EVENT(io_uring_req_failed,
                __field( u64,                   pad1            )
                __field( u64,                   addr3           )
                __field( int,                   error           )
+
+               __string( op_str, io_uring_get_opcode(sqe->opcode) )
        ),
 
        TP_fast_assign(
@@ -542,6 +568,8 @@ TRACE_EVENT(io_uring_req_failed,
                __entry->pad1           = sqe->__pad2[0];
                __entry->addr3          = sqe->addr3;
                __entry->error          = error;
+
+               __assign_str(op_str, io_uring_get_opcode(sqe->opcode));
        ),
 
        TP_printk("ring %p, req %p, user_data 0x%llx, "
@@ -550,7 +578,7 @@ TRACE_EVENT(io_uring_req_failed,
                  "personality=%d, file_index=%d, pad=0x%llx, addr3=%llx, "
                  "error=%d",
                  __entry->ctx, __entry->req, __entry->user_data,
-                 io_uring_get_opcode(__entry->opcode),
+                 __get_str(op_str),
                  __entry->flags, __entry->ioprio,
                  (unsigned long long)__entry->off,
                  (unsigned long long) __entry->addr, __entry->len,
index e282ce0..6d1626e 100644 (file)
@@ -160,7 +160,7 @@ TRACE_EVENT(iocost_ioc_vrate_adj,
 
        TP_fast_assign(
                __assign_str(devname, ioc_name(ioc));
-               __entry->old_vrate = atomic64_read(&ioc->vtime_rate);;
+               __entry->old_vrate = atomic64_read(&ioc->vtime_rate);
                __entry->new_vrate = new_vrate;
                __entry->busy_level = ioc->busy_level;
                __entry->read_missed_ppm = missed_ppm[READ];
index f766683..4cb51ac 100644 (file)
@@ -13,11 +13,12 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 
        TP_PROTO(unsigned long call_site,
                 const void *ptr,
+                struct kmem_cache *s,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags),
 
-       TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
+       TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags),
 
        TP_STRUCT__entry(
                __field(        unsigned long,  call_site       )
@@ -25,6 +26,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
                __field(        size_t,         bytes_req       )
                __field(        size_t,         bytes_alloc     )
                __field(        unsigned long,  gfp_flags       )
+               __field(        bool,           accounted       )
        ),
 
        TP_fast_assign(
@@ -33,42 +35,47 @@ DECLARE_EVENT_CLASS(kmem_alloc,
                __entry->bytes_req      = bytes_req;
                __entry->bytes_alloc    = bytes_alloc;
                __entry->gfp_flags      = (__force unsigned long)gfp_flags;
+               __entry->accounted      = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+                                         ((gfp_flags & __GFP_ACCOUNT) ||
+                                         (s && s->flags & SLAB_ACCOUNT)) : false;
        ),
 
-       TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
+       TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s",
                (void *)__entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
-               show_gfp_flags(__entry->gfp_flags))
+               show_gfp_flags(__entry->gfp_flags),
+               __entry->accounted ? "true" : "false")
 );
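The new accounted field computes the same predicate in both event classes; factored out as a helper (name hypothetical), the logic is:

    /* True when the allocation is charged to a memory cgroup: either the
     * call site passed __GFP_ACCOUNT or the whole cache is SLAB_ACCOUNT.
     */
    static inline bool kmem_alloc_accounted(struct kmem_cache *s, gfp_t gfp_flags)
    {
            if (!IS_ENABLED(CONFIG_MEMCG_KMEM))
                    return false;
            return (gfp_flags & __GFP_ACCOUNT) || (s && (s->flags & SLAB_ACCOUNT));
    }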
 
 DEFINE_EVENT(kmem_alloc, kmalloc,
 
-       TP_PROTO(unsigned long call_site, const void *ptr,
+       TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
                 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
 
-       TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+       TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
 );
 
 DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
 
-       TP_PROTO(unsigned long call_site, const void *ptr,
+       TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
                 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
 
-       TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+       TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
 );
 
 DECLARE_EVENT_CLASS(kmem_alloc_node,
 
        TP_PROTO(unsigned long call_site,
                 const void *ptr,
+                struct kmem_cache *s,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags,
                 int node),
 
-       TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
+       TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),
 
        TP_STRUCT__entry(
                __field(        unsigned long,  call_site       )
@@ -77,6 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
                __field(        size_t,         bytes_alloc     )
                __field(        unsigned long,  gfp_flags       )
                __field(        int,            node            )
+               __field(        bool,           accounted       )
        ),
 
        TP_fast_assign(
@@ -86,33 +94,37 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
                __entry->bytes_alloc    = bytes_alloc;
                __entry->gfp_flags      = (__force unsigned long)gfp_flags;
                __entry->node           = node;
+               __entry->accounted      = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+                                         ((gfp_flags & __GFP_ACCOUNT) ||
+                                         (s && s->flags & SLAB_ACCOUNT)) : false;
        ),
 
-       TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+       TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
                (void *)__entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags),
-               __entry->node)
+               __entry->node,
+               __entry->accounted ? "true" : "false")
 );
 
 DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
 
        TP_PROTO(unsigned long call_site, const void *ptr,
-                size_t bytes_req, size_t bytes_alloc,
+                struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
                 gfp_t gfp_flags, int node),
 
-       TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+       TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
 );
 
 DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
 
        TP_PROTO(unsigned long call_site, const void *ptr,
-                size_t bytes_req, size_t bytes_alloc,
+                struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
                 gfp_t gfp_flags, int node),
 
-       TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+       TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
 );
 
 TRACE_EVENT(kfree,
index d4e631a..6025dd8 100644 (file)
@@ -288,6 +288,7 @@ DECLARE_EVENT_CLASS(ata_qc_complete_template,
                __entry->hob_feature    = qc->result_tf.hob_feature;
                __entry->nsect          = qc->result_tf.nsect;
                __entry->hob_nsect      = qc->result_tf.hob_nsect;
+               __entry->flags          = qc->flags;
        ),
 
        TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s status=%s " \
index 12c3157..777ee6c 100644 (file)
@@ -98,7 +98,7 @@ TRACE_EVENT(sock_exceed_buf_limit,
 
        TP_STRUCT__entry(
                __array(char, name, 32)
-               __field(long *, sysctl_mem)
+               __array(long, sysctl_mem, 3)
                __field(long, allocated)
                __field(int, sysctl_rmem)
                __field(int, rmem_alloc)
@@ -110,7 +110,9 @@ TRACE_EVENT(sock_exceed_buf_limit,
 
        TP_fast_assign(
                strncpy(__entry->name, prot->name, 32);
-               __entry->sysctl_mem = prot->sysctl_mem;
+               __entry->sysctl_mem[0] = READ_ONCE(prot->sysctl_mem[0]);
+               __entry->sysctl_mem[1] = READ_ONCE(prot->sysctl_mem[1]);
+               __entry->sysctl_mem[2] = READ_ONCE(prot->sysctl_mem[2]);
                __entry->allocated = allocated;
                __entry->sysctl_rmem = sk_get_rmem0(sk, prot);
                __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
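The old field recorded a pointer into prot->sysctl_mem, which the trace formatter would dereference long after the event fired (and which leaks a kernel address to userspace). The fix snapshots the three limits by value at trace time, with READ_ONCE() marking the lockless reads. The same idea outside the tracepoint macros, as a hedged sketch:

    long snap[3];
    int i;

    /* Copy the values, not the pointer; the array may change under us. */
    for (i = 0; i < 3; i++)
            snap[i] = READ_ONCE(prot->sysctl_mem[i]);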
index f13d37b..1ecdb91 100644 (file)
@@ -192,6 +192,7 @@ struct f_owner_ex {
 
 #define F_LINUX_SPECIFIC_BASE  1024
 
+#ifndef HAVE_ARCH_STRUCT_FLOCK
 struct flock {
        short   l_type;
        short   l_whence;
@@ -216,5 +217,6 @@ struct flock64 {
        __ARCH_FLOCK64_PAD
 #endif
 };
+#endif /* HAVE_ARCH_STRUCT_FLOCK */
 
 #endif /* _ASM_GENERIC_FCNTL_H */
index f197215..0980678 100644 (file)
@@ -1444,11 +1444,11 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
 #define AMD_FMT_MOD_PIPE_MASK 0x7
 
 #define AMD_FMT_MOD_SET(field, value) \
-       ((uint64_t)(value) << AMD_FMT_MOD_##field##_SHIFT)
+       ((__u64)(value) << AMD_FMT_MOD_##field##_SHIFT)
 #define AMD_FMT_MOD_GET(field, value) \
        (((value) >> AMD_FMT_MOD_##field##_SHIFT) & AMD_FMT_MOD_##field##_MASK)
 #define AMD_FMT_MOD_CLEAR(field) \
-       (~((uint64_t)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
+       (~((__u64)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
 
 #if defined(__cplusplus)
 }
index f4009db..ef78e0e 100644 (file)
@@ -5222,22 +5222,25 @@ union bpf_attr {
  *     Return
  *             Nothing. Always succeeds.
  *
- * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset)
+ * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset, u64 flags)
  *     Description
  *             Read *len* bytes from *src* into *dst*, starting from *offset*
  *             into *src*.
+ *             *flags* is currently unused.
  *     Return
  *             0 on success, -E2BIG if *offset* + *len* exceeds the length
- *             of *src*'s data, -EINVAL if *src* is an invalid dynptr.
+ *             of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
+ *             *flags* is not 0.
  *
- * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len)
+ * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
  *     Description
  *             Write *len* bytes from *src* into *dst*, starting from *offset*
  *             into *dst*.
+ *             *flags* is currently unused.
  *     Return
  *             0 on success, -E2BIG if *offset* + *len* exceeds the length
  *             of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
- *             is a read-only dynptr.
+ *             is a read-only dynptr or if *flags* is not 0.
  *
  * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)
  *     Description
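A BPF-program-side sketch of the extended helpers: the new trailing flags argument is reserved and must be 0, otherwise both calls now fail with -EINVAL. The ring buffer map declaration is omitted and the map name is illustrative:

    char buf[16];
    struct bpf_dynptr ptr;

    if (!bpf_ringbuf_reserve_dynptr(&events, sizeof(buf), 0, &ptr)) {
            /* offset 0; the last argument (flags) must be 0 for now */
            bpf_dynptr_read(buf, sizeof(buf), &ptr, 0, 0);
            bpf_dynptr_write(&ptr, 0, buf, sizeof(buf), 0);
    }
    bpf_ringbuf_discard_dynptr(&ptr, 0);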
index f1f8913..d8536d7 100644 (file)
 #define FAN_MARK_FLUSH         0x00000080
 /* FAN_MARK_FILESYSTEM is      0x00000100 */
 #define FAN_MARK_EVICTABLE     0x00000200
+/* This bit is mutually exclusive with the FAN_MARK_IGNORED_MASK bit */
+#define FAN_MARK_IGNORE                0x00000400
 
 /* These are NOT bitwise flags.  Both bits can be used together.  */
 #define FAN_MARK_INODE         0x00000000
 #define FAN_MARK_MOUNT         0x00000010
 #define FAN_MARK_FILESYSTEM    0x00000100
 
+/*
+ * Convenience macro - FAN_MARK_IGNORE requires FAN_MARK_IGNORED_SURV_MODIFY
+ * for non-inode mark types.
+ */
+#define FAN_MARK_IGNORE_SURV   (FAN_MARK_IGNORE | FAN_MARK_IGNORED_SURV_MODIFY)
+
 /* Deprecated - do not use this in programs and do not add new flags here! */
 #define FAN_ALL_MARK_FLAGS     (FAN_MARK_ADD |\
                                 FAN_MARK_REMOVE |\
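A hedged userspace sketch of the new flag: an ignore mark on a non-inode object (mount or filesystem) must combine FAN_MARK_IGNORE with FAN_MARK_IGNORED_SURV_MODIFY, which is exactly what FAN_MARK_IGNORE_SURV bundles. The path and event mask are illustrative:

    #include <fcntl.h>
    #include <sys/fanotify.h>

    int fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);

    if (fd >= 0)
            /* Stop reporting FAN_OPEN on this mount; the ignore mark
             * survives modification events, as required for mount marks. */
            fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT | FAN_MARK_IGNORE_SURV,
                          FAN_OPEN, AT_FDCWD, "/mnt/data");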
index ef4257a..2557eb7 100644 (file)
@@ -78,10 +78,13 @@ struct input_id {
  * Note that input core does not clamp reported values to the
  * [minimum, maximum] limits, such task is left to userspace.
  *
- * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z)
- * is reported in units per millimeter (units/mm), resolution
- * for rotational axes (ABS_RX, ABS_RY, ABS_RZ) is reported
- * in units per radian.
+ * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z,
+ * ABS_MT_POSITION_X, ABS_MT_POSITION_Y) is reported in units
+ * per millimeter (units/mm), resolution for rotational axes
+ * (ABS_RX, ABS_RY, ABS_RZ) is reported in units per radian.
+ * The resolution for the size axes (ABS_MT_TOUCH_MAJOR,
+ * ABS_MT_TOUCH_MINOR, ABS_MT_WIDTH_MAJOR, ABS_MT_WIDTH_MINOR)
+ * is reported in units per millimeter (units/mm).
  * When INPUT_PROP_ACCELEROMETER is set the resolution changes.
  * The main axes (ABS_X, ABS_Y, ABS_Z) are then reported in
  * units per g (units/g) and in units per degree per second
index 776e027..0ad3da2 100644 (file)
@@ -22,7 +22,10 @@ struct io_uring_sqe {
        union {
                __u64   off;    /* offset into file */
                __u64   addr2;
-               __u32   cmd_op;
+               struct {
+                       __u32   cmd_op;
+                       __u32   __pad1;
+               };
        };
        union {
                __u64   addr;   /* pointer to buffer or iovecs */
@@ -47,7 +50,6 @@ struct io_uring_sqe {
                __u32           unlink_flags;
                __u32           hardlink_flags;
                __u32           xattr_flags;
-               __u32           close_flags;
        };
        __u64   user_data;      /* data to be passed back at completion time */
        /* pack this to avoid bogus arm OABI complaints */
@@ -245,7 +247,7 @@ enum io_uring_op {
 #define IORING_ASYNC_CANCEL_ANY        (1U << 2)
 
 /*
- * send/sendmsg and recv/recvmsg flags (sqe->addr2)
+ * send/sendmsg and recv/recvmsg flags (sqe->ioprio)
  *
  * IORING_RECVSEND_POLL_FIRST  If set, instead of first attempting to send
  *                             or receive and arm poll if that yields an
@@ -260,11 +262,6 @@ enum io_uring_op {
 #define IORING_ACCEPT_MULTISHOT        (1U << 0)
 
 /*
- * close flags, store in sqe->close_flags
- */
-#define IORING_CLOSE_FD_AND_FILE_SLOT  (1U << 0)
-
-/*
  * IO completion data structure (Completion Queue Entry)
  */
 struct io_uring_cqe {
index 5088bd9..860f867 100644 (file)
@@ -2083,7 +2083,8 @@ struct kvm_stats_header {
 #define KVM_STATS_UNIT_BYTES           (0x1 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_SECONDS         (0x2 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_CYCLES          (0x3 << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_MAX             KVM_STATS_UNIT_CYCLES
+#define KVM_STATS_UNIT_BOOLEAN         (0x4 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_MAX             KVM_STATS_UNIT_BOOLEAN
 
 #define KVM_STATS_BASE_SHIFT           8
 #define KVM_STATS_BASE_MASK            (0xF << KVM_STATS_BASE_SHIFT)
index 9219635..dfe19bf 100644 (file)
@@ -2,16 +2,17 @@
 #ifndef _UAPI_MPTCP_H
 #define _UAPI_MPTCP_H
 
+#ifndef __KERNEL__
+#include <netinet/in.h>                /* for sockaddr_in and sockaddr_in6     */
+#include <sys/socket.h>                /* for struct sockaddr                  */
+#endif
+
 #include <linux/const.h>
 #include <linux/types.h>
 #include <linux/in.h>          /* for sockaddr_in                      */
 #include <linux/in6.h>         /* for sockaddr_in6                     */
 #include <linux/socket.h>      /* for sockaddr_storage and sa_family   */
 
-#ifndef __KERNEL__
-#include <sys/socket.h>                /* for struct sockaddr                  */
-#endif
-
 #define MPTCP_SUBFLOW_FLAG_MCAP_REM            _BITUL(0)
 #define MPTCP_SUBFLOW_FLAG_MCAP_LOC            _BITUL(1)
 #define MPTCP_SUBFLOW_FLAG_JOIN_REM            _BITUL(2)
index 9d0f06b..68aeae2 100644 (file)
@@ -38,8 +38,9 @@
 #define N_NULL         27      /* Null ldisc used for error handling */
 #define N_MCTP         28      /* MCTP-over-serial */
 #define N_DEVELOPMENT  29      /* Manual out-of-tree testing */
+#define N_CAN327       30      /* ELM327 based OBD-II interfaces */
 
 /* Always the newest line discipline + 1 */
-#define NR_LDISCS      30
+#define NR_LDISCS      31
 
 #endif /* _UAPI_LINUX_TTY_H */
index e1126a7..eff166f 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef __LINUX_OF_DISPLAY_TIMING_H
 #define __LINUX_OF_DISPLAY_TIMING_H
 
+#include <linux/errno.h>
+
 struct device_node;
 struct display_timing;
 struct display_timings;
index 754f323..e1fcaed 100644 (file)
@@ -64,7 +64,7 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
                goto fail_put;
 
        if (!setup_ipc_sysctls(ns))
-               goto fail_put;
+               goto fail_mq;
 
        sem_init_ns(ns);
        msg_init_ns(ns);
@@ -72,6 +72,9 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
 
        return ns;
 
+fail_mq:
+       retire_mq_sysctls(ns);
+
 fail_put:
        put_user_ns(ns->user_ns);
        ns_free_inum(&ns->ns);
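The bug fixed here is a classic in goto-based unwinding: a setup_ipc_sysctls() failure jumped to fail_put, skipping retire_mq_sysctls() for the mq sysctls that had already been registered. The general pattern, as an illustrative sketch (all names hypothetical):

    static int example_create(struct example_ns *ns)
    {
            int err;

            err = setup_first(ns);
            if (err)
                    return err;

            err = setup_second(ns);
            if (err)
                    goto fail_first;        /* undo only what already succeeded */

            return 0;

    fail_first:
            teardown_first(ns);
            return err;
    }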
index f3a2abd..3a8c9d7 100644 (file)
@@ -1014,10 +1014,10 @@ static void audit_reset_context(struct audit_context *ctx)
        ctx->target_comm[0] = '\0';
        unroll_tree_refs(ctx, NULL, 0);
        WARN_ON(!list_empty(&ctx->killed_trees));
-       ctx->type = 0;
        audit_free_module(ctx);
        ctx->fds[0] = -1;
        audit_proctitle_free(ctx);
+       ctx->type = 0; /* reset last for audit_free_*() */
 }
 
 static inline struct audit_context *audit_alloc_context(enum audit_state state)
index 63d0ac7..eb12d4f 100644 (file)
@@ -4815,6 +4815,7 @@ static int btf_check_type_tags(struct btf_verifier_env *env,
        n = btf_nr_types(btf);
        for (i = start_id; i < n; i++) {
                const struct btf_type *t;
+               int chain_limit = 32;
                u32 cur_id = i;
 
                t = btf_type_by_id(btf, i);
@@ -4827,6 +4828,10 @@ static int btf_check_type_tags(struct btf_verifier_env *env,
 
                in_tags = btf_type_is_type_tag(t);
                while (btf_type_is_modifier(t)) {
+                       if (!chain_limit--) {
+                               btf_verifier_log(env, "Max chain length or cycle detected");
+                               return -ELOOP;
+                       }
                        if (btf_type_is_type_tag(t)) {
                                if (!in_tags) {
                                        btf_verifier_log(env, "Type tags don't precede modifiers");
index 5f6f3f8..e796150 100644 (file)
@@ -68,11 +68,13 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
 {
        u8 *ptr = NULL;
 
-       if (k >= SKF_NET_OFF)
+       if (k >= SKF_NET_OFF) {
                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
-       else if (k >= SKF_LL_OFF)
+       } else if (k >= SKF_LL_OFF) {
+               if (unlikely(!skb_mac_header_was_set(skb)))
+                       return NULL;
                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
-
+       }
        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
                return ptr;
 
index 225806a..bb1254f 100644 (file)
@@ -1497,11 +1497,12 @@ const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
        .arg4_type      = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,
 };
 
-BPF_CALL_4(bpf_dynptr_read, void *, dst, u32, len, struct bpf_dynptr_kern *, src, u32, offset)
+BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, struct bpf_dynptr_kern *, src,
+          u32, offset, u64, flags)
 {
        int err;
 
-       if (!src->data)
+       if (!src->data || flags)
                return -EINVAL;
 
        err = bpf_dynptr_check_off_len(src, offset, len);
@@ -1521,13 +1522,15 @@ const struct bpf_func_proto bpf_dynptr_read_proto = {
        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg3_type      = ARG_PTR_TO_DYNPTR,
        .arg4_type      = ARG_ANYTHING,
+       .arg5_type      = ARG_ANYTHING,
 };
 
-BPF_CALL_4(bpf_dynptr_write, struct bpf_dynptr_kern *, dst, u32, offset, void *, src, u32, len)
+BPF_CALL_5(bpf_dynptr_write, struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
+          u32, len, u64, flags)
 {
        int err;
 
-       if (!dst->data || bpf_dynptr_is_rdonly(dst))
+       if (!dst->data || flags || bpf_dynptr_is_rdonly(dst))
                return -EINVAL;
 
        err = bpf_dynptr_check_off_len(dst, offset, len);
@@ -1547,6 +1550,7 @@ const struct bpf_func_proto bpf_dynptr_write_proto = {
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_PTR_TO_MEM | MEM_RDONLY,
        .arg4_type      = ARG_CONST_SIZE_OR_ZERO,
+       .arg5_type      = ARG_ANYTHING,
 };
 
 BPF_CALL_3(bpf_dynptr_data, struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
index aedac2a..0efbac0 100644 (file)
@@ -1562,6 +1562,21 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
        reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
 }
 
+static void reg_bounds_sync(struct bpf_reg_state *reg)
+{
+       /* We might have learned new bounds from the var_off. */
+       __update_reg_bounds(reg);
+       /* We might have learned something about the sign bit. */
+       __reg_deduce_bounds(reg);
+       /* We might have learned some bits from the bounds. */
+       __reg_bound_offset(reg);
+       /* Intersecting with the old var_off might have improved our bounds
+        * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
+        * then new var_off is (0; 0x7f...fc) which improves our umax.
+        */
+       __update_reg_bounds(reg);
+}
+
 static bool __reg32_bound_s64(s32 a)
 {
        return a >= 0 && a <= S32_MAX;
@@ -1603,16 +1618,8 @@ static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
                 * so they do not impact tnum bounds calculation.
                 */
                __mark_reg64_unbounded(reg);
-               __update_reg_bounds(reg);
        }
-
-       /* Intersecting with the old var_off might have improved our bounds
-        * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
-        * then new var_off is (0; 0x7f...fc) which improves our umax.
-        */
-       __reg_deduce_bounds(reg);
-       __reg_bound_offset(reg);
-       __update_reg_bounds(reg);
+       reg_bounds_sync(reg);
 }
 
 static bool __reg64_bound_s32(s64 a)
@@ -1628,7 +1635,6 @@ static bool __reg64_bound_u32(u64 a)
 static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
 {
        __mark_reg32_unbounded(reg);
-
        if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
                reg->s32_min_value = (s32)reg->smin_value;
                reg->s32_max_value = (s32)reg->smax_value;
@@ -1637,14 +1643,7 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
                reg->u32_min_value = (u32)reg->umin_value;
                reg->u32_max_value = (u32)reg->umax_value;
        }
-
-       /* Intersecting with the old var_off might have improved our bounds
-        * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
-        * then new var_off is (0; 0x7f...fc) which improves our umax.
-        */
-       __reg_deduce_bounds(reg);
-       __reg_bound_offset(reg);
-       __update_reg_bounds(reg);
+       reg_bounds_sync(reg);
 }
 
 /* Mark a register as having a completely unknown (scalar) value. */
@@ -6943,9 +6942,7 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
        ret_reg->s32_max_value = meta->msize_max_value;
        ret_reg->smin_value = -MAX_ERRNO;
        ret_reg->s32_min_value = -MAX_ERRNO;
-       __reg_deduce_bounds(ret_reg);
-       __reg_bound_offset(ret_reg);
-       __update_reg_bounds(ret_reg);
+       reg_bounds_sync(ret_reg);
 }
 
 static int
@@ -8202,11 +8199,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
        if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
                return -EINVAL;
-
-       __update_reg_bounds(dst_reg);
-       __reg_deduce_bounds(dst_reg);
-       __reg_bound_offset(dst_reg);
-
+       reg_bounds_sync(dst_reg);
        if (sanitize_check_bounds(env, insn, dst_reg) < 0)
                return -EACCES;
        if (sanitize_needed(opcode)) {
@@ -8944,10 +8937,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        /* ALU32 ops are zero extended into 64bit register */
        if (alu32)
                zext_32_to_64(dst_reg);
-
-       __update_reg_bounds(dst_reg);
-       __reg_deduce_bounds(dst_reg);
-       __reg_bound_offset(dst_reg);
+       reg_bounds_sync(dst_reg);
        return 0;
 }
 
@@ -9136,10 +9126,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                                         insn->dst_reg);
                                }
                                zext_32_to_64(dst_reg);
-
-                               __update_reg_bounds(dst_reg);
-                               __reg_deduce_bounds(dst_reg);
-                               __reg_bound_offset(dst_reg);
+                               reg_bounds_sync(dst_reg);
                        }
                } else {
                        /* case: R = imm
@@ -9577,26 +9564,33 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
                return;
 
        switch (opcode) {
+       /* JEQ/JNE comparison doesn't change the register equivalence.
+        *
+        * r1 = r2;
+        * if (r1 == 42) goto label;
+        * ...
+        * label: // here both r1 and r2 are known to be 42.
+        *
+        * Hence when marking register as known preserve its ID.
+        */
        case BPF_JEQ:
+               if (is_jmp32) {
+                       __mark_reg32_known(true_reg, val32);
+                       true_32off = tnum_subreg(true_reg->var_off);
+               } else {
+                       ___mark_reg_known(true_reg, val);
+                       true_64off = true_reg->var_off;
+               }
+               break;
        case BPF_JNE:
-       {
-               struct bpf_reg_state *reg =
-                       opcode == BPF_JEQ ? true_reg : false_reg;
-
-               /* JEQ/JNE comparison doesn't change the register equivalence.
-                * r1 = r2;
-                * if (r1 == 42) goto label;
-                * ...
-                * label: // here both r1 and r2 are known to be 42.
-                *
-                * Hence when marking register as known preserve it's ID.
-                */
-               if (is_jmp32)
-                       __mark_reg32_known(reg, val32);
-               else
-                       ___mark_reg_known(reg, val);
+               if (is_jmp32) {
+                       __mark_reg32_known(false_reg, val32);
+                       false_32off = tnum_subreg(false_reg->var_off);
+               } else {
+                       ___mark_reg_known(false_reg, val);
+                       false_64off = false_reg->var_off;
+               }
                break;
-       }
        case BPF_JSET:
                if (is_jmp32) {
                        false_32off = tnum_and(false_32off, tnum_const(~val32));
@@ -9735,21 +9729,8 @@ static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
                                                        dst_reg->smax_value);
        src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
                                                             dst_reg->var_off);
-       /* We might have learned new bounds from the var_off. */
-       __update_reg_bounds(src_reg);
-       __update_reg_bounds(dst_reg);
-       /* We might have learned something about the sign bit. */
-       __reg_deduce_bounds(src_reg);
-       __reg_deduce_bounds(dst_reg);
-       /* We might have learned some bits from the bounds. */
-       __reg_bound_offset(src_reg);
-       __reg_bound_offset(dst_reg);
-       /* Intersecting with the old var_off might have improved our bounds
-        * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
-        * then new var_off is (0; 0x7f...fc) which improves our umax.
-        */
-       __update_reg_bounds(src_reg);
-       __update_reg_bounds(dst_reg);
+       reg_bounds_sync(src_reg);
+       reg_bounds_sync(dst_reg);
 }
 
 static void reg_combine_min_max(struct bpf_reg_state *true_src,
index 9594cfd..08102d1 100644 (file)
@@ -281,6 +281,8 @@ static inline cfi_check_fn find_module_check_fn(unsigned long ptr)
 static inline cfi_check_fn find_check_fn(unsigned long ptr)
 {
        cfi_check_fn fn = NULL;
+       unsigned long flags;
+       bool rcu_idle;
 
        if (is_kernel_text(ptr))
                return __cfi_check;
@@ -290,13 +292,21 @@ static inline cfi_check_fn find_check_fn(unsigned long ptr)
         * the shadow and __module_address use RCU, so we need to wake it
         * up if necessary.
         */
-       RCU_NONIDLE({
-               if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW))
-                       fn = find_shadow_check_fn(ptr);
+       rcu_idle = !rcu_is_watching();
+       if (rcu_idle) {
+               local_irq_save(flags);
+               rcu_irq_enter();
+       }
+
+       if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW))
+               fn = find_shadow_check_fn(ptr);
+       if (!fn)
+               fn = find_module_check_fn(ptr);
 
-               if (!fn)
-                       fn = find_module_check_fn(ptr);
-       });
+       if (rcu_idle) {
+               rcu_irq_exit();
+               local_irq_restore(flags);
+       }
 
        return fn;
 }
index 1779ccd..13c8e91 100644 (file)
@@ -765,7 +765,8 @@ struct css_set init_css_set = {
        .task_iters             = LIST_HEAD_INIT(init_css_set.task_iters),
        .threaded_csets         = LIST_HEAD_INIT(init_css_set.threaded_csets),
        .cgrp_links             = LIST_HEAD_INIT(init_css_set.cgrp_links),
-       .mg_preload_node        = LIST_HEAD_INIT(init_css_set.mg_preload_node),
+       .mg_src_preload_node    = LIST_HEAD_INIT(init_css_set.mg_src_preload_node),
+       .mg_dst_preload_node    = LIST_HEAD_INIT(init_css_set.mg_dst_preload_node),
        .mg_node                = LIST_HEAD_INIT(init_css_set.mg_node),
 
        /*
@@ -1240,7 +1241,8 @@ static struct css_set *find_css_set(struct css_set *old_cset,
        INIT_LIST_HEAD(&cset->threaded_csets);
        INIT_HLIST_NODE(&cset->hlist);
        INIT_LIST_HEAD(&cset->cgrp_links);
-       INIT_LIST_HEAD(&cset->mg_preload_node);
+       INIT_LIST_HEAD(&cset->mg_src_preload_node);
+       INIT_LIST_HEAD(&cset->mg_dst_preload_node);
        INIT_LIST_HEAD(&cset->mg_node);
 
        /* Copy the set of subsystem state objects generated in
@@ -2597,21 +2599,27 @@ int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp)
  */
 void cgroup_migrate_finish(struct cgroup_mgctx *mgctx)
 {
-       LIST_HEAD(preloaded);
        struct css_set *cset, *tmp_cset;
 
        lockdep_assert_held(&cgroup_mutex);
 
        spin_lock_irq(&css_set_lock);
 
-       list_splice_tail_init(&mgctx->preloaded_src_csets, &preloaded);
-       list_splice_tail_init(&mgctx->preloaded_dst_csets, &preloaded);
+       list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_src_csets,
+                                mg_src_preload_node) {
+               cset->mg_src_cgrp = NULL;
+               cset->mg_dst_cgrp = NULL;
+               cset->mg_dst_cset = NULL;
+               list_del_init(&cset->mg_src_preload_node);
+               put_css_set_locked(cset);
+       }
 
-       list_for_each_entry_safe(cset, tmp_cset, &preloaded, mg_preload_node) {
+       list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_dst_csets,
+                                mg_dst_preload_node) {
                cset->mg_src_cgrp = NULL;
                cset->mg_dst_cgrp = NULL;
                cset->mg_dst_cset = NULL;
-               list_del_init(&cset->mg_preload_node);
+               list_del_init(&cset->mg_dst_preload_node);
                put_css_set_locked(cset);
        }
 
@@ -2651,7 +2659,7 @@ void cgroup_migrate_add_src(struct css_set *src_cset,
        if (src_cset->dead)
                return;
 
-       if (!list_empty(&src_cset->mg_preload_node))
+       if (!list_empty(&src_cset->mg_src_preload_node))
                return;
 
        src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
@@ -2664,7 +2672,7 @@ void cgroup_migrate_add_src(struct css_set *src_cset,
        src_cset->mg_src_cgrp = src_cgrp;
        src_cset->mg_dst_cgrp = dst_cgrp;
        get_css_set(src_cset);
-       list_add_tail(&src_cset->mg_preload_node, &mgctx->preloaded_src_csets);
+       list_add_tail(&src_cset->mg_src_preload_node, &mgctx->preloaded_src_csets);
 }
 
 /**
@@ -2689,7 +2697,7 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
 
        /* look up the dst cset for each src cset and link it to src */
        list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets,
-                                mg_preload_node) {
+                                mg_src_preload_node) {
                struct css_set *dst_cset;
                struct cgroup_subsys *ss;
                int ssid;
@@ -2708,7 +2716,7 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
                if (src_cset == dst_cset) {
                        src_cset->mg_src_cgrp = NULL;
                        src_cset->mg_dst_cgrp = NULL;
-                       list_del_init(&src_cset->mg_preload_node);
+                       list_del_init(&src_cset->mg_src_preload_node);
                        put_css_set(src_cset);
                        put_css_set(dst_cset);
                        continue;
@@ -2716,8 +2724,8 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
 
                src_cset->mg_dst_cset = dst_cset;
 
-               if (list_empty(&dst_cset->mg_preload_node))
-                       list_add_tail(&dst_cset->mg_preload_node,
+               if (list_empty(&dst_cset->mg_dst_preload_node))
+                       list_add_tail(&dst_cset->mg_dst_preload_node,
                                      &mgctx->preloaded_dst_csets);
                else
                        put_css_set(dst_cset);
@@ -2963,7 +2971,8 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
                goto out_finish;
 
        spin_lock_irq(&css_set_lock);
-       list_for_each_entry(src_cset, &mgctx.preloaded_src_csets, mg_preload_node) {
+       list_for_each_entry(src_cset, &mgctx.preloaded_src_csets,
+                           mg_src_preload_node) {
                struct task_struct *task, *ntask;
 
                /* all tasks in src_csets need to be migrated */
index dcd86f3..6fac5b4 100644 (file)
@@ -7,12 +7,11 @@ CONFIG_DEBUG_SLAB=y
 CONFIG_DEBUG_KMEMLEAK=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_SLUB_DEBUG_ON=y
-CONFIG_KMEMCHECK=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
 CONFIG_GCOV_KERNEL=y
 CONFIG_LOCKDEP=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_SCHEDSTATS=y
-CONFIG_VMLINUX_VALIDATION=y
+CONFIG_NOINSTR_VALIDATION=y
 CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
index e978f36..8d0b68a 100644 (file)
@@ -357,7 +357,7 @@ void dma_direct_free(struct device *dev, size_t size,
        } else {
                if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
                        arch_dma_clear_uncached(cpu_addr, size);
-               if (dma_set_encrypted(dev, cpu_addr, 1 << page_order))
+               if (dma_set_encrypted(dev, cpu_addr, size))
                        return;
        }
 
@@ -392,7 +392,6 @@ void dma_direct_free_pages(struct device *dev, size_t size,
                struct page *page, dma_addr_t dma_addr,
                enum dma_data_direction dir)
 {
-       unsigned int page_order = get_order(size);
        void *vaddr = page_address(page);
 
        /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
@@ -400,7 +399,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
            dma_free_from_pool(dev, vaddr, size))
                return;
 
-       if (dma_set_encrypted(dev, vaddr, 1 << page_order))
+       if (dma_set_encrypted(dev, vaddr, size))
                return;
        __dma_direct_free_pages(dev, page, size);
 }
index 80782cd..d2b3549 100644 (file)
@@ -6253,10 +6253,10 @@ again:
 
                if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
                        /*
-                        * Raced against perf_mmap_close() through
-                        * perf_event_set_output(). Try again, hope for better
-                        * luck.
+                        * Raced against perf_mmap_close(); remove the
+                        * event and try again.
                         */
+                       ring_buffer_attach(event, NULL);
                        mutex_unlock(&event->mmap_mutex);
                        goto again;
                }
@@ -11825,14 +11825,25 @@ err_size:
        goto out;
 }
 
+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+       if (b < a)
+               swap(a, b);
+
+       mutex_lock(a);
+       mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
        struct perf_buffer *rb = NULL;
        int ret = -EINVAL;
 
-       if (!output_event)
+       if (!output_event) {
+               mutex_lock(&event->mmap_mutex);
                goto set;
+       }
 
        /* don't allow circular references */
        if (event == output_event)
@@ -11870,8 +11881,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
            event->pmu != output_event->pmu)
                goto out;
 
+       /*
+        * Hold both mmap_mutex to serialize against perf_mmap_close().  Since
+        * output_event is already on rb->event_list, and the list iteration
+        * restarts after every removal, it is guaranteed this new event is
+        * observed *OR* if output_event is already removed, it's guaranteed we
+        * observe !rb->mmap_count.
+        */
+       mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
 set:
-       mutex_lock(&event->mmap_mutex);
        /* Can't redirect output if we've got an active mmap() */
        if (atomic_read(&event->mmap_count))
                goto unlock;
@@ -11881,6 +11899,12 @@ set:
                rb = ring_buffer_get(output_event);
                if (!rb)
                        goto unlock;
+
+               /* did we race against perf_mmap_close() */
+               if (!atomic_read(&rb->mmap_count)) {
+                       ring_buffer_put(rb);
+                       goto unlock;
+               }
        }
 
        ring_buffer_attach(event, rb);
@@ -11888,20 +11912,13 @@ set:
        ret = 0;
 unlock:
        mutex_unlock(&event->mmap_mutex);
+       if (output_event)
+               mutex_unlock(&output_event->mmap_mutex);
 
 out:
        return ret;
 }
 
-static void mutex_lock_double(struct mutex *a, struct mutex *b)
-{
-       if (b < a)
-               swap(a, b);
-
-       mutex_lock(a);
-       mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
-}
-
 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
 {
        bool nmi_safe = false;
index f072959..64c938c 100644 (file)
@@ -766,7 +766,7 @@ void __noreturn do_exit(long code)
 
 #ifdef CONFIG_POSIX_TIMERS
                hrtimer_cancel(&tsk->signal->real_timer);
-               exit_itimers(tsk->signal);
+               exit_itimers(tsk);
 #endif
                if (tsk->mm)
                        setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
index 80bfea5..cff3ae8 100644 (file)
@@ -127,8 +127,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
         * complain:
         */
        if (sysctl_hung_task_warnings) {
-               printk_prefer_direct_enter();
-
                if (sysctl_hung_task_warnings > 0)
                        sysctl_hung_task_warnings--;
                pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
@@ -144,8 +142,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 
                if (sysctl_hung_task_all_cpu_backtrace)
                        hung_task_show_all_bt = true;
-
-               printk_prefer_direct_exit();
        }
 
        touch_nmi_watchdog();
@@ -208,17 +204,12 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
        }
  unlock:
        rcu_read_unlock();
-       if (hung_task_show_lock) {
-               printk_prefer_direct_enter();
+       if (hung_task_show_lock)
                debug_show_all_locks();
-               printk_prefer_direct_exit();
-       }
 
        if (hung_task_show_all_bt) {
                hung_task_show_all_bt = false;
-               printk_prefer_direct_enter();
                trigger_all_cpu_backtrace();
-               printk_prefer_direct_exit();
        }
 
        if (hung_task_call_panic)
index e6b8e56..886789d 100644 (file)
@@ -1006,8 +1006,10 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
                if (desc->irq_data.chip != &no_irq_chip)
                        mask_ack_irq(desc);
                irq_state_set_disabled(desc);
-               if (is_chained)
+               if (is_chained) {
                        desc->action = NULL;
+                       WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc)));
+               }
                desc->depth = 1;
        }
        desc->handle_irq = handle;
@@ -1033,6 +1035,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
                irq_settings_set_norequest(desc);
                irq_settings_set_nothread(desc);
                desc->action = &chained_action;
+               WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc)));
                irq_activate_and_startup(desc, IRQ_RESEND);
        }
 }
index 145321a..f9261c0 100644 (file)
 #include <linux/vmalloc.h>
 #include "kexec_internal.h"
 
+#ifdef CONFIG_KEXEC_SIG
+static bool sig_enforce = IS_ENABLED(CONFIG_KEXEC_SIG_FORCE);
+
+void set_kexec_sig_enforced(void)
+{
+       sig_enforce = true;
+}
+#endif
+
 static int kexec_calculate_store_digests(struct kimage *image);
 
 /*
@@ -159,7 +168,7 @@ kimage_validate_signature(struct kimage *image)
                                           image->kernel_buf_len);
        if (ret) {
 
-               if (IS_ENABLED(CONFIG_KEXEC_SIG_FORCE)) {
+               if (sig_enforce) {
                        pr_notice("Enforced kernel signature verification failed (%d).\n", ret);
                        return ret;
                }
index 544fd40..3c67791 100644 (file)
@@ -340,7 +340,7 @@ static int kthread(void *_create)
 
        self = to_kthread(current);
 
-       /* If user was SIGKILLed, I release the structure. */
+       /* Release the structure when the caller is killed by a fatal signal. */
        done = xchg(&create->done, NULL);
        if (!done) {
                kfree(create);
@@ -398,7 +398,7 @@ static void create_kthread(struct kthread_create_info *create)
        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
-               /* If user was SIGKILLed, I release the structure. */
+               /* Release the structure when the caller is killed by a fatal signal. */
                struct completion *done = xchg(&create->done, NULL);
 
                if (!done) {
@@ -440,9 +440,9 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
         */
        if (unlikely(wait_for_completion_killable(&done))) {
                /*
-                * If I was SIGKILLed before kthreadd (or new kernel thread)
-                * calls complete(), leave the cleanup of this structure to
-                * that thread.
+                * If I was killed by a fatal signal before kthreadd (or new
+                * kernel thread) calls complete(), leave the cleanup of this
+                * structure to that thread.
                 */
                if (xchg(&create->done, NULL))
                        return ERR_PTR(-EINTR);
@@ -876,7 +876,7 @@ fail_task:
  *
  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
- * when the worker was SIGKILLed.
+ * when the caller was killed by a fatal signal.
  */
 struct kthread_worker *
 kthread_create_worker(unsigned int flags, const char namefmt[], ...)
@@ -925,7 +925,7 @@ EXPORT_SYMBOL(kthread_create_worker);
  * Return:
  * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
- * when the worker was SIGKILLed.
+ * when the caller was killed by a fatal signal.
  */
 struct kthread_worker *
 kthread_create_worker_on_cpu(int cpu, unsigned int flags,
index 81e8728..f06b91c 100644 (file)
@@ -5432,7 +5432,7 @@ static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
                         * be guessable and still allows some pin nesting in
                         * our u32 pin_count.
                         */
-                       cookie.val = 1 + (prandom_u32() >> 16);
+                       cookie.val = 1 + (sched_clock() & 0xffff);
                        hlock->pin_count += cookie.val;
                        return cookie;
                }
index 9d1db4a..65f0262 100644 (file)
@@ -335,8 +335,6 @@ struct rwsem_waiter {
        struct task_struct *task;
        enum rwsem_waiter_type type;
        unsigned long timeout;
-
-       /* Writer only, not initialized in reader */
        bool handoff_set;
 };
 #define rwsem_first_waiter(sem) \
@@ -459,10 +457,12 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
                         * to give up the lock), request a HANDOFF to
                         * force the issue.
                         */
-                       if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
-                           time_after(jiffies, waiter->timeout)) {
-                               adjustment -= RWSEM_FLAG_HANDOFF;
-                               lockevent_inc(rwsem_rlock_handoff);
+                       if (time_after(jiffies, waiter->timeout)) {
+                               if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
+                                       adjustment -= RWSEM_FLAG_HANDOFF;
+                                       lockevent_inc(rwsem_rlock_handoff);
+                               }
+                               waiter->handoff_set = true;
                        }
 
                        atomic_long_add(-adjustment, &sem->count);
@@ -599,7 +599,7 @@ rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
                                        struct rwsem_waiter *waiter)
 {
-       bool first = rwsem_first_waiter(sem) == waiter;
+       struct rwsem_waiter *first = rwsem_first_waiter(sem);
        long count, new;
 
        lockdep_assert_held(&sem->wait_lock);
@@ -609,11 +609,20 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
                bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
 
                if (has_handoff) {
-                       if (!first)
+                       /*
+                        * Honor handoff bit and yield only when the first
+                        * waiter is the one that set it. Otherwise, we
+                        * still try to acquire the rwsem.
+                        */
+                       if (first->handoff_set && (waiter != first))
                                return false;
 
-                       /* First waiter inherits a previously set handoff bit */
-                       waiter->handoff_set = true;
+                       /*
+                        * First waiter can inherit a previously set handoff
+                        * bit and spin on rwsem if lock acquisition fails.
+                        */
+                       if (waiter == first)
+                               waiter->handoff_set = true;
                }
 
                new = count;
@@ -1027,6 +1036,7 @@ queue:
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_READ;
        waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
+       waiter.handoff_set = false;
 
        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list)) {
index bc5507a..ec104c2 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/mutex.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
+#include <linux/mm.h>
 
 #ifndef ARCH_SHF_SMALL
 #define ARCH_SHF_SMALL 0
  * to ensure complete separation of code and data, but
  * only when CONFIG_STRICT_MODULE_RWX=y
  */
-#ifdef CONFIG_STRICT_MODULE_RWX
-# define strict_align(X) PAGE_ALIGN(X)
-#else
-# define strict_align(X) (X)
-#endif
+static inline unsigned int strict_align(unsigned int size)
+{
+       if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+               return PAGE_ALIGN(size);
+       else
+               return size;
+}
 
 extern struct mutex module_mutex;
 extern struct list_head modules;
index 3e11523..77e75be 100644 (file)
@@ -137,6 +137,7 @@ void layout_symtab(struct module *mod, struct load_info *info)
        info->symoffs = ALIGN(mod->data_layout.size, symsect->sh_addralign ?: 1);
        info->stroffs = mod->data_layout.size = info->symoffs + ndst * sizeof(Elf_Sym);
        mod->data_layout.size += strtab_size;
+       /* Note add_kallsyms() computes strtab_size as core_typeoffs - stroffs */
        info->core_typeoffs = mod->data_layout.size;
        mod->data_layout.size += ndst * sizeof(char);
        mod->data_layout.size = strict_align(mod->data_layout.size);
@@ -169,19 +170,20 @@ void add_kallsyms(struct module *mod, const struct load_info *info)
        Elf_Sym *dst;
        char *s;
        Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
+       unsigned long strtab_size;
 
        /* Set up to point into init section. */
        mod->kallsyms = (void __rcu *)mod->init_layout.base +
                info->mod_kallsyms_init_off;
 
-       preempt_disable();
+       rcu_read_lock();
        /* The following is safe since this pointer cannot change */
-       rcu_dereference_sched(mod->kallsyms)->symtab = (void *)symsec->sh_addr;
-       rcu_dereference_sched(mod->kallsyms)->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
+       rcu_dereference(mod->kallsyms)->symtab = (void *)symsec->sh_addr;
+       rcu_dereference(mod->kallsyms)->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
        /* Make sure we get permanent strtab: don't use info->strtab. */
-       rcu_dereference_sched(mod->kallsyms)->strtab =
+       rcu_dereference(mod->kallsyms)->strtab =
                (void *)info->sechdrs[info->index.str].sh_addr;
-       rcu_dereference_sched(mod->kallsyms)->typetab = mod->init_layout.base + info->init_typeoffs;
+       rcu_dereference(mod->kallsyms)->typetab = mod->init_layout.base + info->init_typeoffs;
 
        /*
         * Now populate the cut down core kallsyms for after init
@@ -190,22 +192,29 @@ void add_kallsyms(struct module *mod, const struct load_info *info)
        mod->core_kallsyms.symtab = dst = mod->data_layout.base + info->symoffs;
        mod->core_kallsyms.strtab = s = mod->data_layout.base + info->stroffs;
        mod->core_kallsyms.typetab = mod->data_layout.base + info->core_typeoffs;
-       src = rcu_dereference_sched(mod->kallsyms)->symtab;
-       for (ndst = i = 0; i < rcu_dereference_sched(mod->kallsyms)->num_symtab; i++) {
-               rcu_dereference_sched(mod->kallsyms)->typetab[i] = elf_type(src + i, info);
+       strtab_size = info->core_typeoffs - info->stroffs;
+       src = rcu_dereference(mod->kallsyms)->symtab;
+       for (ndst = i = 0; i < rcu_dereference(mod->kallsyms)->num_symtab; i++) {
+               rcu_dereference(mod->kallsyms)->typetab[i] = elf_type(src + i, info);
                if (i == 0 || is_livepatch_module(mod) ||
                    is_core_symbol(src + i, info->sechdrs, info->hdr->e_shnum,
                                   info->index.pcpu)) {
+                       ssize_t ret;
+
                        mod->core_kallsyms.typetab[ndst] =
-                           rcu_dereference_sched(mod->kallsyms)->typetab[i];
+                           rcu_dereference(mod->kallsyms)->typetab[i];
                        dst[ndst] = src[i];
                        dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
-                       s += strscpy(s,
-                                    &rcu_dereference_sched(mod->kallsyms)->strtab[src[i].st_name],
-                                    KSYM_NAME_LEN) + 1;
+                       ret = strscpy(s,
+                                     &rcu_dereference(mod->kallsyms)->strtab[src[i].st_name],
+                                     strtab_size);
+                       if (ret < 0)
+                               break;
+                       s += ret + 1;
+                       strtab_size -= ret + 1;
                }
        }
-       preempt_enable();
+       rcu_read_unlock();
        mod->core_kallsyms.num_symtab = ndst;
 }
 
index fed58d3..0548151 100644 (file)
@@ -2939,24 +2939,25 @@ static void cfi_init(struct module *mod)
 {
 #ifdef CONFIG_CFI_CLANG
        initcall_t *init;
+#ifdef CONFIG_MODULE_UNLOAD
        exitcall_t *exit;
+#endif
 
        rcu_read_lock_sched();
        mod->cfi_check = (cfi_check_fn)
                find_kallsyms_symbol_value(mod, "__cfi_check");
        init = (initcall_t *)
                find_kallsyms_symbol_value(mod, "__cfi_jt_init_module");
-       exit = (exitcall_t *)
-               find_kallsyms_symbol_value(mod, "__cfi_jt_cleanup_module");
-       rcu_read_unlock_sched();
-
        /* Fix init/exit functions to point to the CFI jump table */
        if (init)
                mod->init = *init;
 #ifdef CONFIG_MODULE_UNLOAD
+       exit = (exitcall_t *)
+               find_kallsyms_symbol_value(mod, "__cfi_jt_cleanup_module");
        if (exit)
                mod->exit = *exit;
 #endif
+       rcu_read_unlock_sched();
 
        cfi_module_add(mod, mod_tree.addr_min);
 #endif
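
Moving the cleanup-symbol lookup under CONFIG_MODULE_UNLOAD forces the
`exit` declaration under the same guard; otherwise !CONFIG_MODULE_UNLOAD
builds emit an unused-variable warning. The shape of the rule, reduced to a
toy (the macro here is only illustrative):

    #include <stdio.h>

    #define CONFIG_MODULE_UNLOAD            /* drop this line to test the other build */

    int main(void)
    {
    #ifdef CONFIG_MODULE_UNLOAD
            const char *exit_sym;           /* declared only where it is used */
    #endif
            printf("init lookup always runs\n");
    #ifdef CONFIG_MODULE_UNLOAD
            exit_sym = "__cfi_jt_cleanup_module";
            printf("exit lookup: %s\n", exit_sym);
    #endif
            return 0;
    }
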
index a3c758d..a3308af 100644
@@ -603,8 +603,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 {
        disable_trace_on_warning();
 
-       printk_prefer_direct_enter();
-
        if (file)
                pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
                        raw_smp_processor_id(), current->pid, file, line,
@@ -634,8 +632,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 
        /* Just a warning, don't kill lockdep. */
        add_taint(taint, LOCKDEP_STILL_OK);
-
-       printk_prefer_direct_exit();
 }
 
 #ifndef __WARN_FLAGS
index 20a66bf..89c71fc 100644
@@ -665,7 +665,7 @@ static void power_down(void)
                hibernation_platform_enter();
                fallthrough;
        case HIBERNATION_SHUTDOWN:
-               if (pm_power_off)
+               if (kernel_can_power_off())
                        kernel_power_off();
                break;
        }
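
kernel_can_power_off() asks whether any power-off mechanism is registered,
which is broader than testing the legacy pm_power_off pointer alone. A toy
model of such a capability helper (the handler names below are hypothetical,
not the kernel's):

    typedef void (*power_off_fn)(void);

    static power_off_fn legacy_pm_power_off;   /* old-style hook */
    static int nr_sys_off_handlers;            /* new-style registrations */

    static int can_power_off_sketch(void)
    {
            /* true if any mechanism exists, not just the legacy pointer */
            return legacy_pm_power_off != 0 || nr_sys_off_handlers > 0;
    }
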
index ea3dd55..a1a81fd 100644
@@ -224,33 +224,6 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
 static int nr_ext_console_drivers;
 
 /*
- * Used to synchronize printing kthreads against direct printing via
- * console_trylock/console_unlock.
- *
- * Values:
- * -1 = console kthreads atomically blocked (via global trylock)
- *  0 = no kthread printing, console not locked (via trylock)
- * >0 = kthread(s) actively printing
- *
- * Note: For synchronizing against direct printing via
- *       console_lock/console_unlock, see the @lock variable in
- *       struct console.
- */
-static atomic_t console_kthreads_active = ATOMIC_INIT(0);
-
-#define console_kthreads_atomic_tryblock() \
-       (atomic_cmpxchg(&console_kthreads_active, 0, -1) == 0)
-#define console_kthreads_atomic_unblock() \
-       atomic_cmpxchg(&console_kthreads_active, -1, 0)
-#define console_kthreads_atomically_blocked() \
-       (atomic_read(&console_kthreads_active) == -1)
-
-#define console_kthread_printing_tryenter() \
-       atomic_inc_unless_negative(&console_kthreads_active)
-#define console_kthread_printing_exit() \
-       atomic_dec(&console_kthreads_active)
-
-/*
  * Helper macros to handle lockdep when locking/unlocking console_sem. We use
  * macros instead of functions so that _RET_IP_ contains useful information.
  */
@@ -298,49 +271,14 @@ static bool panic_in_progress(void)
 }
 
 /*
- * Tracks whether kthread printers are all blocked. A value of true implies
- * that the console is locked via console_lock() or the console is suspended.
- * Writing to this variable requires holding @console_sem.
- */
-static bool console_kthreads_blocked;
-
-/*
- * Block all kthread printers from a schedulable context.
- *
- * Requires holding @console_sem.
- */
-static void console_kthreads_block(void)
-{
-       struct console *con;
-
-       for_each_console(con) {
-               mutex_lock(&con->lock);
-               con->blocked = true;
-               mutex_unlock(&con->lock);
-       }
-
-       console_kthreads_blocked = true;
-}
-
-/*
- * Unblock all kthread printers from a schedulable context.
- *
- * Requires holding @console_sem.
+ * This is used for debugging the mess that is the VT code by
+ * keeping track if we have the console semaphore held. It's
+ * definitely not the perfect debug tool (we don't know if _WE_
+ * hold it and are racing, but it helps tracking those weird code
+ * paths in the console code where we end up in places I want
+ * locked without the console semaphore held).
  */
-static void console_kthreads_unblock(void)
-{
-       struct console *con;
-
-       for_each_console(con) {
-               mutex_lock(&con->lock);
-               con->blocked = false;
-               mutex_unlock(&con->lock);
-       }
-
-       console_kthreads_blocked = false;
-}
-
-static int console_suspended;
+static int console_locked, console_suspended;
 
 /*
  *     Array of consoles built from command line options (console=)
@@ -423,75 +361,7 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
 /* syslog_lock protects syslog_* variables and write access to clear_seq. */
 static DEFINE_MUTEX(syslog_lock);
 
-/*
- * A flag to signify if printk_activate_kthreads() has already started the
- * kthread printers. If true, any later registered consoles must start their
- * own kthread directly. The flag is write protected by the console_lock.
- */
-static bool printk_kthreads_available;
-
 #ifdef CONFIG_PRINTK
-static atomic_t printk_prefer_direct = ATOMIC_INIT(0);
-
-/**
- * printk_prefer_direct_enter - cause printk() calls to attempt direct
- *                              printing to all enabled consoles
- *
- * Since it is not possible to call into the console printing code from any
- * context, there is no guarantee that direct printing will occur.
- *
- * This globally effects all printk() callers.
- *
- * Context: Any context.
- */
-void printk_prefer_direct_enter(void)
-{
-       atomic_inc(&printk_prefer_direct);
-}
-
-/**
- * printk_prefer_direct_exit - restore printk() behavior
- *
- * Context: Any context.
- */
-void printk_prefer_direct_exit(void)
-{
-       WARN_ON(atomic_dec_if_positive(&printk_prefer_direct) < 0);
-}
-
-/*
- * Calling printk() always wakes kthread printers so that they can
- * flush the new message to their respective consoles. Also, if direct
- * printing is allowed, printk() tries to flush the messages directly.
- *
- * Direct printing is allowed in situations when the kthreads
- * are not available or the system is in a problematic state.
- *
- * See the implementation about possible races.
- */
-static inline bool allow_direct_printing(void)
-{
-       /*
-        * Checking kthread availability is a possible race because the
-        * kthread printers can become permanently disabled during runtime.
-        * However, doing that requires holding the console_lock, so any
-        * pending messages will be direct printed by console_unlock().
-        */
-       if (!printk_kthreads_available)
-               return true;
-
-       /*
-        * Prefer direct printing when the system is in a problematic state.
-        * The context that sets this state will always see the updated value.
-        * The other contexts do not care. Anyway, direct printing is just a
-        * best effort. The direct output is only possible when console_lock
-        * is not already taken and no kthread printers are actively printing.
-        */
-       return (system_state > SYSTEM_RUNNING ||
-               oops_in_progress ||
-               atomic_read(&printk_prefer_direct));
-}
-
 DECLARE_WAIT_QUEUE_HEAD(log_wait);
 /* All 3 protected by @syslog_lock. */
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
@@ -2382,10 +2252,10 @@ asmlinkage int vprintk_emit(int facility, int level,
        printed_len = vprintk_store(facility, level, dev_info, fmt, args);
 
        /* If called from the scheduler, we can not call up(). */
-       if (!in_sched && allow_direct_printing()) {
+       if (!in_sched) {
                /*
                 * The caller may be holding system-critical or
-                * timing-sensitive locks. Disable preemption during direct
+                * timing-sensitive locks. Disable preemption during
                 * printing of all remaining records to all consoles so that
                 * this context can return as soon as possible. Hopefully
                 * another printk() caller will take over the printing.
@@ -2428,8 +2298,6 @@ EXPORT_SYMBOL(_printk);
 
 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
 
-static void printk_start_kthread(struct console *con);
-
 #else /* CONFIG_PRINTK */
 
 #define CONSOLE_LOG_MAX                0
@@ -2463,8 +2331,6 @@ static void call_console_driver(struct console *con, const char *text, size_t le
 }
 static bool suppress_message_printing(int level) { return false; }
 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
-static void printk_start_kthread(struct console *con) { }
-static bool allow_direct_printing(void) { return true; }
 
 #endif /* CONFIG_PRINTK */
 
@@ -2683,14 +2549,6 @@ static int console_cpu_notify(unsigned int cpu)
                /* If trylock fails, someone else is doing the printing */
                if (console_trylock())
                        console_unlock();
-               else {
-                       /*
-                        * If a new CPU comes online, the conditions for
-                        * printer_should_wake() may have changed for some
-                        * kthread printer with !CON_ANYTIME.
-                        */
-                       wake_up_klogd();
-               }
        }
        return 0;
 }
@@ -2710,7 +2568,7 @@ void console_lock(void)
        down_console_sem();
        if (console_suspended)
                return;
-       console_kthreads_block();
+       console_locked = 1;
        console_may_schedule = 1;
 }
 EXPORT_SYMBOL(console_lock);
@@ -2731,30 +2589,15 @@ int console_trylock(void)
                up_console_sem();
                return 0;
        }
-       if (!console_kthreads_atomic_tryblock()) {
-               up_console_sem();
-               return 0;
-       }
+       console_locked = 1;
        console_may_schedule = 0;
        return 1;
 }
 EXPORT_SYMBOL(console_trylock);
 
-/*
- * This is used to help to make sure that certain paths within the VT code are
- * running with the console lock held. It is definitely not the perfect debug
- * tool (it is not known if the VT code is the task holding the console lock),
- * but it helps tracking those weird code paths in the console code such as
- * when the console is suspended: where the console is not locked but no
- * console printing may occur.
- *
- * Note: This returns true when the console is suspended but is not locked.
- *       This is intentional because the VT code must consider that situation
- *       the same as if the console was locked.
- */
 int is_console_locked(void)
 {
-       return (console_kthreads_blocked || atomic_read(&console_kthreads_active));
+       return console_locked;
 }
 EXPORT_SYMBOL(is_console_locked);
 
@@ -2777,9 +2620,18 @@ static bool abandon_console_lock_in_panic(void)
        return atomic_read(&panic_cpu) != raw_smp_processor_id();
 }
 
-static inline bool __console_is_usable(short flags)
+/*
+ * Check if the given console is currently capable and allowed to print
+ * records.
+ *
+ * Requires the console_lock.
+ */
+static inline bool console_is_usable(struct console *con)
 {
-       if (!(flags & CON_ENABLED))
+       if (!(con->flags & CON_ENABLED))
+               return false;
+
+       if (!con->write)
                return false;
 
        /*
@@ -2788,43 +2640,15 @@ static inline bool __console_is_usable(short flags)
         * cope (CON_ANYTIME) don't call them until this CPU is officially up.
         */
        if (!cpu_online(raw_smp_processor_id()) &&
-           !(flags & CON_ANYTIME))
+           !(con->flags & CON_ANYTIME))
                return false;
 
        return true;
 }
 
-/*
- * Check if the given console is currently capable and allowed to print
- * records.
- *
- * Requires holding the console_lock.
- */
-static inline bool console_is_usable(struct console *con)
-{
-       if (!con->write)
-               return false;
-
-       return __console_is_usable(con->flags);
-}
-
 static void __console_unlock(void)
 {
-       /*
-        * Depending on whether console_lock() or console_trylock() was used,
-        * appropriately allow the kthread printers to continue.
-        */
-       if (console_kthreads_blocked)
-               console_kthreads_unblock();
-       else
-               console_kthreads_atomic_unblock();
-
-       /*
-        * New records may have arrived while the console was locked.
-        * Wake the kthread printers to print them.
-        */
-       wake_up_klogd();
-
+       console_locked = 0;
        up_console_sem();
 }
 
@@ -2842,19 +2666,17 @@ static void __console_unlock(void)
  *
  * @handover will be set to true if a printk waiter has taken over the
  * console_lock, in which case the caller is no longer holding the
- * console_lock. Otherwise it is set to false. A NULL pointer may be provided
- * to disable allowing the console_lock to be taken over by a printk waiter.
+ * console_lock. Otherwise it is set to false.
  *
  * Returns false if the given console has no next record to print, otherwise
  * true.
  *
- * Requires the console_lock if @handover is non-NULL.
- * Requires con->lock otherwise.
+ * Requires the console_lock.
  */
-static bool __console_emit_next_record(struct console *con, char *text, char *ext_text,
-                                      char *dropped_text, bool *handover)
+static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
+                                    char *dropped_text, bool *handover)
 {
-       static atomic_t panic_console_dropped = ATOMIC_INIT(0);
+       static int panic_console_dropped;
        struct printk_info info;
        struct printk_record r;
        unsigned long flags;
@@ -2863,8 +2685,7 @@ static bool __console_emit_next_record(struct console *con, char *text, char *ex
 
        prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
 
-       if (handover)
-               *handover = false;
+       *handover = false;
 
        if (!prb_read_valid(prb, con->seq, &r))
                return false;
@@ -2872,8 +2693,7 @@ static bool __console_emit_next_record(struct console *con, char *text, char *ex
        if (con->seq != r.info->seq) {
                con->dropped += r.info->seq - con->seq;
                con->seq = r.info->seq;
-               if (panic_in_progress() &&
-                   atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) {
+               if (panic_in_progress() && panic_console_dropped++ > 10) {
                        suppress_panic_printk = 1;
                        pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
                }
@@ -2895,62 +2715,32 @@ static bool __console_emit_next_record(struct console *con, char *text, char *ex
                len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
        }
 
-       if (handover) {
-               /*
-                * While actively printing out messages, if another printk()
-                * were to occur on another CPU, it may wait for this one to
-                * finish. This task can not be preempted if there is a
-                * waiter waiting to take over.
-                *
-                * Interrupts are disabled because the hand over to a waiter
-                * must not be interrupted until the hand over is completed
-                * (@console_waiter is cleared).
-                */
-               printk_safe_enter_irqsave(flags);
-               console_lock_spinning_enable();
-
-               /* don't trace irqsoff print latency */
-               stop_critical_timings();
-       }
+       /*
+        * While actively printing out messages, if another printk()
+        * were to occur on another CPU, it may wait for this one to
+        * finish. This task can not be preempted if there is a
+        * waiter waiting to take over.
+        *
+        * Interrupts are disabled because the hand over to a waiter
+        * must not be interrupted until the hand over is completed
+        * (@console_waiter is cleared).
+        */
+       printk_safe_enter_irqsave(flags);
+       console_lock_spinning_enable();
 
+       stop_critical_timings();        /* don't trace print latency */
        call_console_driver(con, write_text, len, dropped_text);
+       start_critical_timings();
 
        con->seq++;
 
-       if (handover) {
-               start_critical_timings();
-               *handover = console_lock_spinning_disable_and_check();
-               printk_safe_exit_irqrestore(flags);
-       }
+       *handover = console_lock_spinning_disable_and_check();
+       printk_safe_exit_irqrestore(flags);
 skip:
        return true;
 }
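
With the kthread variant gone, every emit again runs the hand-over protocol
unconditionally. Reduced to its core, the protocol is a flag that a spinning
waiter raises and the owner atomically tests-and-clears after each record; a
compact C11 model of that core (not the kernel implementation, which also
handles irq masking and lockdep ownership transfer):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool console_waiter = false;

    /* Waiter: announce interest, then spin until the owner clears the
     * flag, which is the signal that ownership has been handed over. */
    static void spin_until_handover(void)
    {
            atomic_store(&console_waiter, true);
            while (atomic_load(&console_waiter))
                    ;                       /* busy-wait for the hand-over */
    }

    /* Owner: after printing one record, hand over if someone is spinning.
     * Returns true when the caller no longer owns the console lock. */
    static bool spinning_disable_and_check(void)
    {
            bool expected = true;

            return atomic_compare_exchange_strong(&console_waiter,
                                                  &expected, false);
    }
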
 
 /*
- * Print a record for a given console, but allow another printk() caller to
- * take over the console_lock and continue printing.
- *
- * Requires the console_lock, but depending on @handover after the call, the
- * caller may no longer have the console_lock.
- *
- * See __console_emit_next_record() for argument and return details.
- */
-static bool console_emit_next_record_transferable(struct console *con, char *text, char *ext_text,
-                                                 char *dropped_text, bool *handover)
-{
-       /*
-        * Handovers are only supported if threaded printers are atomically
-        * blocked. The context taking over the console_lock may be atomic.
-        */
-       if (!console_kthreads_atomically_blocked()) {
-               *handover = false;
-               handover = NULL;
-       }
-
-       return __console_emit_next_record(con, text, ext_text, dropped_text, handover);
-}
-
-/*
  * Print out all remaining records to all consoles.
  *
  * @do_cond_resched is set by the caller. It can be true only in schedulable
@@ -2968,8 +2758,8 @@ static bool console_emit_next_record_transferable(struct console *con, char *tex
  * were flushed to all usable consoles. A returned false informs the caller
  * that everything was not flushed (either there were no usable consoles or
  * another context has taken over printing or it is a panic situation and this
- * is not the panic CPU or direct printing is not preferred). Regardless the
- * reason, the caller should assume it is not useful to immediately try again.
+ * is not the panic CPU). Regardless of the reason, the caller should assume it
+ * is not useful to immediately try again.
  *
  * Requires the console_lock.
  */
@@ -2986,10 +2776,6 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
        *handover = false;
 
        do {
-               /* Let the kthread printers do the work if they can. */
-               if (!allow_direct_printing())
-                       return false;
-
                any_progress = false;
 
                for_each_console(con) {
@@ -3001,11 +2787,13 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
 
                        if (con->flags & CON_EXTENDED) {
                                /* Extended consoles do not print "dropped messages". */
-                               progress = console_emit_next_record_transferable(con, &text[0],
-                                                               &ext_text[0], NULL, handover);
+                               progress = console_emit_next_record(con, &text[0],
+                                                                   &ext_text[0], NULL,
+                                                                   handover);
                        } else {
-                               progress = console_emit_next_record_transferable(con, &text[0],
-                                                               NULL, &dropped_text[0], handover);
+                               progress = console_emit_next_record(con, &text[0],
+                                                                   NULL, &dropped_text[0],
+                                                                   handover);
                        }
                        if (*handover)
                                return false;
@@ -3120,13 +2908,10 @@ void console_unblank(void)
        if (oops_in_progress) {
                if (down_trylock_console_sem() != 0)
                        return;
-               if (!console_kthreads_atomic_tryblock()) {
-                       up_console_sem();
-                       return;
-               }
        } else
                console_lock();
 
+       console_locked = 1;
        console_may_schedule = 0;
        for_each_console(c)
                if ((c->flags & CON_ENABLED) && c->unblank)
@@ -3405,10 +3190,6 @@ void register_console(struct console *newcon)
                nr_ext_console_drivers++;
 
        newcon->dropped = 0;
-       newcon->thread = NULL;
-       newcon->blocked = true;
-       mutex_init(&newcon->lock);
-
        if (newcon->flags & CON_PRINTBUFFER) {
                /* Get a consistent copy of @syslog_seq. */
                mutex_lock(&syslog_lock);
@@ -3418,10 +3199,6 @@ void register_console(struct console *newcon)
                /* Begin with next message. */
                newcon->seq = prb_next_seq(prb);
        }
-
-       if (printk_kthreads_available)
-               printk_start_kthread(newcon);
-
        console_unlock();
        console_sysfs_notify();
 
@@ -3448,7 +3225,6 @@ EXPORT_SYMBOL(register_console);
 
 int unregister_console(struct console *console)
 {
-       struct task_struct *thd;
        struct console *con;
        int res;
 
@@ -3489,20 +3265,7 @@ int unregister_console(struct console *console)
                console_drivers->flags |= CON_CONSDEV;
 
        console->flags &= ~CON_ENABLED;
-
-       /*
-        * console->thread can only be cleared under the console lock. But
-        * stopping the thread must be done without the console lock. The
-        * task that clears @thread is the task that stops the kthread.
-        */
-       thd = console->thread;
-       console->thread = NULL;
-
        console_unlock();
-
-       if (thd)
-               kthread_stop(thd);
-
        console_sysfs_notify();
 
        if (console->exit)
@@ -3598,20 +3361,6 @@ static int __init printk_late_init(void)
 }
 late_initcall(printk_late_init);
 
-static int __init printk_activate_kthreads(void)
-{
-       struct console *con;
-
-       console_lock();
-       printk_kthreads_available = true;
-       for_each_console(con)
-               printk_start_kthread(con);
-       console_unlock();
-
-       return 0;
-}
-early_initcall(printk_activate_kthreads);
-
 #if defined CONFIG_PRINTK
 /* If @con is specified, only wait for that console. Otherwise wait for all. */
 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
@@ -3631,6 +3380,7 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
                diff = 0;
 
                console_lock();
+
                for_each_console(c) {
                        if (con && con != c)
                                continue;
@@ -3640,11 +3390,19 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
                        if (printk_seq < seq)
                                diff += seq - printk_seq;
                }
-               console_unlock();
 
-               if (diff != last_diff && reset_on_progress)
+               /*
+                * If consoles are suspended, it cannot be expected that they
+                * make forward progress, so timeout immediately. @diff is
+                * still used to return a valid flush status.
+                */
+               if (console_suspended)
+                       remaining = 0;
+               else if (diff != last_diff && reset_on_progress)
                        remaining = timeout_ms;
 
+               console_unlock();
+
                if (diff == 0 || remaining == 0)
                        break;
 
@@ -3686,206 +3444,11 @@ bool pr_flush(int timeout_ms, bool reset_on_progress)
 }
 EXPORT_SYMBOL(pr_flush);
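
__pr_flush() above is a poll loop whose timeout is refilled whenever the
consoles make progress, so a slow-but-moving console is not declared stuck.
The control flow, as a self-contained sketch (remaining_work() and the
100 ms poll interval are stand-ins, not the kernel's values):

    #include <unistd.h>

    /* Poll until no work remains, giving up after timeout_ms of *stalled*
     * time: any progress refills the budget when reset_on_progress is set. */
    static int flush_with_timeout(int timeout_ms, int reset_on_progress,
                                  long (*remaining_work)(void))
    {
            long last_diff = 0;
            int remaining = timeout_ms;

            for (;;) {
                    long diff = remaining_work();

                    if (diff == 0 || remaining <= 0)
                            return diff == 0;
                    if (reset_on_progress && diff != last_diff)
                            remaining = timeout_ms;
                    last_diff = diff;
                    usleep(100 * 1000);     /* 100 ms poll interval */
                    remaining -= 100;
            }
    }
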
 
-static void __printk_fallback_preferred_direct(void)
-{
-       printk_prefer_direct_enter();
-       pr_err("falling back to preferred direct printing\n");
-       printk_kthreads_available = false;
-}
-
-/*
- * Enter preferred direct printing, but never exit. Mark console threads as
- * unavailable. The system is then forever in preferred direct printing and
- * any printing threads will exit.
- *
- * Must *not* be called under console_lock. Use
- * __printk_fallback_preferred_direct() if already holding console_lock.
- */
-static void printk_fallback_preferred_direct(void)
-{
-       console_lock();
-       __printk_fallback_preferred_direct();
-       console_unlock();
-}
-
-/*
- * Print a record for a given console, not allowing another printk() caller
- * to take over. This is appropriate for contexts that do not have the
- * console_lock.
- *
- * See __console_emit_next_record() for argument and return details.
- */
-static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
-                                    char *dropped_text)
-{
-       return __console_emit_next_record(con, text, ext_text, dropped_text, NULL);
-}
-
-static bool printer_should_wake(struct console *con, u64 seq)
-{
-       short flags;
-
-       if (kthread_should_stop() || !printk_kthreads_available)
-               return true;
-
-       if (con->blocked ||
-           console_kthreads_atomically_blocked()) {
-               return false;
-       }
-
-       /*
-        * This is an unsafe read from con->flags, but a false positive is
-        * not a problem. Worst case it would allow the printer to wake up
-        * although it is disabled. But the printer will notice that when
-        * attempting to print and instead go back to sleep.
-        */
-       flags = data_race(READ_ONCE(con->flags));
-
-       if (!__console_is_usable(flags))
-               return false;
-
-       return prb_read_valid(prb, seq, NULL);
-}
-
-static int printk_kthread_func(void *data)
-{
-       struct console *con = data;
-       char *dropped_text = NULL;
-       char *ext_text = NULL;
-       u64 seq = 0;
-       char *text;
-       int error;
-
-       text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
-       if (!text) {
-               con_printk(KERN_ERR, con, "failed to allocate text buffer\n");
-               printk_fallback_preferred_direct();
-               goto out;
-       }
-
-       if (con->flags & CON_EXTENDED) {
-               ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
-               if (!ext_text) {
-                       con_printk(KERN_ERR, con, "failed to allocate ext_text buffer\n");
-                       printk_fallback_preferred_direct();
-                       goto out;
-               }
-       } else {
-               dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL);
-               if (!dropped_text) {
-                       con_printk(KERN_ERR, con, "failed to allocate dropped_text buffer\n");
-                       printk_fallback_preferred_direct();
-                       goto out;
-               }
-       }
-
-       con_printk(KERN_INFO, con, "printing thread started\n");
-
-       for (;;) {
-               /*
-                * Guarantee this task is visible on the waitqueue before
-                * checking the wake condition.
-                *
-                * The full memory barrier within set_current_state() of
-                * prepare_to_wait_event() pairs with the full memory barrier
-                * within wq_has_sleeper().
-                *
-                * This pairs with __wake_up_klogd:A.
-                */
-               error = wait_event_interruptible(log_wait,
-                               printer_should_wake(con, seq)); /* LMM(printk_kthread_func:A) */
-
-               if (kthread_should_stop() || !printk_kthreads_available)
-                       break;
-
-               if (error)
-                       continue;
-
-               error = mutex_lock_interruptible(&con->lock);
-               if (error)
-                       continue;
-
-               if (con->blocked ||
-                   !console_kthread_printing_tryenter()) {
-                       /* Another context has locked the console_lock. */
-                       mutex_unlock(&con->lock);
-                       continue;
-               }
-
-               /*
-                * Although this context has not locked the console_lock, it
-                * is known that the console_lock is not locked and it is not
-                * possible for any other context to lock the console_lock.
-                * Therefore it is safe to read con->flags.
-                */
-
-               if (!__console_is_usable(con->flags)) {
-                       console_kthread_printing_exit();
-                       mutex_unlock(&con->lock);
-                       continue;
-               }
-
-               /*
-                * Even though the printk kthread is always preemptible, it is
-                * still not allowed to call cond_resched() from within
-                * console drivers. The task may become non-preemptible in the
-                * console driver call chain. For example, vt_console_print()
-                * takes a spinlock and then can call into fbcon_redraw(),
-                * which can conditionally invoke cond_resched().
-                */
-               console_may_schedule = 0;
-               console_emit_next_record(con, text, ext_text, dropped_text);
-
-               seq = con->seq;
-
-               console_kthread_printing_exit();
-
-               mutex_unlock(&con->lock);
-       }
-
-       con_printk(KERN_INFO, con, "printing thread stopped\n");
-out:
-       kfree(dropped_text);
-       kfree(ext_text);
-       kfree(text);
-
-       console_lock();
-       /*
-        * If this kthread is being stopped by another task, con->thread will
-        * already be NULL. That is fine. The important thing is that it is
-        * NULL after the kthread exits.
-        */
-       con->thread = NULL;
-       console_unlock();
-
-       return 0;
-}
-
-/* Must be called under console_lock. */
-static void printk_start_kthread(struct console *con)
-{
-       /*
-        * Do not start a kthread if there is no write() callback. The
-        * kthreads assume the write() callback exists.
-        */
-       if (!con->write)
-               return;
-
-       con->thread = kthread_run(printk_kthread_func, con,
-                                 "pr/%s%d", con->name, con->index);
-       if (IS_ERR(con->thread)) {
-               con->thread = NULL;
-               con_printk(KERN_ERR, con, "unable to start printing thread\n");
-               __printk_fallback_preferred_direct();
-               return;
-       }
-}
-
 /*
  * Delayed printk version, for scheduler-internal messages:
  */
-#define PRINTK_PENDING_WAKEUP          0x01
-#define PRINTK_PENDING_DIRECT_OUTPUT   0x02
+#define PRINTK_PENDING_WAKEUP  0x01
+#define PRINTK_PENDING_OUTPUT  0x02
 
 static DEFINE_PER_CPU(int, printk_pending);
 
@@ -3893,14 +3456,10 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
 {
        int pending = this_cpu_xchg(printk_pending, 0);
 
-       if (pending & PRINTK_PENDING_DIRECT_OUTPUT) {
-               printk_prefer_direct_enter();
-
+       if (pending & PRINTK_PENDING_OUTPUT) {
                /* If trylock fails, someone else is doing the printing */
                if (console_trylock())
                        console_unlock();
-
-               printk_prefer_direct_exit();
        }
 
        if (pending & PRINTK_PENDING_WAKEUP)
@@ -3925,11 +3484,10 @@ static void __wake_up_klogd(int val)
         * prepare_to_wait_event(), which is called after ___wait_event() adds
         * the waiter but before it has checked the wait condition.
         *
-        * This pairs with devkmsg_read:A, syslog_print:A, and
-        * printk_kthread_func:A.
+        * This pairs with devkmsg_read:A and syslog_print:A.
         */
        if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
-           (val & PRINTK_PENDING_DIRECT_OUTPUT)) {
+           (val & PRINTK_PENDING_OUTPUT)) {
                this_cpu_or(printk_pending, val);
                irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
        }
@@ -3947,17 +3505,7 @@ void defer_console_output(void)
         * New messages may have been added directly to the ringbuffer
         * using vprintk_store(), so wake any waiters as well.
         */
-       int val = PRINTK_PENDING_WAKEUP;
-
-       /*
-        * Make sure that some context will print the messages when direct
-        * printing is allowed. This happens in situations when the kthreads
-        * may not be as reliable or perhaps unusable.
-        */
-       if (allow_direct_printing())
-               val |= PRINTK_PENDING_DIRECT_OUTPUT;
-
-       __wake_up_klogd(val);
+       __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
 }
 
 void printk_trigger_flush(void)
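
defer_console_output() relies on the pending-flags idiom: OR the work bits
into a per-CPU word, queue an irq_work, and let the handler consume every
bit at once. A single-word C11 rendering of the same idea (queue_irq_work()
is a stub standing in for irq_work_queue()):

    #include <stdatomic.h>

    #define PENDING_WAKEUP 0x01
    #define PENDING_OUTPUT 0x02

    static atomic_int printk_pending;

    static void queue_irq_work(void) { /* stand-in: schedules handler() */ }

    static void wake_up_klogd_sketch(int val)
    {
            atomic_fetch_or(&printk_pending, val);  /* publish the work bits */
            queue_irq_work();                       /* make sure handler runs */
    }

    static void handler(void)
    {
            int pending = atomic_exchange(&printk_pending, 0);

            if (pending & PENDING_OUTPUT)
                    ;       /* try console_trylock()/console_unlock() */
            if (pending & PENDING_WAKEUP)
                    ;       /* wake klogd/syslog readers */
    }
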
index 156a992..1893d90 100644
@@ -222,7 +222,7 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
        if (lock_task_sighand(task, &flags)) {
                task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
                if (__fatal_signal_pending(task)) {
-                       task->jobctl &= ~TASK_TRACED;
+                       task->jobctl &= ~JOBCTL_TRACED;
                        wake_up_state(task, __TASK_TRACED);
                }
                unlock_task_sighand(task, &flags);
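
The one-character fix above stands for a whole bug class: TASK_TRACED and
JOBCTL_TRACED live in different bit namespaces, so masking with the wrong
constant compiles silently and clears an unrelated bit. A sketch with
illustrative values only, not the kernel's actual layouts:

    #define TASK_TRACED    0x00000008u      /* task-state namespace */
    #define JOBCTL_TRACED  (1u << 24)       /* job-control namespace */

    static unsigned int clear_traced(unsigned int jobctl)
    {
            /* Buggy form: jobctl & ~TASK_TRACED clears bit 3 of the
             * job-control word and leaves JOBCTL_TRACED set. */
            return jobctl & ~JOBCTL_TRACED; /* correct namespace */
    }
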
index 50ba70f..1c304fe 100644
@@ -511,10 +511,52 @@ static bool srcu_readers_active(struct srcu_struct *ssp)
        return sum;
 }
 
-#define SRCU_INTERVAL          1       // Base delay if no expedited GPs pending.
-#define SRCU_MAX_INTERVAL      10      // Maximum incremental delay from slow readers.
-#define SRCU_MAX_NODELAY_PHASE 1       // Maximum per-GP-phase consecutive no-delay instances.
-#define SRCU_MAX_NODELAY       100     // Maximum consecutive no-delay instances.
+/*
+ * We use an adaptive strategy for synchronize_srcu() and especially for
+ * synchronize_srcu_expedited().  We spin for a fixed time period
+ * (defined below, boot-time configurable) to allow SRCU readers to exit
+ * their read-side critical sections.  If there are still some readers
+ * after one jiffy, we repeatedly block for one-jiffy time periods.
+ * The blocking time is increased as the grace-period age increases,
+ * with max blocking time capped at 10 jiffies.
+ */
+#define SRCU_DEFAULT_RETRY_CHECK_DELAY         5
+
+static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
+module_param(srcu_retry_check_delay, ulong, 0444);
+
+#define SRCU_INTERVAL          1               // Base delay if no expedited GPs pending.
+#define SRCU_MAX_INTERVAL      10              // Maximum incremental delay from slow readers.
+
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO      3UL     // Lowmark on default per-GP-phase
+                                                       // no-delay instances.
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI      1000UL  // Highmark on default per-GP-phase
+                                                       // no-delay instances.
+
+#define SRCU_UL_CLAMP_LO(val, low)     ((val) > (low) ? (val) : (low))
+#define SRCU_UL_CLAMP_HI(val, high)    ((val) < (high) ? (val) : (high))
+#define SRCU_UL_CLAMP(val, low, high)  SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))
+// Per-GP-phase no-delay instances adjusted to allow a non-sleeping poll of
+// up to one jiffy's duration. The factor of 2 accounts for the extra
+// srcu_get_delay() call made from process_srcu().
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED        \
+       (2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)
+
+// Maximum per-GP-phase consecutive no-delay instances.
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE \
+       SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED,  \
+                     SRCU_DEFAULT_MAX_NODELAY_PHASE_LO,        \
+                     SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)
+
+static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
+module_param(srcu_max_nodelay_phase, ulong, 0444);
+
+// Maximum consecutive no-delay instances.
+#define SRCU_DEFAULT_MAX_NODELAY       (SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ? \
+                                        SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)
+
+static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
+module_param(srcu_max_nodelay, ulong, 0444);
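
The clamp sizes the busy-poll phase to roughly one jiffy's worth of 5 us
polls (doubled for the extra srcu_get_delay() call). Working the arithmetic
for a few common HZ choices, as a quick check (these HZ values are assumed,
not taken from the patch):

    /* SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED = 2 * USEC_PER_SEC / HZ / 5
     *   HZ = 1000: 2 * 1000000 / 1000 / 5 =  400 -> within [3, 1000], kept
     *   HZ =  250: 2 * 1000000 /  250 / 5 = 1600 -> clamped down to 1000
     *   HZ =  100: 2 * 1000000 /  100 / 5 = 4000 -> clamped down to 1000
     * The low mark of 3 only matters for much larger retry delays. */
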
 
 /*
  * Return grace-period delay, zero if there are expedited grace
@@ -522,16 +564,22 @@ static bool srcu_readers_active(struct srcu_struct *ssp)
  */
 static unsigned long srcu_get_delay(struct srcu_struct *ssp)
 {
+       unsigned long gpstart;
+       unsigned long j;
        unsigned long jbase = SRCU_INTERVAL;
 
        if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
                jbase = 0;
-       if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)))
-               jbase += jiffies - READ_ONCE(ssp->srcu_gp_start);
-       if (!jbase) {
-               WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
-               if (READ_ONCE(ssp->srcu_n_exp_nodelay) > SRCU_MAX_NODELAY_PHASE)
-                       jbase = 1;
+       if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))) {
+               j = jiffies - 1;
+               gpstart = READ_ONCE(ssp->srcu_gp_start);
+               if (time_after(j, gpstart))
+                       jbase += j - gpstart;
+               if (!jbase) {
+                       WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
+                       if (READ_ONCE(ssp->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
+                               jbase = 1;
+               }
        }
        return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
 }
@@ -607,15 +655,6 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
 /*
- * We use an adaptive strategy for synchronize_srcu() and especially for
- * synchronize_srcu_expedited().  We spin for a fixed time period
- * (defined below) to allow SRCU readers to exit their read-side critical
- * sections.  If there are still some readers after a few microseconds,
- * we repeatedly block for 1-millisecond time periods.
- */
-#define SRCU_RETRY_CHECK_DELAY         5
-
-/*
  * Start an SRCU grace period.
  */
 static void srcu_gp_start(struct srcu_struct *ssp)
@@ -700,7 +739,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp
  */
 static void srcu_gp_end(struct srcu_struct *ssp)
 {
-       unsigned long cbdelay;
+       unsigned long cbdelay = 1;
        bool cbs;
        bool last_lvl;
        int cpu;
@@ -720,7 +759,9 @@ static void srcu_gp_end(struct srcu_struct *ssp)
        spin_lock_irq_rcu_node(ssp);
        idx = rcu_seq_state(ssp->srcu_gp_seq);
        WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
-       cbdelay = !!srcu_get_delay(ssp);
+       if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
+               cbdelay = 0;
+
        WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
        rcu_seq_end(&ssp->srcu_gp_seq);
        gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
@@ -921,12 +962,16 @@ static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
  */
 static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
 {
+       unsigned long curdelay;
+
+       curdelay = !srcu_get_delay(ssp);
+
        for (;;) {
                if (srcu_readers_active_idx_check(ssp, idx))
                        return true;
-               if (--trycount + !srcu_get_delay(ssp) <= 0)
+               if ((--trycount + curdelay) <= 0)
                        return false;
-               udelay(SRCU_RETRY_CHECK_DELAY);
+               udelay(srcu_retry_check_delay);
        }
 }
 
@@ -1582,7 +1627,7 @@ static void process_srcu(struct work_struct *work)
                j = jiffies;
                if (READ_ONCE(ssp->reschedule_jiffies) == j) {
                        WRITE_ONCE(ssp->reschedule_count, READ_ONCE(ssp->reschedule_count) + 1);
-                       if (READ_ONCE(ssp->reschedule_count) > SRCU_MAX_NODELAY)
+                       if (READ_ONCE(ssp->reschedule_count) > srcu_max_nodelay)
                                curdelay = 1;
                } else {
                        WRITE_ONCE(ssp->reschedule_count, 1);
@@ -1674,6 +1719,11 @@ static int __init srcu_bootup_announce(void)
        pr_info("Hierarchical SRCU implementation.\n");
        if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
                pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
+       if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
+               pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
+       if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
+               pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
+       pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
        return 0;
 }
 early_initcall(srcu_bootup_announce);
index 4995c07..a001e1e 100644
@@ -647,7 +647,6 @@ static void print_cpu_stall(unsigned long gps)
         * See Documentation/RCU/stallwarn.rst for info on how to debug
         * RCU CPU stall warnings.
         */
-       printk_prefer_direct_enter();
        trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
        pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
        raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
@@ -685,7 +684,6 @@ static void print_cpu_stall(unsigned long gps)
         */
        set_tsk_need_resched(current);
        set_preempt_need_resched();
-       printk_prefer_direct_exit();
 }
 
 static void check_cpu_stall(struct rcu_data *rdp)
index b5a71d1..3c35445 100644
@@ -819,11 +819,9 @@ static int __orderly_reboot(void)
        ret = run_cmd(reboot_cmd);
 
        if (ret) {
-               printk_prefer_direct_enter();
                pr_warn("Failed to start orderly reboot: forcing the issue\n");
                emergency_sync();
                kernel_restart(NULL);
-               printk_prefer_direct_exit();
        }
 
        return ret;
@@ -836,7 +834,6 @@ static int __orderly_poweroff(bool force)
        ret = run_cmd(poweroff_cmd);
 
        if (ret && force) {
-               printk_prefer_direct_enter();
                pr_warn("Failed to start orderly shutdown: forcing the issue\n");
 
                /*
@@ -846,7 +843,6 @@ static int __orderly_poweroff(bool force)
                 */
                emergency_sync();
                kernel_power_off();
-               printk_prefer_direct_exit();
        }
 
        return ret;
@@ -904,8 +900,6 @@ EXPORT_SYMBOL_GPL(orderly_reboot);
  */
 static void hw_failure_emergency_poweroff_func(struct work_struct *work)
 {
-       printk_prefer_direct_enter();
-
        /*
         * We have reached here after the emergency shutdown waiting period has
         * expired. This means orderly_poweroff has not been able to shut off
@@ -922,8 +916,6 @@ static void hw_failure_emergency_poweroff_func(struct work_struct *work)
         */
        pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
        emergency_restart();
-
-       printk_prefer_direct_exit();
 }
 
 static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
@@ -962,13 +954,11 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
 {
        static atomic_t allow_proceed = ATOMIC_INIT(1);
 
-       printk_prefer_direct_enter();
-
        pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);
 
        /* Shutdown should be initiated only once. */
        if (!atomic_dec_and_test(&allow_proceed))
-               goto out;
+               return;
 
        /*
         * Queue a backup emergency shutdown in the event of
@@ -976,8 +966,6 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
         */
        hw_failure_emergency_poweroff(ms_until_forced);
        orderly_poweroff(true);
-out:
-       printk_prefer_direct_exit();
 }
 EXPORT_SYMBOL_GPL(hw_protection_shutdown);
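
The allow_proceed counter is a one-shot gate: the first caller decrements it
from 1 to 0 and proceeds; every later caller returns immediately. A minimal
C11 equivalent of the atomic_dec_and_test() guard:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int allow_proceed = 1;

    /* First caller takes the counter from 1 to 0 and wins; all later
     * callers (and re-entries) get false. */
    static bool try_once(void)
    {
            return atomic_fetch_sub(&allow_proceed, 1) == 1;
    }
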
 
index a463dbc..5555e49 100644
@@ -4798,25 +4798,55 @@ static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
 
 static void balance_push(struct rq *rq);
 
+/*
+ * balance_push_callback is a right abuse of the callback interface and plays
+ * by significantly different rules.
+ *
+ * Where the normal balance_callback's purpose is to be run in the same context
+ * that queued it (only later, when it's safe to drop rq->lock again),
+ * balance_push_callback is specifically targeted at __schedule().
+ *
+ * This abuse is tolerated because it places all the unlikely/odd cases behind
+ * a single test, namely: rq->balance_callback == NULL.
+ */
 struct callback_head balance_push_callback = {
        .next = NULL,
        .func = (void (*)(struct callback_head *))balance_push,
 };
 
-static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+static inline struct callback_head *
+__splice_balance_callbacks(struct rq *rq, bool split)
 {
        struct callback_head *head = rq->balance_callback;
 
+       if (likely(!head))
+               return NULL;
+
        lockdep_assert_rq_held(rq);
-       if (head)
+       /*
+        * Must not take balance_push_callback off the list when
+        * splice_balance_callbacks() and balance_callbacks() are not
+        * in the same rq->lock section.
+        *
+        * In that case it would be possible for __schedule() to interleave
+        * and observe the list empty.
+        */
+       if (split && head == &balance_push_callback)
+               head = NULL;
+       else
                rq->balance_callback = NULL;
 
        return head;
 }
 
+static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+{
+       return __splice_balance_callbacks(rq, true);
+}
+
 static void __balance_callbacks(struct rq *rq)
 {
-       do_balance_callbacks(rq, splice_balance_callbacks(rq));
+       do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
 }
 
 static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
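
The split flag exists because balance_push_callback is a sentinel parked on
rq->balance_callback: if a splice in one rq->lock section removed it, a later
__schedule() in a different section would observe an empty list and skip the
push. A stripped-down model of the rule (names here are stand-ins):

    #include <stddef.h>

    struct cb { struct cb *next; };

    static struct cb balance_push_sentinel;

    static struct cb *splice_sketch(struct cb **head, int split)
    {
            struct cb *h = *head;

            if (!h)
                    return NULL;
            /* When the splice and the run happen in different lock
             * sections, the sentinel must stay visible on the list. */
            if (split && h == &balance_push_sentinel)
                    return NULL;
            *head = NULL;
            return h;
    }
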
index 5867e18..0ab79d8 100644
@@ -1703,7 +1703,10 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
                 * the throttle.
                 */
                p->dl.dl_throttled = 0;
-               BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
+               if (!(flags & ENQUEUE_REPLENISH))
+                       printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
+                                            task_pid_nr(p));
+
                return;
        }
 
index 73ae328..aad7f5e 100644
@@ -1742,6 +1742,11 @@ queue_balance_callback(struct rq *rq,
 {
        lockdep_assert_rq_held(rq);
 
+       /*
+        * Don't (re)queue an already queued item; nor queue anything when
+        * balance_push() is active, see the comment with
+        * balance_push_callback.
+        */
        if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
                return;
 
index edb1dc9..6f86fda 100644
@@ -2029,12 +2029,12 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
        bool autoreap = false;
        u64 utime, stime;
 
-       BUG_ON(sig == -1);
+       WARN_ON_ONCE(sig == -1);
 
-       /* do_notify_parent_cldstop should have been called instead.  */
-       BUG_ON(task_is_stopped_or_traced(tsk));
+       /* do_notify_parent_cldstop should have been called instead.  */
+       WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
 
-       BUG_ON(!tsk->ptrace &&
+       WARN_ON_ONCE(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));
 
        /* Wake up all pidfd waiters */
index e52b6e3..35d0342 100644
@@ -446,14 +446,14 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp,
                if (*negp) {
                        if (*lvalp > (unsigned long) INT_MAX + 1)
                                return -EINVAL;
-                       *valp = -*lvalp;
+                       WRITE_ONCE(*valp, -*lvalp);
                } else {
                        if (*lvalp > (unsigned long) INT_MAX)
                                return -EINVAL;
-                       *valp = *lvalp;
+                       WRITE_ONCE(*valp, *lvalp);
                }
        } else {
-               int val = *valp;
+               int val = READ_ONCE(*valp);
                if (val < 0) {
                        *negp = true;
                        *lvalp = -(unsigned long)val;
@@ -472,9 +472,9 @@ static int do_proc_douintvec_conv(unsigned long *lvalp,
        if (write) {
                if (*lvalp > UINT_MAX)
                        return -EINVAL;
-               *valp = *lvalp;
+               WRITE_ONCE(*valp, *lvalp);
        } else {
-               unsigned int val = *valp;
+               unsigned int val = READ_ONCE(*valp);
                *lvalp = (unsigned long)val;
        }
        return 0;
@@ -857,7 +857,7 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
                if ((param->min && *param->min > tmp) ||
                    (param->max && *param->max < tmp))
                        return -EINVAL;
-               *valp = tmp;
+               WRITE_ONCE(*valp, tmp);
        }
 
        return 0;
@@ -923,7 +923,7 @@ static int do_proc_douintvec_minmax_conv(unsigned long *lvalp,
                    (param->max && *param->max < tmp))
                        return -ERANGE;
 
-               *valp = tmp;
+               WRITE_ONCE(*valp, tmp);
        }
 
        return 0;
@@ -1007,13 +1007,13 @@ int proc_dou8vec_minmax(struct ctl_table *table, int write,
 
        tmp.maxlen = sizeof(val);
        tmp.data = &val;
-       val = *data;
+       val = READ_ONCE(*data);
        res = do_proc_douintvec(&tmp, write, buffer, lenp, ppos,
                                do_proc_douintvec_minmax_conv, &param);
        if (res)
                return res;
        if (write)
-               *data = val;
+               WRITE_ONCE(*data, val);
        return 0;
 }
 EXPORT_SYMBOL_GPL(proc_dou8vec_minmax);
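
The WRITE_ONCE()/READ_ONCE() churn in this file annotates sysctl integers
that are read locklessly by fast paths, preventing the compiler from tearing
or refetching the value. The same contract expressed in portable C11 is a
relaxed atomic (a sketch of the idea, not the kernel macros):

    #include <stdatomic.h>

    static _Atomic int tunable;

    /* sysctl write path: whole-value store, never torn */
    static void tunable_write(int v)
    {
            atomic_store_explicit(&tunable, v, memory_order_relaxed);
    }

    /* lockless reader: one coherent load per use */
    static int tunable_read(void)
    {
            return atomic_load_explicit(&tunable, memory_order_relaxed);
    }
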
@@ -1090,9 +1090,9 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table,
                                err = -EINVAL;
                                break;
                        }
-                       *i = val;
+                       WRITE_ONCE(*i, val);
                } else {
-                       val = convdiv * (*i) / convmul;
+                       val = convdiv * READ_ONCE(*i) / convmul;
                        if (!first)
                                proc_put_char(&buffer, &left, '\t');
                        proc_put_long(&buffer, &left, val, false);
@@ -1173,9 +1173,12 @@ static int do_proc_dointvec_jiffies_conv(bool *negp, unsigned long *lvalp,
        if (write) {
                if (*lvalp > INT_MAX / HZ)
                        return 1;
-               *valp = *negp ? -(*lvalp*HZ) : (*lvalp*HZ);
+               if (*negp)
+                       WRITE_ONCE(*valp, -*lvalp * HZ);
+               else
+                       WRITE_ONCE(*valp, *lvalp * HZ);
        } else {
-               int val = *valp;
+               int val = READ_ONCE(*valp);
                unsigned long lval;
                if (val < 0) {
                        *negp = true;
@@ -1221,9 +1224,9 @@ static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp,
 
                if (jif > INT_MAX)
                        return 1;
-               *valp = (int)jif;
+               WRITE_ONCE(*valp, (int)jif);
        } else {
-               int val = *valp;
+               int val = READ_ONCE(*valp);
                unsigned long lval;
                if (val < 0) {
                        *negp = true;
@@ -1291,8 +1294,8 @@ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write,
  * @ppos: the current position in the file
  *
  * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
- * values from/to the user buffer, treated as an ASCII string. 
- * The values read are assumed to be in 1/1000 seconds, and 
+ * values from/to the user buffer, treated as an ASCII string.
+ * The values read are assumed to be in 1/1000 seconds, and
  * are converted into jiffies.
  *
  * Returns 0 on success.
@@ -2091,6 +2094,17 @@ static struct ctl_table vm_table[] = {
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_TWO_HUNDRED,
        },
+#ifdef CONFIG_NUMA
+       {
+               .procname       = "numa_stat",
+               .data           = &sysctl_vm_numa_stat,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = sysctl_vm_numa_stat_handler,
+               .extra1         = SYSCTL_ZERO,
+               .extra2         = SYSCTL_ONE,
+       },
+#endif
 #ifdef CONFIG_HUGETLB_PAGE
        {
                .procname       = "nr_hugepages",
@@ -2107,15 +2121,6 @@ static struct ctl_table vm_table[] = {
                .mode           = 0644,
                .proc_handler   = &hugetlb_mempolicy_sysctl_handler,
        },
-       {
-               .procname               = "numa_stat",
-               .data                   = &sysctl_vm_numa_stat,
-               .maxlen                 = sizeof(int),
-               .mode                   = 0644,
-               .proc_handler   = sysctl_vm_numa_stat_handler,
-               .extra1                 = SYSCTL_ZERO,
-               .extra2                 = SYSCTL_ONE,
-       },
 #endif
         {
                .procname       = "hugetlb_shm_group",
index 1cd10b1..5dead89 100644
@@ -1051,15 +1051,24 @@ retry_delete:
 }
 
 /*
- * This is called by do_exit or de_thread, only when there are no more
- * references to the shared signal_struct.
+ * This is called by do_exit or de_thread, only when nobody else can
+ * modify the signal->posix_timers list. Yet we need sighand->siglock
+ * to prevent the race with /proc/pid/timers.
  */
-void exit_itimers(struct signal_struct *sig)
+void exit_itimers(struct task_struct *tsk)
 {
+       struct list_head timers;
        struct k_itimer *tmr;
 
-       while (!list_empty(&sig->posix_timers)) {
-               tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
+       if (list_empty(&tsk->signal->posix_timers))
+               return;
+
+       spin_lock_irq(&tsk->sighand->siglock);
+       list_replace_init(&tsk->signal->posix_timers, &timers);
+       spin_unlock_irq(&tsk->sighand->siglock);
+
+       while (!list_empty(&timers)) {
+               tmr = list_first_entry(&timers, struct k_itimer, list);
                itimer_delete(tmr);
        }
 }
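
The reworked exit_itimers() is the detach-then-drain pattern: O(1) work
under the lock (a single list splice), all destruction outside it, and
concurrent readers such as /proc/pid/timers only ever see the list whole or
empty. A generic sketch of the pattern with a plain mutex:

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *shared_list;

    static void drain(void)
    {
            struct node *local;

            pthread_mutex_lock(&lock);
            local = shared_list;            /* detach in O(1) */
            shared_list = NULL;
            pthread_mutex_unlock(&lock);

            while (local) {                 /* destroy without the lock */
                    struct node *next = local->next;
                    free(local);
                    local = next;
            }
    }
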
index 58a11f8..3004958 100644
@@ -526,7 +526,6 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask)
        cpumask_copy(tick_nohz_full_mask, cpumask);
        tick_nohz_full_running = true;
 }
-EXPORT_SYMBOL_GPL(tick_nohz_full_setup);
 
 static int tick_nohz_cpu_down(unsigned int cpu)
 {
index debbbb0..ccd6a5a 100644
@@ -194,7 +194,8 @@ config FUNCTION_TRACER
          sequence is then dynamically patched into a tracer call when
          tracing is enabled by the administrator. If it's runtime disabled
          (the bootup default), then the overhead of the instructions is very
-         small and not measurable even in micro-benchmarks.
+         small and not measurable even in micro-benchmarks (at least on
+         x86, but may have impact on other architectures).
 
 config FUNCTION_GRAPH_TRACER
        bool "Kernel Function Graph Tracer"
index 10a32b0..fe04c6f 100644
@@ -770,14 +770,11 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
  **/
 void blk_trace_shutdown(struct request_queue *q)
 {
-       mutex_lock(&q->debugfs_mutex);
        if (rcu_dereference_protected(q->blk_trace,
                                      lockdep_is_held(&q->debugfs_mutex))) {
                __blk_trace_startstop(q, 0);
                __blk_trace_remove(q);
        }
-
-       mutex_unlock(&q->debugfs_mutex);
 }
 
 #ifdef CONFIG_BLK_CGROUP
index 7a13e6a..88589d7 100644
@@ -2423,7 +2423,7 @@ kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip,
        kprobe_multi_link_prog_run(link, entry_ip, regs);
 }
 
-static int symbols_cmp(const void *a, const void *b)
+static int symbols_cmp_r(const void *a, const void *b, const void *priv)
 {
        const char **str_a = (const char **) a;
        const char **str_b = (const char **) b;
@@ -2431,6 +2431,28 @@ static int symbols_cmp(const void *a, const void *b)
        return strcmp(*str_a, *str_b);
 }
 
+struct multi_symbols_sort {
+       const char **funcs;
+       u64 *cookies;
+};
+
+static void symbols_swap_r(void *a, void *b, int size, const void *priv)
+{
+       const struct multi_symbols_sort *data = priv;
+       const char **name_a = a, **name_b = b;
+
+       swap(*name_a, *name_b);
+
+       /* If defined, also swap the related cookies. */
+       if (data->cookies) {
+               u64 *cookie_a, *cookie_b;
+
+               cookie_a = data->cookies + (name_a - data->funcs);
+               cookie_b = data->cookies + (name_b - data->funcs);
+               swap(*cookie_a, *cookie_b);
+       }
+}
+
 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
        struct bpf_kprobe_multi_link *link = NULL;
@@ -2468,38 +2490,46 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
        if (!addrs)
                return -ENOMEM;
 
+       ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
+       if (ucookies) {
+               cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
+               if (!cookies) {
+                       err = -ENOMEM;
+                       goto error;
+               }
+               if (copy_from_user(cookies, ucookies, size)) {
+                       err = -EFAULT;
+                       goto error;
+               }
+       }
+
        if (uaddrs) {
                if (copy_from_user(addrs, uaddrs, size)) {
                        err = -EFAULT;
                        goto error;
                }
        } else {
+               struct multi_symbols_sort data = {
+                       .cookies = cookies,
+               };
                struct user_syms us;
 
                err = copy_user_syms(&us, usyms, cnt);
                if (err)
                        goto error;
 
-               sort(us.syms, cnt, sizeof(*us.syms), symbols_cmp, NULL);
+               if (cookies)
+                       data.funcs = us.syms;
+
+               sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
+                      symbols_swap_r, &data);
+
                err = ftrace_lookup_symbols(us.syms, cnt, addrs);
                free_user_syms(&us);
                if (err)
                        goto error;
        }
 
-       ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
-       if (ucookies) {
-               cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
-               if (!cookies) {
-                       err = -ENOMEM;
-                       goto error;
-               }
-               if (copy_from_user(cookies, ucookies, size)) {
-                       err = -EFAULT;
-                       goto error;
-               }
-       }
-
        link = kzalloc(sizeof(*link), GFP_KERNEL);
        if (!link) {
                err = -ENOMEM;
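
Switching from sort() to sort_r() matters because the swap callback receives the priv pointer, letting the per-symbol cookies be permuted in lockstep with the names. A user-space sketch of that paired sort, with a plain insertion sort standing in for the kernel's sort_r() (all symbol names and cookie values are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct multi_symbols_sort {
    const char **funcs;
    uint64_t *cookies;
};

/* Mirrors symbols_swap_r(): swap two names and, if present, the
 * cookies sitting at the same indexes. */
static void swap_syms(const char **a, const char **b,
                      struct multi_symbols_sort *d)
{
    const char *tmp = *a;

    *a = *b;
    *b = tmp;
    if (d->cookies) {
        uint64_t *ca = d->cookies + (a - d->funcs);
        uint64_t *cb = d->cookies + (b - d->funcs);
        uint64_t t = *ca;

        *ca = *cb;
        *cb = t;
    }
}

int main(void)
{
    const char *syms[] = { "vfs_read", "do_exit", "schedule" };
    uint64_t cookies[] = { 10, 20, 30 };
    struct multi_symbols_sort data = { syms, cookies };
    size_t n = sizeof(cookies) / sizeof(cookies[0]);

    for (size_t i = 1; i < n; i++)      /* insertion sort, for brevity */
        for (size_t j = i; j > 0 && strcmp(syms[j - 1], syms[j]) > 0; j--)
            swap_syms(&syms[j - 1], &syms[j], &data);

    for (size_t i = 0; i < n; i++)
        printf("%s -> cookie %llu\n", syms[i],
               (unsigned long long)cookies[i]);
    return 0;
}

The sorted output keeps each cookie with its symbol (do_exit/20, schedule/30, vfs_read/10), which is why the cookie copy_from_user() had to move ahead of the sort in the hunk above.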
index e750fe1..601ccf1 100644 (file)
@@ -8029,15 +8029,23 @@ static int kallsyms_callback(void *data, const char *name,
                             struct module *mod, unsigned long addr)
 {
        struct kallsyms_data *args = data;
+       const char **sym;
+       int idx;
 
-       if (!bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp))
+       sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
+       if (!sym)
+               return 0;
+
+       idx = sym - args->syms;
+       if (args->addrs[idx])
                return 0;
 
        addr = ftrace_location(addr);
        if (!addr)
                return 0;
 
-       args->addrs[args->found++] = addr;
+       args->addrs[idx] = addr;
+       args->found++;
        return args->found == args->cnt ? 1 : 0;
 }
 
@@ -8062,6 +8070,7 @@ int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *a
        struct kallsyms_data args;
        int err;
 
+       memset(addrs, 0, sizeof(*addrs) * cnt);
        args.addrs = addrs;
        args.syms = sorted_syms;
        args.cnt = cnt;
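
With addrs[] pre-zeroed (the new memset() above) and each hit written at the index bsearch() returns, the address array stays parallel to the sorted symbol array, and duplicate kallsyms entries are skipped instead of shifting later results. A small user-space model of the lookup (symbol names and addresses are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int symbols_cmp(const void *a, const void *b)
{
    return strcmp(*(const char * const *)a, *(const char * const *)b);
}

int main(void)
{
    const char *sorted_syms[] = { "do_exit", "schedule", "vfs_read" };
    unsigned long addrs[3] = { 0 };     /* pre-zeroed, as after memset() */
    size_t cnt = 3, found = 0;

    /* Simulated kallsyms walk: (name, address) pairs in arbitrary order,
     * with one duplicate name. */
    const char *walk_names[] = { "vfs_read", "schedule", "schedule", "do_exit" };
    unsigned long walk_addrs[] = { 0x3000, 0x2000, 0x2222, 0x1000 };

    for (size_t i = 0; i < 4 && found < cnt; i++) {
        const char **sym = bsearch(&walk_names[i], sorted_syms, cnt,
                                   sizeof(*sorted_syms), symbols_cmp);
        size_t idx;

        if (!sym)
            continue;
        idx = sym - sorted_syms;
        if (addrs[idx])                 /* duplicate symbol: keep the first */
            continue;
        addrs[idx] = walk_addrs[i];
        found++;
    }

    for (size_t i = 0; i < cnt; i++)
        printf("%s = %#lx\n", sorted_syms[i], addrs[i]);
    return 0;
}

This pairs do_exit with 0x1000, schedule with 0x2000 (the 0x2222 duplicate is ignored) and vfs_read with 0x3000, regardless of the order the walk produced them in.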
index b568337..c69d822 100644 (file)
@@ -154,6 +154,15 @@ struct rethook_node *rethook_try_get(struct rethook *rh)
        if (unlikely(!handler))
                return NULL;
 
+       /*
+        * This expects the caller to set up a rethook on a function entry.
+        * When the function returns, the rethook will eventually be reclaimed
+        * or released via call_rcu() in rethook_recycle().
+        * This means the caller must run in an RCU-available context.
+        */
+       if (unlikely(!rcu_is_watching()))
+               return NULL;
+
        fn = freelist_try_get(&rh->pool);
        if (!fn)
                return NULL;
index 2c95992..b8dd546 100644 (file)
@@ -6424,9 +6424,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
                synchronize_rcu();
                free_snapshot(tr);
        }
-#endif
 
-#ifdef CONFIG_TRACER_MAX_TRACE
        if (t->use_max_tr && !had_max_tr) {
                ret = tracing_alloc_snapshot_instance(tr);
                if (ret < 0)
@@ -9866,6 +9864,12 @@ void trace_init_global_iter(struct trace_iterator *iter)
        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
        if (trace_clocks[iter->tr->clock_id].in_ns)
                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
+
+       /* Cannot use kmalloc for iter.temp and iter.fmt */
+       iter->temp = static_temp_buf;
+       iter->temp_size = STATIC_TEMP_BUF_SIZE;
+       iter->fmt = static_fmt_buf;
+       iter->fmt_size = STATIC_FMT_BUF_SIZE;
 }
 
 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
@@ -9898,11 +9902,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 
        /* Simulate the iterator */
        trace_init_global_iter(&iter);
-       /* Can not use kmalloc for iter.temp and iter.fmt */
-       iter.temp = static_temp_buf;
-       iter.temp_size = STATIC_TEMP_BUF_SIZE;
-       iter.fmt = static_fmt_buf;
-       iter.fmt_size = STATIC_FMT_BUF_SIZE;
 
        for_each_tracing_cpu(cpu) {
                atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
index 48e82e1..e87a467 100644 (file)
@@ -4430,6 +4430,8 @@ static int parse_var_defs(struct hist_trigger_data *hist_data)
 
                        s = kstrdup(field_str, GFP_KERNEL);
                        if (!s) {
+                               kfree(hist_data->attrs->var_defs.name[n_vars]);
+                               hist_data->attrs->var_defs.name[n_vars] = NULL;
                                ret = -ENOMEM;
                                goto free;
                        }
index 9350733..a245ea6 100644 (file)
@@ -1718,8 +1718,17 @@ static int
 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
        struct kretprobe *rp = get_kretprobe(ri);
-       struct trace_kprobe *tk = container_of(rp, struct trace_kprobe, rp);
+       struct trace_kprobe *tk;
+
+       /*
+        * There is a small chance that get_kretprobe(ri) returns NULL when
+        * the kretprobe is unregistered on another CPU between the kretprobe's
+        * trampoline_handler and this function.
+        */
+       if (unlikely(!rp))
+               return 0;
 
+       tk = container_of(rp, struct trace_kprobe, rp);
        raw_cpu_inc(*tk->nhit);
 
        if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
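
The NULL check has to come before container_of(): applied to a NULL pointer, container_of() just subtracts the member offset and yields a bogus non-NULL pointer, so the old code dereferenced garbage instead of failing cleanly. A self-contained illustration (the struct layout here is hypothetical):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct kretprobe { int dummy; };
struct trace_kprobe {
    long nhit;
    struct kretprobe rp;
};

static int dispatch(struct kretprobe *rp)
{
    struct trace_kprobe *tk;

    if (!rp)        /* unregistered concurrently: nothing to account */
        return 0;

    tk = container_of(rp, struct trace_kprobe, rp);
    tk->nhit++;
    return 1;
}

int main(void)
{
    struct trace_kprobe tk = { 0, { 0 } };

    printf("%d\n", dispatch(&tk.rp));   /* 1: hit accounted */
    printf("%d\n", dispatch(NULL));     /* 0: no bogus container_of() */
    printf("nhit=%ld\n", tk.nhit);
    return 0;
}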
index 9711589..c3dc4f8 100644 (file)
@@ -546,7 +546,6 @@ static int __trace_uprobe_create(int argc, const char **argv)
        bool is_return = false;
        int i, ret;
 
-       ret = 0;
        ref_ctr_offset = 0;
 
        switch (argv[0][0]) {
index 230038d..59ddb00 100644 (file)
@@ -34,6 +34,27 @@ MODULE_LICENSE("GPL");
 #define WATCH_QUEUE_NOTE_SIZE 128
 #define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)
 
+/*
+ * This must be called under the RCU read-lock, which makes
+ * sure that the wqueue still exists. It can then take the lock,
+ * and check that the wqueue hasn't been destroyed, which in
+ * turn makes sure that the notification pipe still exists.
+ */
+static inline bool lock_wqueue(struct watch_queue *wqueue)
+{
+       spin_lock_bh(&wqueue->lock);
+       if (unlikely(wqueue->defunct)) {
+               spin_unlock_bh(&wqueue->lock);
+               return false;
+       }
+       return true;
+}
+
+static inline void unlock_wqueue(struct watch_queue *wqueue)
+{
+       spin_unlock_bh(&wqueue->lock);
+}
+
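
lock_wqueue() folds the defunct test into lock acquisition, so a caller either holds the lock on a live queue or never touches the queue at all; the per-call defunct check inside post_one_notification() then becomes unnecessary. The same acquire-and-validate idiom in user space, with a pthread mutex (hypothetical, heavily simplified):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct wqueue {
    pthread_mutex_t lock;
    bool defunct;
};

static bool lock_wqueue(struct wqueue *wq)
{
    pthread_mutex_lock(&wq->lock);
    if (wq->defunct) {                  /* torn down: refuse the lock */
        pthread_mutex_unlock(&wq->lock);
        return false;
    }
    return true;
}

static void unlock_wqueue(struct wqueue *wq)
{
    pthread_mutex_unlock(&wq->lock);
}

int main(void)
{
    struct wqueue wq = { PTHREAD_MUTEX_INITIALIZER, false };

    if (lock_wqueue(&wq)) {             /* post a notification, etc. */
        puts("queue live: safe to post");
        unlock_wqueue(&wq);
    }

    pthread_mutex_lock(&wq.lock);       /* teardown marks the queue dead */
    wq.defunct = true;
    pthread_mutex_unlock(&wq.lock);

    if (!lock_wqueue(&wq))
        puts("queue defunct: post skipped");
    return 0;
}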
 static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
                                         struct pipe_buffer *buf)
 {
@@ -69,6 +90,10 @@ static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
 
 /*
  * Post a notification to a watch queue.
+ *
+ * Must be called with the RCU lock for reading, and the
+ * watch_queue lock held, which guarantees that the pipe
+ * hasn't been released.
  */
 static bool post_one_notification(struct watch_queue *wqueue,
                                  struct watch_notification *n)
@@ -85,9 +110,6 @@ static bool post_one_notification(struct watch_queue *wqueue,
 
        spin_lock_irq(&pipe->rd_wait.lock);
 
-       if (wqueue->defunct)
-               goto out;
-
        mask = pipe->ring_size - 1;
        head = pipe->head;
        tail = pipe->tail;
@@ -203,7 +225,10 @@ void __post_watch_notification(struct watch_list *wlist,
                if (security_post_notification(watch->cred, cred, n) < 0)
                        continue;
 
-               post_one_notification(wqueue, n);
+               if (lock_wqueue(wqueue)) {
+                       post_one_notification(wqueue, n);
+                       unlock_wqueue(wqueue);
+               }
        }
 
        rcu_read_unlock();
@@ -429,6 +454,33 @@ void init_watch(struct watch *watch, struct watch_queue *wqueue)
        rcu_assign_pointer(watch->queue, wqueue);
 }
 
+static int add_one_watch(struct watch *watch, struct watch_list *wlist, struct watch_queue *wqueue)
+{
+       const struct cred *cred;
+       struct watch *w;
+
+       hlist_for_each_entry(w, &wlist->watchers, list_node) {
+               struct watch_queue *wq = rcu_access_pointer(w->queue);
+               if (wqueue == wq && watch->id == w->id)
+                       return -EBUSY;
+       }
+
+       cred = current_cred();
+       if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
+               atomic_dec(&cred->user->nr_watches);
+               return -EAGAIN;
+       }
+
+       watch->cred = get_cred(cred);
+       rcu_assign_pointer(watch->watch_list, wlist);
+
+       kref_get(&wqueue->usage);
+       kref_get(&watch->usage);
+       hlist_add_head(&watch->queue_node, &wqueue->watches);
+       hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
+       return 0;
+}
+
 /**
  * add_watch_to_object - Add a watch on an object to a watch list
  * @watch: The watch to add
@@ -443,33 +495,21 @@ void init_watch(struct watch *watch, struct watch_queue *wqueue)
  */
 int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
 {
-       struct watch_queue *wqueue = rcu_access_pointer(watch->queue);
-       struct watch *w;
-
-       hlist_for_each_entry(w, &wlist->watchers, list_node) {
-               struct watch_queue *wq = rcu_access_pointer(w->queue);
-               if (wqueue == wq && watch->id == w->id)
-                       return -EBUSY;
-       }
+       struct watch_queue *wqueue;
+       int ret = -ENOENT;
 
-       watch->cred = get_current_cred();
-       rcu_assign_pointer(watch->watch_list, wlist);
+       rcu_read_lock();
 
-       if (atomic_inc_return(&watch->cred->user->nr_watches) >
-           task_rlimit(current, RLIMIT_NOFILE)) {
-               atomic_dec(&watch->cred->user->nr_watches);
-               put_cred(watch->cred);
-               return -EAGAIN;
+       wqueue = rcu_access_pointer(watch->queue);
+       if (lock_wqueue(wqueue)) {
+               spin_lock(&wlist->lock);
+               ret = add_one_watch(watch, wlist, wqueue);
+               spin_unlock(&wlist->lock);
+               unlock_wqueue(wqueue);
        }
 
-       spin_lock_bh(&wqueue->lock);
-       kref_get(&wqueue->usage);
-       kref_get(&watch->usage);
-       hlist_add_head(&watch->queue_node, &wqueue->watches);
-       spin_unlock_bh(&wqueue->lock);
-
-       hlist_add_head(&watch->list_node, &wlist->watchers);
-       return 0;
+       rcu_read_unlock();
+       return ret;
 }
 EXPORT_SYMBOL(add_watch_to_object);
 
@@ -520,20 +560,15 @@ found:
 
        wqueue = rcu_dereference(watch->queue);
 
-       /* We don't need the watch list lock for the next bit as RCU is
-        * protecting *wqueue from deallocation.
-        */
-       if (wqueue) {
+       if (lock_wqueue(wqueue)) {
                post_one_notification(wqueue, &n.watch);
 
-               spin_lock_bh(&wqueue->lock);
-
                if (!hlist_unhashed(&watch->queue_node)) {
                        hlist_del_init_rcu(&watch->queue_node);
                        put_watch(watch);
                }
 
-               spin_unlock_bh(&wqueue->lock);
+               unlock_wqueue(wqueue);
        }
 
        if (wlist->release_watch) {
index 20a7a55..ecb0e83 100644 (file)
@@ -424,8 +424,6 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
                /* Start period for the next softlockup warning. */
                update_report_ts();
 
-               printk_prefer_direct_enter();
-
                pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
@@ -444,8 +442,6 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
                add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
-
-               printk_prefer_direct_exit();
        }
 
        return HRTIMER_RESTART;
index 701f35f..247bf0b 100644 (file)
@@ -135,8 +135,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;
 
-               printk_prefer_direct_enter();
-
                pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
                         this_cpu);
                print_modules();
@@ -157,8 +155,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
                if (hardlockup_panic)
                        nmi_panic(regs, "Hard LOCKUP");
 
-               printk_prefer_direct_exit();
-
                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }
index 1ea50f6..aa8a82b 100644 (file)
@@ -5001,7 +5001,10 @@ static void unbind_workers(int cpu)
 
                for_each_pool_worker(worker, pool) {
                        kthread_set_per_cpu(worker->task, -1);
-                       WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
+                       if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
+                               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
+                       else
+                               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
                }
 
                mutex_unlock(&wq_pool_attach_mutex);
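
The new branch avoids asking set_cpus_allowed_ptr() for an affinity mask that intersects no active CPU, falling back to cpu_possible_mask so the worker stays schedulable during hotplug. The mask logic, reduced to plain 64-bit words (hypothetical masks, not the kernel's struct cpumask):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t wq_unbound_mask   = 1ull << 3; /* preferred: CPU 3 only */
    uint64_t cpu_active_mask   = 1ull << 0; /* but only CPU 0 is up  */
    uint64_t cpu_possible_mask = ~0ull;     /* fallback: any CPU     */
    uint64_t chosen;

    if (wq_unbound_mask & cpu_active_mask)  /* cpumask_intersects()  */
        chosen = wq_unbound_mask;
    else
        chosen = cpu_possible_mask;         /* never leave the worker
                                             * with no runnable CPU  */

    printf("affinity mask: %#llx\n", (unsigned long long)chosen);
    return 0;
}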
index 6a84363..eaaad4d 100644 (file)
@@ -120,6 +120,9 @@ config INDIRECT_IOMEM_FALLBACK
 
 source "lib/crypto/Kconfig"
 
+config LIB_MEMNEQ
+       bool
+
 config CRC_CCITT
        tristate "CRC-CCITT functions"
        help
index c4fe15d..fd15230 100644 (file)
@@ -84,6 +84,9 @@ config UBSAN_SHIFT
 config UBSAN_DIV_ZERO
        bool "Perform checking for integer divide-by-zero"
        depends on $(cc-option,-fsanitize=integer-divide-by-zero)
+       # https://github.com/ClangBuiltLinux/linux/issues/1657
+       # https://github.com/llvm/llvm-project/issues/56289
+       depends on !CC_IS_CLANG
        help
          This option enables -fsanitize=integer-divide-by-zero which checks
          for integer division by zero. This is effectively redundant with the
@@ -94,7 +97,7 @@ config UBSAN_UNREACHABLE
        bool "Perform checking for unreachable code"
        # objtool already handles unreachable checking and gets angry about
        # seeing UBSan instrumentation located in unreachable places.
-       depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || X86_SMAP))
+       depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || HAVE_UACCESS_VALIDATION))
        depends on $(cc-option,-fsanitize=unreachable)
        help
          This option enables -fsanitize=unreachable which checks for control
index ea54294..f99bf61 100644 (file)
@@ -251,6 +251,7 @@ obj-$(CONFIG_DIMLIB) += dim/
 obj-$(CONFIG_SIGNATURE) += digsig.o
 
 lib-$(CONFIG_CLZ_TAB) += clz_tab.o
+lib-$(CONFIG_LIB_MEMNEQ) += memneq.o
 
 obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
 obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
index 9856e29..2082af4 100644 (file)
@@ -71,6 +71,7 @@ config CRYPTO_LIB_CURVE25519
        tristate "Curve25519 scalar multiplication library"
        depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
        select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
+       select LIB_MEMNEQ
        help
          Enable the Curve25519 library interface. This interface may be
          fulfilled by either the generic implementation or an arch-specific
index f4ab4f4..7ecdfdb 100644 (file)
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -491,7 +491,8 @@ void ida_free(struct ida *ida, unsigned int id)
        struct ida_bitmap *bitmap;
        unsigned long flags;
 
-       BUG_ON((int)id < 0);
+       if ((int)id < 0)
+               return;
 
        xas_lock_irqsave(&xas, flags);
        bitmap = xas_load(&xas);
index c6f0b18..45e93ec 100644 (file)
@@ -111,31 +111,6 @@ int lockref_put_not_zero(struct lockref *lockref)
 EXPORT_SYMBOL(lockref_put_not_zero);
 
 /**
- * lockref_get_or_lock - Increments count unless the count is 0 or dead
- * @lockref: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count was zero
- * and we got the lock instead.
- */
-int lockref_get_or_lock(struct lockref *lockref)
-{
-       CMPXCHG_LOOP(
-               new.count++;
-               if (old.count <= 0)
-                       break;
-       ,
-               return 1;
-       );
-
-       spin_lock(&lockref->lock);
-       if (lockref->count <= 0)
-               return 0;
-       lockref->count++;
-       spin_unlock(&lockref->lock);
-       return 1;
-}
-EXPORT_SYMBOL(lockref_get_or_lock);
-
-/**
  * lockref_put_return - Decrement reference count if possible
  * @lockref: pointer to lockref structure
  *
similarity index 100%
rename from crypto/memneq.c
rename to lib/memneq.c
index ae4fd4d..29eb048 100644 (file)
@@ -528,7 +528,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
 
                sbitmap_deferred_clear(map);
                if (map->word == (1UL << (map_depth - 1)) - 1)
-                       continue;
+                       goto next;
 
                nr = find_first_zero_bit(&map->word, map_depth);
                if (nr + nr_tags <= map_depth) {
@@ -539,6 +539,8 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
                        get_mask = ((1UL << map_tags) - 1) << nr;
                        do {
                                val = READ_ONCE(map->word);
+                               if ((val & ~get_mask) != val)
+                                       goto next;
                                ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
                        } while (ret != val);
                        get_mask = (get_mask & ~ret) >> nr;
@@ -549,6 +551,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
                                return get_mask;
                        }
                }
+next:
                /* Jump to next index. */
                if (++index >= sb->map_nr)
                        index = 0;
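
The new (val & ~get_mask) != val test bails to the next word as soon as another CPU already owns one of the wanted bits; without it, the cmpxchg loop could "succeed" on such a word and hand out tags that were never free. A single-word user-space model using compiler atomic builtins (simplified; the real code also handles wrap-around and deferred clears):

#include <stdbool.h>
#include <stdio.h>

static unsigned long word;      /* bit set = tag allocated */

static bool grab_batch(unsigned long get_mask)
{
    unsigned long val = __atomic_load_n(&word, __ATOMIC_RELAXED);

    do {
        if ((val & ~get_mask) != val)   /* a wanted bit is taken */
            return false;               /* caller moves to next index */
    } while (!__atomic_compare_exchange_n(&word, &val, val | get_mask,
                                          false, __ATOMIC_ACQUIRE,
                                          __ATOMIC_RELAXED));
    return true;
}

int main(void)
{
    printf("grab 0x0f: %d\n", grab_batch(0x0f));    /* 1: all free    */
    printf("grab 0x18: %d\n", grab_batch(0x18));    /* 0: bit 3 taken */
    printf("grab 0xf0: %d\n", grab_batch(0xf0));    /* 1: still free  */
    return 0;
}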
index ff60bd7..95550b8 100644 (file)
@@ -231,20 +231,13 @@ static __init int bdi_class_init(void)
 }
 postcore_initcall(bdi_class_init);
 
-static int bdi_init(struct backing_dev_info *bdi);
-
 static int __init default_bdi_init(void)
 {
-       int err;
-
        bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
                                 WQ_SYSFS, 0);
        if (!bdi_wq)
                return -ENOMEM;
-
-       err = bdi_init(&noop_backing_dev_info);
-
-       return err;
+       return 0;
 }
 subsys_initcall(default_bdi_init);
 
@@ -781,7 +774,7 @@ static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
 
 #endif /* CONFIG_CGROUP_WRITEBACK */
 
-static int bdi_init(struct backing_dev_info *bdi)
+int bdi_init(struct backing_dev_info *bdi)
 {
        int ret;
 
index 8efbfb2..4b07c29 100644 (file)
@@ -374,6 +374,8 @@ static void damon_reclaim_timer_fn(struct work_struct *work)
 }
 static DECLARE_DELAYED_WORK(damon_reclaim_timer, damon_reclaim_timer_fn);
 
+static bool damon_reclaim_initialized;
+
 static int enabled_store(const char *val,
                const struct kernel_param *kp)
 {
@@ -382,6 +384,10 @@ static int enabled_store(const char *val,
        if (rc < 0)
                return rc;
 
+       /* system_wq might not be initialized yet */
+       if (!damon_reclaim_initialized)
+               return rc;
+
        if (enabled)
                schedule_delayed_work(&damon_reclaim_timer, 0);
 
@@ -449,6 +455,8 @@ static int __init damon_reclaim_init(void)
        damon_add_target(ctx, target);
 
        schedule_delayed_work(&damon_reclaim_timer, 0);
+
+       damon_reclaim_initialized = true;
        return 0;
 }
 
index 59e1653..3c7b9d6 100644 (file)
@@ -336,8 +336,7 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
        if (pte_young(entry)) {
                referenced = true;
                entry = pte_mkold(entry);
-               huge_ptep_set_access_flags(vma, addr, pte, entry,
-                                          vma->vm_flags & VM_WRITE);
+               set_huge_pte_at(mm, addr, pte, entry);
        }
 
 #ifdef CONFIG_MMU_NOTIFIER
index ac3775c..ffdfbc8 100644 (file)
@@ -2385,6 +2385,8 @@ static void filemap_get_read_batch(struct address_space *mapping,
                        continue;
                if (xas.xa_index > max || xa_is_value(folio))
                        break;
+               if (xa_is_sibling(folio))
+                       break;
                if (!folio_try_get_rcu(folio))
                        goto retry;
 
@@ -2629,6 +2631,13 @@ err:
        return err;
 }
 
+static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
+{
+       unsigned int shift = folio_shift(folio);
+
+       return (pos1 >> shift == pos2 >> shift);
+}
+
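
pos_same_folio() compares two file positions at the granularity of whichever folio actually holds them, so a large folio is marked accessed once per visit rather than once per PAGE_SIZE step. A tiny standalone illustration (the offsets and folio order are made up):

#include <stdbool.h>
#include <stdio.h>

static bool pos_same_folio(long long pos1, long long pos2, unsigned int shift)
{
    return (pos1 >> shift) == (pos2 >> shift);
}

int main(void)
{
    /* An order-2 folio on 4KiB pages spans 16KiB, i.e. shift = 14. */
    unsigned int shift = 14;

    printf("%d\n", pos_same_folio(0x0000, 0x3fff, shift)); /* 1: same folio */
    printf("%d\n", pos_same_folio(0x3fff, 0x4000, shift)); /* 0: crossed    */
    return 0;
}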
 /**
  * filemap_read - Read data from the page cache.
  * @iocb: The iocb to read.
@@ -2700,11 +2709,11 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
                writably_mapped = mapping_writably_mapped(mapping);
 
                /*
-                * When a sequential read accesses a page several times, only
+                * When a read accesses the same folio several times, only
                 * mark it as accessed the first time.
                 */
-               if (iocb->ki_pos >> PAGE_SHIFT !=
-                   ra->prev_pos >> PAGE_SHIFT)
+               if (!pos_same_folio(iocb->ki_pos, ra->prev_pos - 1,
+                                                       fbatch.folios[0]))
                        folio_mark_accessed(fbatch.folios[0]);
 
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
index 5512644..e2a39e3 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -87,7 +87,8 @@ retry:
         * belongs to this folio.
         */
        if (unlikely(page_folio(page) != folio)) {
-               folio_put_refs(folio, refs);
+               if (!put_devmap_managed_page_refs(&folio->page, refs))
+                       folio_put_refs(folio, refs);
                goto retry;
        }
 
@@ -176,7 +177,8 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
                        refs *= GUP_PIN_COUNTING_BIAS;
        }
 
-       folio_put_refs(folio, refs);
+       if (!put_devmap_managed_page_refs(&folio->page, refs))
+               folio_put_refs(folio, refs);
 }
 
 /**
index 3fd3242..f2aa63b 100644 (file)
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -212,14 +212,6 @@ int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static inline bool hmm_is_device_private_entry(struct hmm_range *range,
-               swp_entry_t entry)
-{
-       return is_device_private_entry(entry) &&
-               pfn_swap_entry_to_page(entry)->pgmap->owner ==
-               range->dev_private_owner;
-}
-
 static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pte_t pte)
 {
@@ -252,10 +244,12 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                swp_entry_t entry = pte_to_swp_entry(pte);
 
                /*
-                * Never fault in device private pages, but just report
-                * the PFN even if not present.
+                * Don't fault in device private pages owned by the caller,
+                * just report the PFN.
                 */
-               if (hmm_is_device_private_entry(range, entry)) {
+               if (is_device_private_entry(entry) &&
+                   pfn_swap_entry_to_page(entry)->pgmap->owner ==
+                   range->dev_private_owner) {
                        cpu_flags = HMM_PFN_VALID;
                        if (is_writable_device_private_entry(entry))
                                cpu_flags |= HMM_PFN_WRITE;
@@ -273,6 +267,9 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                if (!non_swap_entry(entry))
                        goto fault;
 
+               if (is_device_private_entry(entry))
+                       goto fault;
+
                if (is_device_exclusive_entry(entry))
                        goto fault;
 
index f724800..834f288 100644 (file)
@@ -2377,6 +2377,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
                        page_tail);
        page_tail->mapping = head->mapping;
        page_tail->index = head->index + tail;
+       page_tail->private = 0;
 
        /* Page flags must be visible before we make the page non-compound. */
        smp_wmb();
index a57e1be..a18c071 100644 (file)
@@ -4788,8 +4788,13 @@ again:
                         * sharing with another vma.
                         */
                        ;
-               } else if (unlikely(is_hugetlb_entry_migration(entry) ||
-                                   is_hugetlb_entry_hwpoisoned(entry))) {
+               } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
+                       bool uffd_wp = huge_pte_uffd_wp(entry);
+
+                       if (!userfaultfd_wp(dst_vma) && uffd_wp)
+                               entry = huge_pte_clear_uffd_wp(entry);
+                       set_huge_pte_at(dst, addr, dst_pte, entry);
+               } else if (unlikely(is_hugetlb_entry_migration(entry))) {
                        swp_entry_t swp_entry = pte_to_swp_entry(entry);
                        bool uffd_wp = huge_pte_uffd_wp(entry);
 
@@ -5947,6 +5952,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 
                page = alloc_huge_page(dst_vma, dst_addr, 0);
                if (IS_ERR(page)) {
+                       put_page(*pagep);
                        ret = -ENOMEM;
                        *pagep = NULL;
                        goto out;
index 5c0cddd..65e242b 100644 (file)
@@ -48,7 +48,7 @@ static int hwpoison_inject(void *data, u64 val)
 
 inject:
        pr_info("Injecting memory failure at pfn %#lx\n", pfn);
-       err = memory_failure(pfn, 0);
+       err = memory_failure(pfn, MF_SW_SIMULATED);
        return (err == -EOPNOTSUPP) ? 0 : err;
 }
 
index 5fe598e..8652426 100644 (file)
 #include <linux/io.h>
 #include <linux/export.h>
 
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+                          unsigned long prot)
 {
        unsigned long offset, vaddr;
        phys_addr_t last_addr;
        struct vm_struct *area;
 
        /* Disallow wrap-around or zero size */
-       last_addr = addr + size - 1;
-       if (!size || last_addr < addr)
+       last_addr = phys_addr + size - 1;
+       if (!size || last_addr < phys_addr)
                return NULL;
 
        /* Page-align mappings */
-       offset = addr & (~PAGE_MASK);
-       addr -= offset;
+       offset = phys_addr & (~PAGE_MASK);
+       phys_addr -= offset;
        size = PAGE_ALIGN(size + offset);
 
+       if (!ioremap_allowed(phys_addr, size, prot))
+               return NULL;
+
        area = get_vm_area_caller(size, VM_IOREMAP,
                        __builtin_return_address(0));
        if (!area)
                return NULL;
        vaddr = (unsigned long)area->addr;
+       area->phys_addr = phys_addr;
 
-       if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
+       if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
+                              __pgprot(prot))) {
                free_vm_area(area);
                return NULL;
        }
@@ -44,6 +50,12 @@ EXPORT_SYMBOL(ioremap_prot);
 
 void iounmap(volatile void __iomem *addr)
 {
-       vunmap((void *)((unsigned long)addr & PAGE_MASK));
+       void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);
+
+       if (!iounmap_allowed(vaddr))
+               return;
+
+       if (is_vmalloc_addr(vaddr))
+               vunmap(vaddr);
 }
 EXPORT_SYMBOL(iounmap);
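
The generic ioremap_prot() page-aligns the request before mapping: the physical address is rounded down, the size rounded up, and the stripped sub-page offset is added back onto the returned virtual address. The arithmetic, spelled out with made-up numbers (standalone sketch, 4KiB pages assumed):

#include <stdio.h>

#define PAGE_SIZE     4096ul
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    unsigned long phys_addr = 0x10010123, size = 0x300;
    unsigned long offset;

    offset = phys_addr & ~PAGE_MASK;    /* 0x123: sub-page offset   */
    phys_addr -= offset;                /* 0x10010000: aligned base */
    size = PAGE_ALIGN(size + offset);   /* 0x1000: one full page    */

    printf("map phys %#lx len %#lx, return vaddr + %#lx\n",
           phys_addr, size, offset);
    return 0;
}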
index c40c0e7..78be2be 100644 (file)
@@ -108,9 +108,10 @@ void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
                return;
 
        tag = kasan_random_tag();
+       kasan_unpoison(set_tag(page_address(page), tag),
+                      PAGE_SIZE << order, init);
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);
-       kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
 }
 
 void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
index 4e7cd4c..6aff49f 100644 (file)
@@ -360,6 +360,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
        unsigned long flags;
        struct slab *slab;
        void *addr;
+       const bool random_right_allocate = prandom_u32_max(2);
+       const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
+                                 !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS);
 
        /* Try to obtain a free object. */
        raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
@@ -404,7 +407,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
         * is that the out-of-bounds accesses detected are deterministic for
         * such allocations.
         */
-       if (prandom_u32_max(2)) {
+       if (random_right_allocate) {
                /* Allocate on the "right" side, re-calculate address. */
                meta->addr += PAGE_SIZE - size;
                meta->addr = ALIGN_DOWN(meta->addr, cache->align);
@@ -444,7 +447,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
        if (cache->ctor)
                cache->ctor(addr);
 
-       if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
+       if (random_fault)
                kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
 
        atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
@@ -600,14 +603,6 @@ static unsigned long kfence_init_pool(void)
                addr += 2 * PAGE_SIZE;
        }
 
-       /*
-        * The pool is live and will never be deallocated from this point on.
-        * Remove the pool object from the kmemleak object tree, as it would
-        * otherwise overlap with allocations returned by kfence_alloc(), which
-        * are registered with kmemleak through the slab post-alloc hook.
-        */
-       kmemleak_free(__kfence_pool);
-
        return 0;
 }
 
@@ -620,8 +615,16 @@ static bool __init kfence_init_pool_early(void)
 
        addr = kfence_init_pool();
 
-       if (!addr)
+       if (!addr) {
+               /*
+                * The pool is live and will never be deallocated from this point on.
+                * Ignore the pool object from the kmemleak phys object tree, as it would
+                * otherwise overlap with allocations returned by kfence_alloc(), which
+                * are registered with kmemleak through the slab post-alloc hook.
+                */
+               kmemleak_ignore_phys(__pa(__kfence_pool));
                return true;
+       }
 
        /*
         * Only release unprotected pages, and do not try to go back and change
index d7b4f26..0316bbc 100644 (file)
@@ -1112,7 +1112,7 @@ static int madvise_inject_error(int behavior,
                } else {
                        pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
                                 pfn, start);
-                       ret = memory_failure(pfn, MF_COUNT_INCREASED);
+                       ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED);
                        if (ret == -EOPNOTSUPP)
                                ret = 0;
                }
index abec50f..618c366 100644 (file)
@@ -4859,7 +4859,7 @@ static int mem_cgroup_slab_show(struct seq_file *m, void *p)
 {
        /*
         * Deprecated.
-        * Please, take a look at tools/cgroup/slabinfo.py .
+        * Please take a look at tools/cgroup/memcg_slabinfo.py.
         */
        return 0;
 }
index b85661c..da39ec8 100644 (file)
@@ -69,6 +69,8 @@ int sysctl_memory_failure_recovery __read_mostly = 1;
 
 atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
 
+static bool hw_memory_failure __read_mostly = false;
+
 static bool __page_handle_poison(struct page *page)
 {
        int ret;
@@ -1768,6 +1770,9 @@ int memory_failure(unsigned long pfn, int flags)
 
        mutex_lock(&mf_mutex);
 
+       if (!(flags & MF_SW_SIMULATED))
+               hw_memory_failure = true;
+
        p = pfn_to_online_page(pfn);
        if (!p) {
                res = arch_memory_failure(pfn, flags);
@@ -2103,6 +2108,13 @@ int unpoison_memory(unsigned long pfn)
 
        mutex_lock(&mf_mutex);
 
+       if (hw_memory_failure) {
+               unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n",
+                                pfn, &unpoison_rs);
+               ret = -EOPNOTSUPP;
+               goto unlock_mutex;
+       }
+
        if (!PageHWPoison(p)) {
                unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
                                 pfn, &unpoison_rs);
index 7a08914..1c6027a 100644 (file)
@@ -3043,7 +3043,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
        pte_t entry;
 
        VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
-       VM_BUG_ON(PageAnon(page) && !PageAnonExclusive(page));
+       VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page));
 
        /*
         * Clear the pages cpupid information as the existing
@@ -4369,9 +4369,12 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
                        return VM_FAULT_OOM;
        }
 
-       /* See comment in handle_pte_fault() */
+       /*
+        * See comment in handle_pte_fault() for how this scenario happens; we
+        * need to return NOPAGE so that we drop this page.
+        */
        if (pmd_devmap_trans_unstable(vmf->pmd))
-               return 0;
+               return VM_FAULT_NOPAGE;
 
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                      vmf->address, &vmf->ptl);
@@ -4802,6 +4805,19 @@ static vm_fault_t create_huge_pud(struct vm_fault *vmf)
        defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
        /* No support for anonymous transparent PUD pages yet */
        if (vma_is_anonymous(vmf->vma))
+               return VM_FAULT_FALLBACK;
+       if (vmf->vma->vm_ops->huge_fault)
+               return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+       return VM_FAULT_FALLBACK;
+}
+
+static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
+{
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&                    \
+       defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+       /* No support for anonymous transparent PUD pages yet */
+       if (vma_is_anonymous(vmf->vma))
                goto split;
        if (vmf->vma->vm_ops->huge_fault) {
                vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
@@ -4812,19 +4828,7 @@ static vm_fault_t create_huge_pud(struct vm_fault *vmf)
 split:
        /* COW or write-notify not handled on PUD level: split pud.*/
        __split_huge_pud(vmf->vma, vmf->pud, vmf->address);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-       return VM_FAULT_FALLBACK;
-}
-
-static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
-{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       /* No support for anonymous transparent PUD pages yet */
-       if (vma_is_anonymous(vmf->vma))
-               return VM_FAULT_FALLBACK;
-       if (vmf->vma->vm_ops->huge_fault)
-               return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
        return VM_FAULT_FALLBACK;
 }
 
index b870a65..745eea0 100644 (file)
@@ -499,7 +499,7 @@ void free_zone_device_page(struct page *page)
 }
 
 #ifdef CONFIG_FS_DAX
-bool __put_devmap_managed_page(struct page *page)
+bool __put_devmap_managed_page_refs(struct page *page, int refs)
 {
        if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
                return false;
@@ -509,9 +509,9 @@ bool __put_devmap_managed_page(struct page *page)
         * refcount is 1, then the page is free and the refcount is
         * stable because nobody holds a reference on the page.
         */
-       if (page_ref_dec_return(page) == 1)
+       if (page_ref_sub_return(page, refs) == 1)
                wake_up_var(&page->_refcount);
        return true;
 }
-EXPORT_SYMBOL(__put_devmap_managed_page);
+EXPORT_SYMBOL(__put_devmap_managed_page_refs);
 #endif /* CONFIG_FS_DAX */
index e51588e..6c1ea61 100644 (file)
@@ -1106,6 +1106,7 @@ static int unmap_and_move(new_page_t get_new_page,
        if (!newpage)
                return -ENOMEM;
 
+       newpage->private = 0;
        rc = __unmap_and_move(page, newpage, force, mode);
        if (rc == MIGRATEPAGE_SUCCESS)
                set_page_owner_migrate_reason(newpage, reason);
index e008a3d..b0bcab5 100644 (file)
@@ -2361,7 +2361,7 @@ static inline bool check_new_pcp(struct page *page, unsigned int order)
 }
 #endif /* CONFIG_DEBUG_VM */
 
-static inline bool should_skip_kasan_unpoison(gfp_t flags, bool init_tags)
+static inline bool should_skip_kasan_unpoison(gfp_t flags)
 {
        /* Don't skip if a software KASAN mode is enabled. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
@@ -2373,12 +2373,10 @@ static inline bool should_skip_kasan_unpoison(gfp_t flags, bool init_tags)
                return true;
 
        /*
-        * With hardware tag-based KASAN enabled, skip if either:
-        *
-        * 1. Memory tags have already been cleared via tag_clear_highpage().
-        * 2. Skipping has been requested via __GFP_SKIP_KASAN_UNPOISON.
+        * With hardware tag-based KASAN enabled, skip if this has been
+        * requested via __GFP_SKIP_KASAN_UNPOISON.
         */
-       return init_tags || (flags & __GFP_SKIP_KASAN_UNPOISON);
+       return flags & __GFP_SKIP_KASAN_UNPOISON;
 }
 
 static inline bool should_skip_init(gfp_t flags)
@@ -2397,6 +2395,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
        bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
                        !should_skip_init(gfp_flags);
        bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
+       int i;
 
        set_page_private(page, 0);
        set_page_refcounted(page);
@@ -2422,8 +2421,6 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
         * should be initialized as well).
         */
        if (init_tags) {
-               int i;
-
                /* Initialize both memory and tags. */
                for (i = 0; i != 1 << order; ++i)
                        tag_clear_highpage(page + i);
@@ -2431,13 +2428,17 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
                /* Note that memory is already initialized by the loop above. */
                init = false;
        }
-       if (!should_skip_kasan_unpoison(gfp_flags, init_tags)) {
+       if (!should_skip_kasan_unpoison(gfp_flags)) {
                /* Unpoison shadow memory or set memory tags. */
                kasan_unpoison_pages(page, order, init);
 
                /* Note that memory is already initialized by KASAN. */
                if (kasan_has_integrated_init())
                        init = false;
+       } else {
+               /* Ensure page_address() dereferencing does not fault. */
+               for (i = 0; i != 1 << order; ++i)
+                       page_kasan_tag_reset(page + i);
        }
        /* If memory is still not initialized, do it now. */
        if (init)
@@ -3968,11 +3969,15 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
         * need to be calculated.
         */
        if (!order) {
-               long fast_free;
+               long usable_free;
+               long reserved;
+
+               usable_free = free_pages;
+               reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
 
-               fast_free = free_pages;
-               fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
-               if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
+               /* reserved may overestimate high-atomic reserves. */
+               usable_free -= min(usable_free, reserved);
+               if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
                        return true;
        }
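
Clamping with min() matters because __zone_watermark_unusable_free() may over-estimate the high-atomic reserves; without the clamp, the subtraction could drive usable_free negative and make the comparison misleading. Worked numbers (hypothetical):

#include <stdio.h>

int main(void)
{
    long free_pages = 1000;
    long reserved = 1200;   /* over-estimated high-atomic reserves */
    long mark = 100, lowmem_reserve = 50;
    long usable_free = free_pages;

    /* usable_free -= min(usable_free, reserved); */
    usable_free -= (reserved < usable_free) ? reserved : usable_free;

    printf("usable_free=%ld pass=%d\n", usable_free,
           usable_free > mark + lowmem_reserve);    /* 0: take slow path */
    return 0;
}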
 
index d200d41..9d73dc3 100644 (file)
@@ -286,6 +286,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * @flags:                     isolation flags
  * @gfp_flags:                 GFP flags used for migrating pages
  * @isolate_before:    isolate the pageblock before the boundary_pfn
+ * @skip_isolation:    whether to skip pageblock isolation in the second
+ *                     call to isolate_single_pageblock()
  *
  * Free and in-use pages can be as big as MAX_ORDER-1 and contain more than one
  * pageblock. When not all pageblocks within a page are isolated at the same
index 57a0151..fdcd28c 100644 (file)
@@ -510,6 +510,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
                        new_order--;
        }
 
+       filemap_invalidate_lock_shared(mapping);
        while (index <= limit) {
                unsigned int order = new_order;
 
@@ -536,6 +537,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
        }
 
        read_pages(ractl);
+       filemap_invalidate_unlock_shared(mapping);
 
        /*
         * If there were already pages in the page cache, then we may have
index 5bcb334..746c05a 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1899,8 +1899,23 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                /* Unexpected PMD-mapped THP? */
                VM_BUG_ON_FOLIO(!pvmw.pte, folio);
 
-               subpage = folio_page(folio,
-                               pte_pfn(*pvmw.pte) - folio_pfn(folio));
+               if (folio_is_zone_device(folio)) {
+                       /*
+                        * Our PTE is a non-present device exclusive entry and
+                        * calculating the subpage as for the common case would
+                        * result in an invalid pointer.
+                        *
+                        * Since only PAGE_SIZE pages can currently be
+                        * migrated, just set it to page. This will need to be
+                        * changed when hugepage migrations to device private
+                        * memory are supported.
+                        */
+                       VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
+                       subpage = &folio->page;
+               } else {
+                       subpage = folio_page(folio,
+                                       pte_pfn(*pvmw.pte) - folio_pfn(folio));
+               }
                address = pvmw.address;
                anon_exclusive = folio_test_anon(folio) &&
                                 PageAnonExclusive(subpage);
@@ -1993,15 +2008,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                        /*
                         * No need to invalidate here it will synchronize on
                         * against the special swap migration pte.
-                        *
-                        * The assignment to subpage above was computed from a
-                        * swap PTE which results in an invalid pointer.
-                        * Since only PAGE_SIZE pages can currently be
-                        * migrated, just set it to page. This will need to be
-                        * changed when hugepage migrations to device private
-                        * memory are supported.
                         */
-                       subpage = &folio->page;
                } else if (PageHWPoison(subpage)) {
                        pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
                        if (folio_test_hugetlb(folio)) {
index 206ed6b..f06279d 100644 (file)
@@ -55,22 +55,28 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
        gfp_t gfp = vmf->gfp_mask;
        unsigned long addr;
        struct page *page;
+       vm_fault_t ret;
        int err;
 
        if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
                return vmf_error(-EINVAL);
 
+       filemap_invalidate_lock_shared(mapping);
+
 retry:
        page = find_lock_page(mapping, offset);
        if (!page) {
                page = alloc_page(gfp | __GFP_ZERO);
-               if (!page)
-                       return VM_FAULT_OOM;
+               if (!page) {
+                       ret = VM_FAULT_OOM;
+                       goto out;
+               }
 
                err = set_direct_map_invalid_noflush(page);
                if (err) {
                        put_page(page);
-                       return vmf_error(err);
+                       ret = vmf_error(err);
+                       goto out;
                }
 
                __SetPageUptodate(page);
@@ -86,7 +92,8 @@ retry:
                        if (err == -EEXIST)
                                goto retry;
 
-                       return vmf_error(err);
+                       ret = vmf_error(err);
+                       goto out;
                }
 
                addr = (unsigned long)page_address(page);
@@ -94,7 +101,11 @@ retry:
        }
 
        vmf->page = page;
-       return VM_FAULT_LOCKED;
+       ret = VM_FAULT_LOCKED;
+
+out:
+       filemap_invalidate_unlock_shared(mapping);
+       return ret;
 }
 
 static const struct vm_operations_struct secretmem_vm_ops = {
@@ -162,12 +173,20 @@ static int secretmem_setattr(struct user_namespace *mnt_userns,
                             struct dentry *dentry, struct iattr *iattr)
 {
        struct inode *inode = d_inode(dentry);
+       struct address_space *mapping = inode->i_mapping;
        unsigned int ia_valid = iattr->ia_valid;
+       int ret;
+
+       filemap_invalidate_lock(mapping);
 
        if ((ia_valid & ATTR_SIZE) && inode->i_size)
-               return -EINVAL;
+               ret = -EINVAL;
+       else
+               ret = simple_setattr(mnt_userns, dentry, iattr);
 
-       return simple_setattr(mnt_userns, dentry, iattr);
+       filemap_invalidate_unlock(mapping);
+
+       return ret;
 }
 
 static const struct inode_operations secretmem_iops = {
index a6f5653..b7f2d4a 100644 (file)
@@ -3392,7 +3392,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
                break;
        case Opt_nr_blocks:
                ctx->blocks = memparse(param->string, &rest);
-               if (*rest)
+               if (*rest || ctx->blocks > S64_MAX)
                        goto bad_value;
                ctx->seen |= SHMEM_SEEN_BLOCKS;
                break;
@@ -3514,10 +3514,7 @@ static int shmem_reconfigure(struct fs_context *fc)
 
        raw_spin_lock(&sbinfo->stat_lock);
        inodes = sbinfo->max_inodes - sbinfo->free_inodes;
-       if (ctx->blocks > S64_MAX) {
-               err = "Number of blocks too large";
-               goto out;
-       }
+
        if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
                if (!sbinfo->max_blocks) {
                        err = "Cannot retroactively limit size";
index f8cd00f..5e73e2d 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3230,7 +3230,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
        }
        /* ___cache_alloc_node can fall back to other nodes */
        ptr = ____cache_alloc_node(cachep, flags, nodeid);
-  out:
+out:
        local_irq_restore(save_flags);
        ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
        init = slab_want_init_on_alloc(flags, cachep);
@@ -3259,7 +3259,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
        if (!objp)
                objp = ____cache_alloc_node(cache, flags, numa_mem_id());
 
-  out:
+out:
        return objp;
 }
 #else
@@ -3406,9 +3406,10 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
 {
        bool init;
 
+       memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1);
+
        if (is_kfence_address(objp)) {
                kmemleak_free_recursive(objp, cachep->flags);
-               memcg_slab_free_hook(cachep, &objp, 1);
                __kfence_free(objp);
                return;
        }
@@ -3441,7 +3442,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
        check_irq_off();
        kmemleak_free_recursive(objp, cachep->flags);
        objp = cache_free_debugcheck(cachep, objp, caller);
-       memcg_slab_free_hook(cachep, &objp, 1);
 
        /*
         * Skip calling cache_free_alien() when the platform is not numa.
@@ -3478,7 +3478,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 {
        void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_);
 
-       trace_kmem_cache_alloc(_RET_IP_, ret,
+       trace_kmem_cache_alloc(_RET_IP_, ret, cachep,
                               cachep->object_size, cachep->size, flags);
 
        return ret;
@@ -3553,7 +3553,7 @@ error:
        local_irq_enable();
        cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
        slab_post_alloc_hook(s, objcg, flags, i, p, false);
-       __kmem_cache_free_bulk(s, i, p);
+       kmem_cache_free_bulk(s, i, p);
        return 0;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
@@ -3567,7 +3567,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
        ret = slab_alloc(cachep, NULL, flags, size, _RET_IP_);
 
        ret = kasan_kmalloc(cachep, ret, size, flags);
-       trace_kmalloc(_RET_IP_, ret,
+       trace_kmalloc(_RET_IP_, ret, cachep,
                      size, cachep->size, flags);
        return ret;
 }
@@ -3592,7 +3592,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
        void *ret = slab_alloc_node(cachep, flags, nodeid, cachep->object_size, _RET_IP_);
 
-       trace_kmem_cache_alloc_node(_RET_IP_, ret,
+       trace_kmem_cache_alloc_node(_RET_IP_, ret, cachep,
                                    cachep->object_size, cachep->size,
                                    flags, nodeid);
 
@@ -3611,7 +3611,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
        ret = slab_alloc_node(cachep, flags, nodeid, size, _RET_IP_);
 
        ret = kasan_kmalloc(cachep, ret, size, flags);
-       trace_kmalloc_node(_RET_IP_, ret,
+       trace_kmalloc_node(_RET_IP_, ret, cachep,
                           size, cachep->size,
                           flags, nodeid);
        return ret;
@@ -3694,7 +3694,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
        ret = slab_alloc(cachep, NULL, flags, size, caller);
 
        ret = kasan_kmalloc(cachep, ret, size, flags);
-       trace_kmalloc(caller, ret,
+       trace_kmalloc(caller, ret, cachep,
                      size, cachep->size, flags);
 
        return ret;
index db9fb5c..4ec82be 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -380,15 +380,6 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);
 
-/*
- * Generic implementation of bulk operations
- * These are useful for situations in which the allocator cannot
- * perform optimizations. In that case segments of the object listed
- * may be allocated or freed using these operations.
- */
-void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
-
 static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
 {
        return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -547,36 +538,22 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
        obj_cgroup_put(objcg);
 }
 
-static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
                                        void **p, int objects)
 {
-       struct kmem_cache *s;
        struct obj_cgroup **objcgs;
-       struct obj_cgroup *objcg;
-       struct slab *slab;
-       unsigned int off;
        int i;
 
        if (!memcg_kmem_enabled())
                return;
 
-       for (i = 0; i < objects; i++) {
-               if (unlikely(!p[i]))
-                       continue;
-
-               slab = virt_to_slab(p[i]);
-               /* we could be given a kmalloc_large() object, skip those */
-               if (!slab)
-                       continue;
-
-               objcgs = slab_objcgs(slab);
-               if (!objcgs)
-                       continue;
+       objcgs = slab_objcgs(slab);
+       if (!objcgs)
+               return;
 
-               if (!s_orig)
-                       s = slab->slab_cache;
-               else
-                       s = s_orig;
+       for (i = 0; i < objects; i++) {
+               struct obj_cgroup *objcg;
+               unsigned int off;
 
                off = obj_to_index(s, slab, p[i]);
                objcg = objcgs[off];
@@ -628,7 +605,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 {
 }
 
-static inline void memcg_slab_free_hook(struct kmem_cache *s,
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
                                        void **p, int objects)
 {
 }
index 77c3adf..1799664 100644 (file)
 #include <linux/memcontrol.h>
 #include <linux/stackdepot.h>
 
-#define CREATE_TRACE_POINTS
-#include <trace/events/kmem.h>
-
 #include "internal.h"
-
 #include "slab.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/kmem.h>
+
 enum slab_state slab_state;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
@@ -105,33 +104,6 @@ static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
 }
 #endif
 
-void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
-{
-       size_t i;
-
-       for (i = 0; i < nr; i++) {
-               if (s)
-                       kmem_cache_free(s, p[i]);
-               else
-                       kfree(p[i]);
-       }
-}
-
-int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
-                                                               void **p)
-{
-       size_t i;
-
-       for (i = 0; i < nr; i++) {
-               void *x = p[i] = kmem_cache_alloc(s, flags);
-               if (!x) {
-                       __kmem_cache_free_bulk(s, i, p);
-                       return 0;
-               }
-       }
-       return i;
-}
-
 /*
  * Figure out what the alignment of the objects will be given a set of
  * flags, a user specified alignment and the size of the objects.
@@ -959,7 +931,7 @@ EXPORT_SYMBOL(kmalloc_order);
 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 {
        void *ret = kmalloc_order(size, flags, order);
-       trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+       trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
        return ret;
 }
 EXPORT_SYMBOL(kmalloc_order_trace);
index f47811f..2bd4f47 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -507,7 +507,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
                *m = size;
                ret = (void *)m + minalign;
 
-               trace_kmalloc_node(caller, ret,
+               trace_kmalloc_node(caller, ret, NULL,
                                   size, size + minalign, gfp, node);
        } else {
                unsigned int order = get_order(size);
@@ -516,7 +516,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
                        gfp |= __GFP_COMP;
                ret = slob_new_pages(gfp, order, node);
 
-               trace_kmalloc_node(caller, ret,
+               trace_kmalloc_node(caller, ret, NULL,
                                   size, PAGE_SIZE << order, gfp, node);
        }
 
@@ -616,12 +616,12 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 
        if (c->size < PAGE_SIZE) {
                b = slob_alloc(c->size, flags, c->align, node, 0);
-               trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
+               trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
                                            SLOB_UNITS(c->size) * SLOB_UNIT,
                                            flags, node);
        } else {
                b = slob_new_pages(flags, get_order(c->size), node);
-               trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
+               trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
                                            PAGE_SIZE << get_order(c->size),
                                            flags, node);
        }
@@ -692,16 +692,33 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 {
-       __kmem_cache_free_bulk(s, size, p);
+       size_t i;
+
+       for (i = 0; i < nr; i++) {
+               if (s)
+                       kmem_cache_free(s, p[i]);
+               else
+                       kfree(p[i]);
+       }
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
                                                                void **p)
 {
-       return __kmem_cache_alloc_bulk(s, flags, size, p);
+       size_t i;
+
+       for (i = 0; i < nr; i++) {
+               void *x = p[i] = kmem_cache_alloc(s, flags);
+
+               if (!x) {
+                       kmem_cache_free_bulk(s, i, p);
+                       return 0;
+               }
+       }
+       return i;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
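
The SLOB fallback above inlines the generic bulk contract: either all nr objects are allocated, or the partial batch is freed again and 0 is returned, so the caller never sees a half-filled array. A minimal userspace sketch of the same all-or-nothing shape, with malloc()/free() standing in for the slab calls (bulk_alloc is an illustrative name, not kernel API):

#include <stdlib.h>

/* Allocate nr objects of sz bytes into p[]. On failure, free the
 * objects allocated so far and return 0; otherwise return nr. */
static size_t bulk_alloc(size_t sz, size_t nr, void **p)
{
        size_t i;

        for (i = 0; i < nr; i++) {
                p[i] = malloc(sz);
                if (!p[i]) {
                        while (i--)
                                free(p[i]);
                        return 0;
                }
        }
        return nr;
}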
 
index e553502..862dbd9 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -726,25 +726,48 @@ static struct track *get_track(struct kmem_cache *s, void *object,
        return kasan_reset_tag(p + alloc);
 }
 
-static void noinline set_track(struct kmem_cache *s, void *object,
-                       enum track_item alloc, unsigned long addr)
-{
-       struct track *p = get_track(s, object, alloc);
-
 #ifdef CONFIG_STACKDEPOT
+static noinline depot_stack_handle_t set_track_prepare(void)
+{
+       depot_stack_handle_t handle;
        unsigned long entries[TRACK_ADDRS_COUNT];
        unsigned int nr_entries;
 
        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
-       p->handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+       handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+
+       return handle;
+}
+#else
+static inline depot_stack_handle_t set_track_prepare(void)
+{
+       return 0;
+}
 #endif
 
+static void set_track_update(struct kmem_cache *s, void *object,
+                            enum track_item alloc, unsigned long addr,
+                            depot_stack_handle_t handle)
+{
+       struct track *p = get_track(s, object, alloc);
+
+#ifdef CONFIG_STACKDEPOT
+       p->handle = handle;
+#endif
        p->addr = addr;
        p->cpu = smp_processor_id();
        p->pid = current->pid;
        p->when = jiffies;
 }
 
+static __always_inline void set_track(struct kmem_cache *s, void *object,
+                                     enum track_item alloc, unsigned long addr)
+{
+       depot_stack_handle_t handle = set_track_prepare();
+
+       set_track_update(s, object, alloc, addr, handle);
+}
+
 static void init_tracking(struct kmem_cache *s, void *object)
 {
        struct track *p;
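
Splitting set_track() into set_track_prepare()/set_track_update() follows a common locking pattern: do the potentially expensive, possibly allocating work (here stack_depot_save()) before any spinlock is taken, then only publish the precomputed handle under the lock. A hedged userspace sketch of the same shape (the record type and expensive_prepare() are illustrative stand-ins):

#include <pthread.h>

static struct record {
        pthread_mutex_t lock;
        unsigned long handle;           /* published under the lock */
} rec = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Stand-in for stack_depot_save(): may allocate, so it must not
 * run with rec.lock held. */
static unsigned long expensive_prepare(void)
{
        return 42;
}

static void record_update(void)
{
        unsigned long h = expensive_prepare();  /* outside the lock */

        pthread_mutex_lock(&rec.lock);
        rec.handle = h;                         /* cheap store inside */
        pthread_mutex_unlock(&rec.lock);
}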
@@ -1373,6 +1396,10 @@ static noinline int free_debug_processing(
        int cnt = 0;
        unsigned long flags, flags2;
        int ret = 0;
+       depot_stack_handle_t handle = 0;
+
+       if (s->flags & SLAB_STORE_USER)
+               handle = set_track_prepare();
 
        spin_lock_irqsave(&n->list_lock, flags);
        slab_lock(slab, &flags2);
@@ -1391,7 +1418,7 @@ next_object:
        }
 
        if (s->flags & SLAB_STORE_USER)
-               set_track(s, object, TRACK_FREE, addr);
+               set_track_update(s, object, TRACK_FREE, addr, handle);
        trace(s, slab, object, 0);
        /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
        init_object(s, object, SLUB_RED_INACTIVE);
@@ -2936,6 +2963,7 @@ redo:
 
        if (!freelist) {
                c->slab = NULL;
+               c->tid = next_tid(c->tid);
                local_unlock_irqrestore(&s->cpu_slab->lock, flags);
                stat(s, DEACTIVATE_BYPASS);
                goto new_slab;
@@ -2968,6 +2996,7 @@ deactivate_slab:
        freelist = c->freelist;
        c->slab = NULL;
        c->freelist = NULL;
+       c->tid = next_tid(c->tid);
        local_unlock_irqrestore(&s->cpu_slab->lock, flags);
        deactivate_slab(s, slab, freelist);
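
The two added next_tid() bumps close a hole in SLUB's lockless fast path: the per-cpu tid acts as a transaction counter, and any path that tears down c->slab and c->freelist must advance it so a concurrent cmpxchg-based allocation notices the change and retries instead of using a stale snapshot. A simplified sketch of the optimistic scheme in C11 atomics (the real code uses this_cpu_cmpxchg_double(); this only shows the idea):

#include <stdatomic.h>
#include <stddef.h>

struct cpu_cache {
        _Atomic unsigned long tid;      /* bumped on every slab change */
        void *freelist;
};

/* Optimistic read: snapshot tid, read the state, then confirm the
 * tid did not move before trusting the snapshot. */
static void *try_fast_alloc(struct cpu_cache *c)
{
        unsigned long tid = atomic_load(&c->tid);
        void *obj = c->freelist;

        if (atomic_load(&c->tid) != tid)
                return NULL;            /* raced with a slab change: retry */
        return obj;
}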
 
@@ -3228,7 +3257,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 {
        void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);
 
-       trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+       trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size,
                                s->size, gfpflags);
 
        return ret;
@@ -3251,7 +3280,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_lru);
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
        void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size);
-       trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+       trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags);
        ret = kasan_kmalloc(s, ret, size, gfpflags);
        return ret;
 }
@@ -3263,7 +3292,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
        void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
 
-       trace_kmem_cache_alloc_node(_RET_IP_, ret,
+       trace_kmem_cache_alloc_node(_RET_IP_, ret, s,
                                    s->object_size, s->size, gfpflags, node);
 
        return ret;
@@ -3277,7 +3306,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 {
        void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
 
-       trace_kmalloc_node(_RET_IP_, ret,
+       trace_kmalloc_node(_RET_IP_, ret, s,
                           size, s->size, gfpflags, node);
 
        ret = kasan_kmalloc(s, ret, size, gfpflags);
@@ -3435,9 +3464,6 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
        struct kmem_cache_cpu *c;
        unsigned long tid;
 
-       /* memcg_slab_free_hook() is already called for bulk free. */
-       if (!tail)
-               memcg_slab_free_hook(s, &head, 1);
 redo:
        /*
         * Determine the current cpu's per-cpu slab.
@@ -3497,9 +3523,10 @@ redo:
 }
 
 static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
-                                     void *head, void *tail, int cnt,
+                                     void *head, void *tail, void **p, int cnt,
                                      unsigned long addr)
 {
+       memcg_slab_free_hook(s, slab, p, cnt);
        /*
         * With KASAN enabled slab_free_freelist_hook modifies the freelist
         * to remove objects, whose reuse must be delayed.
@@ -3521,7 +3548,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
        if (!s)
                return;
        trace_kmem_cache_free(_RET_IP_, x, s->name);
-       slab_free(s, virt_to_slab(x), x, NULL, 1, _RET_IP_);
+       slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3562,88 +3589,67 @@ static inline
 int build_detached_freelist(struct kmem_cache *s, size_t size,
                            void **p, struct detached_freelist *df)
 {
-       size_t first_skipped_index = 0;
        int lookahead = 3;
        void *object;
        struct folio *folio;
-       struct slab *slab;
-
-       /* Always re-init detached_freelist */
-       df->slab = NULL;
-
-       do {
-               object = p[--size];
-               /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
-       } while (!object && size);
-
-       if (!object)
-               return 0;
+       size_t same;
 
+       object = p[--size];
        folio = virt_to_folio(object);
        if (!s) {
                /* Handle kmalloc'ed objects */
                if (unlikely(!folio_test_slab(folio))) {
                        free_large_kmalloc(folio, object);
-                       p[size] = NULL; /* mark object processed */
+                       df->slab = NULL;
                        return size;
                }
                /* Derive kmem_cache from object */
-               slab = folio_slab(folio);
-               df->s = slab->slab_cache;
+               df->slab = folio_slab(folio);
+               df->s = df->slab->slab_cache;
        } else {
-               slab = folio_slab(folio);
+               df->slab = folio_slab(folio);
                df->s = cache_from_obj(s, object); /* Support for memcg */
        }
 
-       if (is_kfence_address(object)) {
-               slab_free_hook(df->s, object, false);
-               __kfence_free(object);
-               p[size] = NULL; /* mark object processed */
-               return size;
-       }
-
        /* Start new detached freelist */
-       df->slab = slab;
-       set_freepointer(df->s, object, NULL);
        df->tail = object;
        df->freelist = object;
-       p[size] = NULL; /* mark object processed */
        df->cnt = 1;
 
+       if (is_kfence_address(object))
+               return size;
+
+       set_freepointer(df->s, object, NULL);
+
+       same = size;
        while (size) {
                object = p[--size];
-               if (!object)
-                       continue; /* Skip processed objects */
-
                /* df->slab is always set at this point */
                if (df->slab == virt_to_slab(object)) {
                        /* Opportunistically build freelist */
                        set_freepointer(df->s, object, df->freelist);
                        df->freelist = object;
                        df->cnt++;
-                       p[size] = NULL; /* mark object processed */
-
+                       same--;
+                       if (size != same)
+                               swap(p[size], p[same]);
                        continue;
                }
 
                /* Limit look ahead search */
                if (!--lookahead)
                        break;
-
-               if (!first_skipped_index)
-                       first_skipped_index = size + 1;
        }
 
-       return first_skipped_index;
+       return same;
 }
 
 /* Note that interrupts must be enabled when calling this function. */
 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 {
-       if (WARN_ON(!size))
+       if (!size)
                return;
 
-       memcg_slab_free_hook(s, p, size);
        do {
                struct detached_freelist df;
 
@@ -3651,7 +3657,8 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
                if (!df.slab)
                        continue;
 
-               slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, _RET_IP_);
+               slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt,
+                         _RET_IP_);
        } while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
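
The rewrite above drops the old scheme of NULL-marking processed pointers in favor of an in-place partition: objects belonging to the current slab are swapped to the tail of p[], 'same' counts down to the boundary, and the caller can then hand &p[size] straight to slab_free(). A self-contained sketch of the same partition trick on ints (partition_to_tail is an illustrative name):

#include <stdio.h>

static void swap_int(int *a, int *b)
{
        int t = *a; *a = *b; *b = t;
}

/* Move every element equal to key to the tail of v[0..n) and
 * return the number of elements left in the front region. */
static size_t partition_to_tail(int *v, size_t n, int key)
{
        size_t same = n;

        while (n) {
                if (v[--n] == key) {
                        same--;
                        if (n != same)
                                swap_int(&v[n], &v[same]);
                }
        }
        return same;
}

int main(void)
{
        int v[] = { 1, 2, 1, 3, 1, 4 };
        size_t rest = partition_to_tail(v, 6, 1);

        for (size_t i = 0; i < 6; i++)
                printf("%d ", v[i]);    /* 3 2 4 1 1 1 */
        printf("| rest=%zu\n", rest);   /* rest=3 */
        return 0;
}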
@@ -3731,7 +3738,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 error:
        slub_put_cpu_ptr(s->cpu_slab);
        slab_post_alloc_hook(s, objcg, flags, i, p, false);
-       __kmem_cache_free_bulk(s, i, p);
+       kmem_cache_free_bulk(s, i, p);
        return 0;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
@@ -4412,7 +4419,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 
        ret = slab_alloc(s, NULL, flags, _RET_IP_, size);
 
-       trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
+       trace_kmalloc(_RET_IP_, ret, s, size, s->size, flags);
 
        ret = kasan_kmalloc(s, ret, size, flags);
 
@@ -4446,7 +4453,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
                ret = kmalloc_large_node(size, flags, node);
 
-               trace_kmalloc_node(_RET_IP_, ret,
+               trace_kmalloc_node(_RET_IP_, ret, NULL,
                                   size, PAGE_SIZE << get_order(size),
                                   flags, node);
 
@@ -4460,7 +4467,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
        ret = slab_alloc_node(s, NULL, flags, node, _RET_IP_, size);
 
-       trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
+       trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, flags, node);
 
        ret = kasan_kmalloc(s, ret, size, flags);
 
@@ -4552,7 +4559,7 @@ void kfree(const void *x)
                return;
        }
        slab = folio_slab(folio);
-       slab_free(slab->slab_cache, slab, object, NULL, 1, _RET_IP_);
+       slab_free(slab->slab_cache, slab, object, NULL, &object, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -4861,6 +4868,9 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 
        s = find_mergeable(size, align, flags, name, ctor);
        if (s) {
+               if (sysfs_slab_alias(s, name))
+                       return NULL;
+
                s->refcount++;
 
                /*
@@ -4869,11 +4879,6 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                 */
                s->object_size = max(s->object_size, size);
                s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
-
-               if (sysfs_slab_alias(s, name)) {
-                       s->refcount--;
-                       s = NULL;
-               }
        }
 
        return s;
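
Moving sysfs_slab_alias() ahead of the refcount and size updates removes the rollback branch entirely: when the only step that can fail runs first, the error path has nothing to undo. A tiny sketch of the shape (try_register() and the counter are hypothetical):

#include <stdbool.h>
#include <stddef.h>

static int refcount;

static bool try_register(const char *name)
{
        return name != NULL;    /* pretend registration can fail */
}

/* Fallible step first: a failure here needs no rollback of the
 * refcount or any other state. */
static bool alias_cache(const char *name)
{
        if (!try_register(name))
                return false;
        refcount++;
        return true;
}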
@@ -4919,7 +4924,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
        ret = slab_alloc(s, NULL, gfpflags, caller, size);
 
        /* Honor the call site pointer we received. */
-       trace_kmalloc(caller, ret, size, s->size, gfpflags);
+       trace_kmalloc(caller, ret, s, size, s->size, gfpflags);
 
        return ret;
 }
@@ -4935,7 +4940,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
                ret = kmalloc_large_node(size, gfpflags, node);
 
-               trace_kmalloc_node(caller, ret,
+               trace_kmalloc_node(caller, ret, NULL,
                                   size, PAGE_SIZE << get_order(size),
                                   gfpflags, node);
 
@@ -4950,7 +4955,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
        ret = slab_alloc_node(s, NULL, gfpflags, node, caller, size);
 
        /* Honor the call site pointer we received. */
-       trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
+       trace_kmalloc_node(caller, ret, s, size, s->size, gfpflags, node);
 
        return ret;
 }
index f4fa61d..dbbd1a7 100644 (file)
@@ -78,6 +78,14 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
 
        spin_lock(&init_mm.page_table_lock);
        if (likely(pmd_leaf(*pmd))) {
+               /*
+                * Higher order allocations from the buddy allocator must be able to
+                * be treated as independent small pages (as they can be freed
+                * individually).
+                */
+               if (!PageReserved(page))
+                       split_page(page, get_order(PMD_SIZE));
+
                /* Make pte visible before pmd. See comment in pmd_install(). */
                smp_wmb();
                pmd_populate_kernel(&init_mm, pmd, pgtable);
index f3922a9..034bb24 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -881,7 +881,7 @@ void lru_cache_disable(void)
         * lru_disable_count = 0 will have exited the critical
         * section when synchronize_rcu() returns.
         */
-       synchronize_rcu();
+       synchronize_rcu_expedited();
 #ifdef CONFIG_SMP
        __lru_add_drain_all(true);
 #else
index 2a65a89..10b94d6 100644 (file)
@@ -307,7 +307,7 @@ swp_entry_t folio_alloc_swap(struct folio *folio)
        entry.val = 0;
 
        if (folio_test_large(folio)) {
-               if (IS_ENABLED(CONFIG_THP_SWAP))
+               if (IS_ENABLED(CONFIG_THP_SWAP) && arch_thp_swp_supported())
                        get_swap_pages(1, &entry, folio_nr_pages(folio));
                goto out;
        }
index baeacc7..4e1da70 100644 (file)
@@ -161,29 +161,27 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
 static inline void check_heap_object(const void *ptr, unsigned long n,
                                     bool to_user)
 {
+       uintptr_t addr = (uintptr_t)ptr;
+       unsigned long offset;
        struct folio *folio;
 
        if (is_kmap_addr(ptr)) {
-               unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);
-
-               if ((unsigned long)ptr + n - 1 > page_end)
-                       usercopy_abort("kmap", NULL, to_user,
-                                       offset_in_page(ptr), n);
+               offset = offset_in_page(ptr);
+               if (n > PAGE_SIZE - offset)
+                       usercopy_abort("kmap", NULL, to_user, offset, n);
                return;
        }
 
        if (is_vmalloc_addr(ptr)) {
-               struct vm_struct *area = find_vm_area(ptr);
-               unsigned long offset;
+               struct vmap_area *area = find_vmap_area(addr);
 
-               if (!area) {
+               if (!area)
                        usercopy_abort("vmalloc", "no area", to_user, 0, n);
-                       return;
-               }
 
-               offset = ptr - area->addr;
-               if (offset + n > get_vm_area_size(area))
+               if (n > area->va_end - addr) {
+                       offset = addr - area->va_start;
                        usercopy_abort("vmalloc", NULL, to_user, offset, n);
+               }
                return;
        }
 
@@ -196,8 +194,8 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
                /* Check slab allocator for flags and size. */
                __check_heap_object(ptr, n, folio_slab(folio), to_user);
        } else if (folio_test_large(folio)) {
-               unsigned long offset = ptr - folio_address(folio);
-               if (offset + n > folio_size(folio))
+               offset = ptr - folio_address(folio);
+               if (n > folio_size(folio) - offset)
                        usercopy_abort("page alloc", NULL, to_user, offset, n);
        }
 }
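
Each bounds check above is rewritten from 'offset + n > limit' into 'n > limit - offset': the addition can wrap for an attacker-sized n and silently pass the check, while the subtraction cannot underflow because offset is already known to lie within the region. A small demonstration of the difference:

#include <stdio.h>
#include <limits.h>

int main(void)
{
        unsigned long limit = 4096;             /* e.g. PAGE_SIZE */
        unsigned long offset = 100;             /* known: offset < limit */
        unsigned long n = ULONG_MAX - 50;       /* oversized length */

        if (offset + n > limit)                 /* wraps to 49: passes */
                printf("unsafe form: caught\n");
        else
                printf("unsafe form: missed due to overflow\n");

        if (n > limit - offset)                 /* cannot underflow */
                printf("safe form: caught\n");
        return 0;
}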
index 4f4892a..07d3bef 100644 (file)
@@ -246,7 +246,10 @@ static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
        struct page *page;
        int ret;
 
-       ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
+       ret = shmem_getpage(inode, pgoff, &page, SGP_NOALLOC);
+       /* Our caller expects us to return -EFAULT if we failed to find the page. */
+       if (ret == -ENOENT)
+               ret = -EFAULT;
        if (ret)
                goto out;
        if (!page) {
index 07db424..effd1ff 100644 (file)
@@ -1798,7 +1798,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
        free_vmap_area_noflush(va);
 }
 
-static struct vmap_area *find_vmap_area(unsigned long addr)
+struct vmap_area *find_vmap_area(unsigned long addr)
 {
        struct vmap_area *va;
 
index 53b1955..2145321 100644 (file)
@@ -182,10 +182,14 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
        else if (dev->mtu > max_mtu)
                return -EINVAL;
 
+       /* Note: If this initial vlan_changelink() fails, we need
+        * to call vlan_dev_free_egress_priority() to free memory.
+        */
        err = vlan_changelink(dev, tb, data, extack);
-       if (err)
-               return err;
-       err = register_vlan_dev(dev, extack);
+
+       if (!err)
+               err = register_vlan_dev(dev, extack);
+
        if (err)
                vlan_dev_free_egress_priority(dev);
        return err;
index 95393bb..4c7030e 100644 (file)
@@ -1661,9 +1661,12 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                        int flags)
 {
        struct sock *sk = sock->sk;
-       struct sk_buff *skb;
+       struct sk_buff *skb, *last;
+       struct sk_buff_head *sk_queue;
        int copied;
        int err = 0;
+       int off = 0;
+       long timeo;
 
        lock_sock(sk);
        /*
@@ -1675,10 +1678,29 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                goto out;
        }
 
-       /* Now we can treat all alike */
-       skb = skb_recv_datagram(sk, flags, &err);
-       if (skb == NULL)
-               goto out;
+       /* We need support for non-blocking reads. */
+       sk_queue = &sk->sk_receive_queue;
+       skb = __skb_try_recv_datagram(sk, sk_queue, flags, &off, &err, &last);
+       /* If no packet is available, release_sock(sk) and try again. */
+       if (!skb) {
+               if (err != -EAGAIN)
+                       goto out;
+               release_sock(sk);
+               timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+               while (timeo && !__skb_wait_for_more_packets(sk, sk_queue, &err,
+                                                            &timeo, last)) {
+                       skb = __skb_try_recv_datagram(sk, sk_queue, flags, &off,
+                                                     &err, &last);
+                       if (skb)
+                               break;
+
+                       if (err != -EAGAIN)
+                               goto done;
+               }
+               if (!skb)
+                       goto done;
+               lock_sock(sk);
+       }
 
        if (!sk_to_ax25(sk)->pidincl)
                skb_pull(skb, 1);               /* Remove PID */
@@ -1725,6 +1747,7 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 out:
        release_sock(sk);
 
+done:
        return err;
 }
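
The reworked receive path first attempts a non-blocking dequeue and, when it has to wait, drops the socket lock for the duration of the sleep so senders and packet delivery can make progress, re-taking it only once a packet arrives. The userspace analogue is the classic condition-variable loop, sketched here (a simplification: the kernel path uses __skb_wait_for_more_packets(), not a condvar):

#include <pthread.h>

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t qavail = PTHREAD_COND_INITIALIZER;
static int qlen;

/* Wait for one item: the lock is released while sleeping inside
 * pthread_cond_wait() and re-acquired before it returns, so
 * producers are never blocked by a sleeping consumer. */
static void recv_one(void)
{
        pthread_mutex_lock(&qlock);
        while (qlen == 0)
                pthread_cond_wait(&qavail, &qlock);
        qlen--;
        pthread_mutex_unlock(&qlock);
}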
 
index 59a5c13..a0f99ba 100644 (file)
@@ -571,6 +571,7 @@ int hci_dev_close(__u16 dev)
                goto done;
        }
 
+       cancel_work_sync(&hdev->power_on);
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);
 
@@ -2675,6 +2676,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);
 
+       cancel_work_sync(&hdev->power_on);
+
        hci_cmd_sync_clear(hdev);
 
        if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
index 286d676..c170216 100644 (file)
@@ -4088,7 +4088,6 @@ int hci_dev_close_sync(struct hci_dev *hdev)
 
        bt_dev_dbg(hdev, "");
 
-       cancel_work_sync(&hdev->power_on);
        cancel_delayed_work(&hdev->power_off);
        cancel_delayed_work(&hdev->ncmd_timer);
 
@@ -4974,6 +4973,9 @@ int hci_suspend_sync(struct hci_dev *hdev)
                return err;
        }
 
+       /* Update event mask so only the allowed event can wakeup the host */
+       hci_set_event_mask_sync(hdev);
+
        /* Only configure accept list if disconnect succeeded and wake
         * isn't being prevented.
         */
@@ -4985,9 +4987,6 @@ int hci_suspend_sync(struct hci_dev *hdev)
        /* Unpause to take care of updating scanning params */
        hdev->scanning_paused = false;
 
-       /* Update event mask so only the allowed event can wakeup the host */
-       hci_set_event_mask_sync(hdev);
-
        /* Enable event filter for paired devices */
        hci_update_event_filter_sync(hdev);
 
index ae78490..5266866 100644 (file)
@@ -111,7 +111,8 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
 }
 
 /* Find channel with given SCID.
- * Returns locked channel. */
+ * Returns a locked channel with a reference held.
+ */
 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
                                                 u16 cid)
 {
@@ -119,15 +120,19 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
 
        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_scid(conn, cid);
-       if (c)
-               l2cap_chan_lock(c);
+       if (c) {
+               /* Only lock if chan reference is not 0 */
+               c = l2cap_chan_hold_unless_zero(c);
+               if (c)
+                       l2cap_chan_lock(c);
+       }
        mutex_unlock(&conn->chan_lock);
 
        return c;
 }
 
 /* Find channel with given DCID.
- * Returns locked channel.
+ * Returns a locked channel with a reference held.
  */
 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
                                                 u16 cid)
@@ -136,8 +141,12 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
 
        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_dcid(conn, cid);
-       if (c)
-               l2cap_chan_lock(c);
+       if (c) {
+               /* Only lock if chan reference is not 0 */
+               c = l2cap_chan_hold_unless_zero(c);
+               if (c)
+                       l2cap_chan_lock(c);
+       }
        mutex_unlock(&conn->chan_lock);
 
        return c;
@@ -162,8 +171,12 @@ static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
 
        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_ident(conn, ident);
-       if (c)
-               l2cap_chan_lock(c);
+       if (c) {
+               /* Only lock if chan reference is not 0 */
+               c = l2cap_chan_hold_unless_zero(c);
+               if (c)
+                       l2cap_chan_lock(c);
+       }
        mutex_unlock(&conn->chan_lock);
 
        return c;
@@ -497,6 +510,16 @@ void l2cap_chan_hold(struct l2cap_chan *c)
        kref_get(&c->kref);
 }
 
+struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
+{
+       BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
+
+       if (!kref_get_unless_zero(&c->kref))
+               return NULL;
+
+       return c;
+}
+
 void l2cap_chan_put(struct l2cap_chan *c)
 {
        BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
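
l2cap_chan_hold_unless_zero() is the standard fix for a lookup/teardown race: finding the channel on a list does not prove it is still alive, and taking a plain reference on one whose refcount has already hit zero would resurrect an object in the middle of being freed. kref_get_unless_zero() only succeeds while the count is non-zero; the same pattern in portable C11 atomics looks like this (chan_hold_unless_zero is an illustrative name):

#include <stdatomic.h>
#include <stdbool.h>

struct chan {
        atomic_int refcnt;
};

/* Take a reference only while the object is still alive. Returns
 * false once the last reference is gone, so a dying object is
 * never resurrected by a racing lookup. */
static bool chan_hold_unless_zero(struct chan *c)
{
        int old = atomic_load(&c->refcnt);

        do {
                if (old == 0)
                        return false;
        } while (!atomic_compare_exchange_weak(&c->refcnt, &old, old + 1));

        return true;
}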
@@ -1968,7 +1991,10 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
                        src_match = !bacmp(&c->src, src);
                        dst_match = !bacmp(&c->dst, dst);
                        if (src_match && dst_match) {
-                               l2cap_chan_hold(c);
+                               c = l2cap_chan_hold_unless_zero(c);
+                               if (!c)
+                                       continue;
+
                                read_unlock(&chan_list_lock);
                                return c;
                        }
@@ -1983,7 +2009,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
        }
 
        if (c1)
-               l2cap_chan_hold(c1);
+               c1 = l2cap_chan_hold_unless_zero(c1);
 
        read_unlock(&chan_list_lock);
 
@@ -4463,6 +4489,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
 
 unlock:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
        return err;
 }
 
@@ -4577,6 +4604,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
 
 done:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
        return err;
 }
 
@@ -5304,6 +5332,7 @@ send_move_response:
        l2cap_send_move_chan_rsp(chan, result);
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 
        return 0;
 }
@@ -5396,6 +5425,7 @@ static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
        }
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 }
 
 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
@@ -5425,6 +5455,7 @@ static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
        l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 }
 
 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
@@ -5488,6 +5519,7 @@ static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
        l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 
        return 0;
 }
@@ -5523,6 +5555,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
        }
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 
        return 0;
 }
@@ -5895,12 +5928,11 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
        if (credits > max_credits) {
                BT_ERR("LE credits overflow");
                l2cap_send_disconn_req(chan, ECONNRESET);
-               l2cap_chan_unlock(chan);
 
                /* Return 0 so that we don't trigger an unnecessary
                 * command reject packet.
                 */
-               return 0;
+               goto unlock;
        }
 
        chan->tx_credits += credits;
@@ -5911,7 +5943,9 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
        if (chan->tx_credits)
                chan->ops->resume(chan);
 
+unlock:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 
        return 0;
 }
@@ -7597,6 +7631,7 @@ drop:
 
 done:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 }
 
 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
@@ -8085,7 +8120,7 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
                if (src_type != c->src_type)
                        continue;
 
-               l2cap_chan_hold(c);
+               c = l2cap_chan_hold_unless_zero(c);
                read_unlock(&chan_list_lock);
                return c;
        }
index ae758ab..2f91a8c 100644 (file)
@@ -4723,7 +4723,6 @@ static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
                else
                        status = MGMT_STATUS_FAILED;
 
-               mgmt_pending_remove(cmd);
                goto unlock;
        }
 
index 4fd8826..ff47790 100644 (file)
@@ -1012,9 +1012,24 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net,
                return okfn(net, sk, skb);
 
        ops = nf_hook_entries_get_hook_ops(e);
-       for (i = 0; i < e->num_hook_entries &&
-             ops[i]->priority <= NF_BR_PRI_BRNF; i++)
-               ;
+       for (i = 0; i < e->num_hook_entries; i++) {
+               /* These hooks have already been called */
+               if (ops[i]->priority < NF_BR_PRI_BRNF)
+                       continue;
+
+               /* These hooks have not been called yet, run them. */
+               if (ops[i]->priority > NF_BR_PRI_BRNF)
+                       break;
+
+               /* take a closer look at NF_BR_PRI_BRNF. */
+               if (ops[i]->hook == br_nf_pre_routing) {
+                       /* This hook diverted the skb to this function,
+                        * hooks after this have not been run yet.
+                        */
+                       i++;
+                       break;
+               }
+       }
 
        nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev,
                           sk, net, okfn);
index bb01776..c96509c 100644 (file)
@@ -589,9 +589,13 @@ static int br_fill_ifinfo(struct sk_buff *skb,
        }
 
 done:
+       if (af) {
+               if (nlmsg_get_pos(skb) - (void *)af > nla_attr_size(0))
+                       nla_nest_end(skb, af);
+               else
+                       nla_nest_cancel(skb, af);
+       }
 
-       if (af)
-               nla_nest_end(skb, af);
        nlmsg_end(skb, nlh);
        return 0;
 
index 251e666..748be72 100644 (file)
@@ -47,7 +47,7 @@ enum caif_states {
 struct caifsock {
        struct sock sk; /* must be first member */
        struct cflayer layer;
-       u32 flow_state;
+       unsigned long flow_state;
        struct caif_connect_request conn_req;
        struct mutex readlock;
        struct dentry *debugfs_socket_dir;
@@ -56,38 +56,32 @@ struct caifsock {
 
 static int rx_flow_is_on(struct caifsock *cf_sk)
 {
-       return test_bit(RX_FLOW_ON_BIT,
-                       (void *) &cf_sk->flow_state);
+       return test_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static int tx_flow_is_on(struct caifsock *cf_sk)
 {
-       return test_bit(TX_FLOW_ON_BIT,
-                       (void *) &cf_sk->flow_state);
+       return test_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void set_rx_flow_off(struct caifsock *cf_sk)
 {
-        clear_bit(RX_FLOW_ON_BIT,
-                (void *) &cf_sk->flow_state);
+       clear_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void set_rx_flow_on(struct caifsock *cf_sk)
 {
-        set_bit(RX_FLOW_ON_BIT,
-                       (void *) &cf_sk->flow_state);
+       set_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void set_tx_flow_off(struct caifsock *cf_sk)
 {
-        clear_bit(TX_FLOW_ON_BIT,
-               (void *) &cf_sk->flow_state);
+       clear_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void set_tx_flow_on(struct caifsock *cf_sk)
 {
-        set_bit(TX_FLOW_ON_BIT,
-               (void *) &cf_sk->flow_state);
+       set_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void caif_read_lock(struct sock *sk)
index 65ee1b7..e60161b 100644 (file)
@@ -100,6 +100,7 @@ static inline u64 get_u64(const struct canfd_frame *cp, int offset)
 
 struct bcm_op {
        struct list_head list;
+       struct rcu_head rcu;
        int ifindex;
        canid_t can_id;
        u32 flags;
@@ -718,10 +719,9 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
        return NULL;
 }
 
-static void bcm_remove_op(struct bcm_op *op)
+static void bcm_free_op_rcu(struct rcu_head *rcu_head)
 {
-       hrtimer_cancel(&op->timer);
-       hrtimer_cancel(&op->thrtimer);
+       struct bcm_op *op = container_of(rcu_head, struct bcm_op, rcu);
 
        if ((op->frames) && (op->frames != &op->sframe))
                kfree(op->frames);
@@ -732,6 +732,14 @@ static void bcm_remove_op(struct bcm_op *op)
        kfree(op);
 }
 
+static void bcm_remove_op(struct bcm_op *op)
+{
+       hrtimer_cancel(&op->timer);
+       hrtimer_cancel(&op->thrtimer);
+
+       call_rcu(&op->rcu, bcm_free_op_rcu);
+}
+
 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
 {
        if (op->rx_reg_dev == dev) {
@@ -757,6 +765,9 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
                if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
                    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
 
+                       /* disable automatic timer on frame reception */
+                       op->flags |= RX_NO_AUTOTIMER;
+
                        /*
                         * Don't care if we're bound or not (due to netdev
                         * problems) can_rx_unregister() is always a safe
@@ -785,7 +796,6 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
                                                  bcm_rx_handler, op);
 
                        list_del(&op->list);
-                       synchronize_rcu();
                        bcm_remove_op(op);
                        return 1; /* done */
                }
index 08ce317..30a1603 100644 (file)
@@ -397,16 +397,18 @@ static void list_netdevice(struct net_device *dev)
 /* Device list removal
  * caller must respect a RCU grace period before freeing/reusing dev
  */
-static void unlist_netdevice(struct net_device *dev)
+static void unlist_netdevice(struct net_device *dev, bool lock)
 {
        ASSERT_RTNL();
 
        /* Unlink dev from the device chain */
-       write_lock(&dev_base_lock);
+       if (lock)
+               write_lock(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        netdev_name_node_del(dev->name_node);
        hlist_del_rcu(&dev->index_hlist);
-       write_unlock(&dev_base_lock);
+       if (lock)
+               write_unlock(&dev_base_lock);
 
        dev_base_seq_inc(dev_net(dev));
 }
@@ -4861,7 +4863,10 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 }
 
 /* When doing generic XDP we have to bypass the qdisc layer and the
- * network taps in order to match in-driver-XDP behavior.
+ * network taps in order to match in-driver-XDP behavior. This also means
+ * that XDP packets are able to starve other packets going through a qdisc,
+ * and DDoS attacks will be more effective. In-driver XDP uses dedicated TX
+ * queues, so it does not have this starvation issue.
  */
 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
 {
@@ -4873,7 +4878,7 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
        txq = netdev_core_pick_tx(dev, skb, NULL);
        cpu = smp_processor_id();
        HARD_TX_LOCK(dev, txq, cpu);
-       if (!netif_xmit_stopped(txq)) {
+       if (!netif_xmit_frozen_or_drv_stopped(txq)) {
                rc = netdev_start_xmit(skb, dev, txq, 0);
                if (dev_xmit_complete(rc))
                        free_skb = false;
@@ -4881,6 +4886,7 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
        HARD_TX_UNLOCK(dev, txq);
        if (free_skb) {
                trace_xdp_exception(dev, xdp_prog, XDP_TX);
+               dev_core_stats_tx_dropped_inc(dev);
                kfree_skb(skb);
        }
 }
@@ -10043,11 +10049,11 @@ int register_netdevice(struct net_device *dev)
                goto err_uninit;
 
        ret = netdev_register_kobject(dev);
-       if (ret) {
-               dev->reg_state = NETREG_UNREGISTERED;
+       write_lock(&dev_base_lock);
+       dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
+       write_unlock(&dev_base_lock);
+       if (ret)
                goto err_uninit;
-       }
-       dev->reg_state = NETREG_REGISTERED;
 
        __netdev_update_features(dev);
 
@@ -10329,7 +10335,9 @@ void netdev_run_todo(void)
                        continue;
                }
 
+               write_lock(&dev_base_lock);
                dev->reg_state = NETREG_UNREGISTERED;
+               write_unlock(&dev_base_lock);
                linkwatch_forget_dev(dev);
        }
 
@@ -10810,9 +10818,10 @@ void unregister_netdevice_many(struct list_head *head)
 
        list_for_each_entry(dev, head, unreg_list) {
                /* And unlink it from device chain. */
-               unlist_netdevice(dev);
-
+               write_lock(&dev_base_lock);
+               unlist_netdevice(dev, false);
                dev->reg_state = NETREG_UNREGISTERING;
+               write_unlock(&dev_base_lock);
        }
        flush_all_backlogs();
 
@@ -10959,7 +10968,7 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
        dev_close(dev);
 
        /* And unlink it from device chain */
-       unlist_netdevice(dev);
+       unlist_netdevice(dev, true);
 
        synchronize_net();
 
index 5af58eb..7950f75 100644 (file)
@@ -6158,7 +6158,6 @@ static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len
        if (err)
                return err;
 
-       ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 
        return seg6_lookup_nexthop(skb, NULL, 0);
@@ -6516,10 +6515,21 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
                                           ifindex, proto, netns_id, flags);
 
        if (sk) {
-               sk = sk_to_full_sk(sk);
-               if (!sk_fullsock(sk)) {
+               struct sock *sk2 = sk_to_full_sk(sk);
+
+               /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
+                * sock refcnt is decremented to prevent a request_sock leak.
+                */
+               if (!sk_fullsock(sk2))
+                       sk2 = NULL;
+               if (sk2 != sk) {
                        sock_gen_put(sk);
-                       return NULL;
+                       /* Ensure there is no need to bump sk2 refcnt */
+                       if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) {
+                               WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
+                               return NULL;
+                       }
+                       sk = sk2;
                }
        }
 
@@ -6553,10 +6563,21 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
                                         flags);
 
        if (sk) {
-               sk = sk_to_full_sk(sk);
-               if (!sk_fullsock(sk)) {
+               struct sock *sk2 = sk_to_full_sk(sk);
+
+               /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
+                * sock refcnt is decremented to prevent a request_sock leak.
+                */
+               if (!sk_fullsock(sk2))
+                       sk2 = NULL;
+               if (sk2 != sk) {
                        sock_gen_put(sk);
-                       return NULL;
+                       /* Ensure there is no need to bump sk2 refcnt */
+                       if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) {
+                               WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
+                               return NULL;
+                       }
+                       sk = sk2;
                }
        }
 
@@ -7020,7 +7041,7 @@ BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len
        if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
                return -EINVAL;
 
-       if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
+       if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies))
                return -EINVAL;
 
        if (!th->ack || th->rst || th->syn)
@@ -7095,7 +7116,7 @@ BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
        if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
                return -EINVAL;
 
-       if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
+       if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies))
                return -ENOENT;
 
        if (!th->syn || th->ack || th->fin || th->rst)
index e319e24..a364256 100644 (file)
@@ -33,6 +33,7 @@ static const char fmt_dec[] = "%d\n";
 static const char fmt_ulong[] = "%lu\n";
 static const char fmt_u64[] = "%llu\n";
 
+/* Caller holds RTNL or dev_base_lock */
 static inline int dev_isalive(const struct net_device *dev)
 {
        return dev->reg_state <= NETREG_REGISTERED;
index 5f85e01..b0ff615 100644 (file)
@@ -64,7 +64,7 @@ u32 secure_tcpv6_ts_off(const struct net *net,
                .daddr = *(struct in6_addr *)daddr,
        };
 
-       if (net->ipv4.sysctl_tcp_timestamps != 1)
+       if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
                return 0;
 
        ts_secret_init();
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
 #ifdef CONFIG_INET
 u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr)
 {
-       if (net->ipv4.sysctl_tcp_timestamps != 1)
+       if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
                return 0;
 
        ts_secret_init();
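
The READ_ONCE() conversions in this and the surrounding hunks all address the same issue: these sysctls are written with no lock that the readers hold, so the annotation stops the compiler from tearing, caching, or re-reading the load. The closest portable analogue is a relaxed atomic load, sketched below (the variable name is illustrative):

#include <stdatomic.h>

/* Tunable written by one thread and read locklessly by others. A
 * relaxed load forbids load tearing and refetching, much like
 * READ_ONCE(), while adding no ordering constraints. */
static _Atomic int sysctl_tcp_timestamps = 1;

static int timestamps_enabled(void)
{
        return atomic_load_explicit(&sysctl_tcp_timestamps,
                                    memory_order_relaxed) == 1;
}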
index 22b983a..b0fcd02 100644 (file)
@@ -699,6 +699,11 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
 
        write_lock_bh(&sk->sk_callback_lock);
 
+       if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
+               psock = ERR_PTR(-EINVAL);
+               goto out;
+       }
+
        if (sk->sk_user_data) {
                psock = ERR_PTR(-EBUSY);
                goto out;
index 3f00a28..5daa1fa 100644 (file)
@@ -387,7 +387,7 @@ void reuseport_stop_listen_sock(struct sock *sk)
                prog = rcu_dereference_protected(reuse->prog,
                                                 lockdep_is_held(&reuseport_lock));
 
-               if (sock_net(sk)->ipv4.sysctl_tcp_migrate_req ||
+               if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req) ||
                    (prog && prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)) {
                        /* Migration capable, move sk from the listening section
                         * to the closed section.
@@ -545,7 +545,7 @@ struct sock *reuseport_migrate_sock(struct sock *sk,
        hash = migrating_sk->sk_hash;
        prog = rcu_dereference(reuse->prog);
        if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) {
-               if (sock_net(sk)->ipv4.sysctl_tcp_migrate_req)
+               if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req))
                        goto select_by_hash;
                goto failure;
        }
index 2e78458..eb8e128 100644 (file)
@@ -1120,12 +1120,6 @@ static int __init dccp_init(void)
                                  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
        if (!dccp_hashinfo.bind_bucket_cachep)
                goto out_free_hashinfo2;
-       dccp_hashinfo.bind2_bucket_cachep =
-               kmem_cache_create("dccp_bind2_bucket",
-                                 sizeof(struct inet_bind2_bucket), 0,
-                                 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
-       if (!dccp_hashinfo.bind2_bucket_cachep)
-               goto out_free_bind_bucket_cachep;
 
        /*
         * Size and allocate the main established and bind bucket
@@ -1156,7 +1150,7 @@ static int __init dccp_init(void)
 
        if (!dccp_hashinfo.ehash) {
                DCCP_CRIT("Failed to allocate DCCP established hash table");
-               goto out_free_bind2_bucket_cachep;
+               goto out_free_bind_bucket_cachep;
        }
 
        for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
@@ -1182,23 +1176,14 @@ static int __init dccp_init(void)
                goto out_free_dccp_locks;
        }
 
-       dccp_hashinfo.bhash2 = (struct inet_bind2_hashbucket *)
-               __get_free_pages(GFP_ATOMIC | __GFP_NOWARN, bhash_order);
-
-       if (!dccp_hashinfo.bhash2) {
-               DCCP_CRIT("Failed to allocate DCCP bind2 hash table");
-               goto out_free_dccp_bhash;
-       }
-
        for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
                spin_lock_init(&dccp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
-               INIT_HLIST_HEAD(&dccp_hashinfo.bhash2[i].chain);
        }
 
        rc = dccp_mib_init();
        if (rc)
-               goto out_free_dccp_bhash2;
+               goto out_free_dccp_bhash;
 
        rc = dccp_ackvec_init();
        if (rc)
@@ -1222,38 +1207,30 @@ out_ackvec_exit:
        dccp_ackvec_exit();
 out_free_dccp_mib:
        dccp_mib_exit();
-out_free_dccp_bhash2:
-       free_pages((unsigned long)dccp_hashinfo.bhash2, bhash_order);
 out_free_dccp_bhash:
        free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
 out_free_dccp_locks:
        inet_ehash_locks_free(&dccp_hashinfo);
 out_free_dccp_ehash:
        free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
-out_free_bind2_bucket_cachep:
-       kmem_cache_destroy(dccp_hashinfo.bind2_bucket_cachep);
 out_free_bind_bucket_cachep:
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
 out_free_hashinfo2:
        inet_hashinfo2_free_mod(&dccp_hashinfo);
 out_fail:
        dccp_hashinfo.bhash = NULL;
-       dccp_hashinfo.bhash2 = NULL;
        dccp_hashinfo.ehash = NULL;
        dccp_hashinfo.bind_bucket_cachep = NULL;
-       dccp_hashinfo.bind2_bucket_cachep = NULL;
        return rc;
 }
 
 static void __exit dccp_fini(void)
 {
-       int bhash_order = get_order(dccp_hashinfo.bhash_size *
-                                   sizeof(struct inet_bind_hashbucket));
-
        ccid_cleanup_builtins();
        dccp_mib_exit();
-       free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
-       free_pages((unsigned long)dccp_hashinfo.bhash2, bhash_order);
+       free_pages((unsigned long)dccp_hashinfo.bhash,
+                  get_order(dccp_hashinfo.bhash_size *
+                            sizeof(struct inet_bind_hashbucket)));
        free_pages((unsigned long)dccp_hashinfo.ehash,
                   get_order((dccp_hashinfo.ehash_mask + 1) *
                             sizeof(struct inet_ehash_bucket)));
index dc92a67..7d542eb 100644 (file)
@@ -480,8 +480,8 @@ static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gf
        sk->sk_family      = PF_DECnet;
        sk->sk_protocol    = 0;
        sk->sk_allocation  = gfp;
-       sk->sk_sndbuf      = sysctl_decnet_wmem[1];
-       sk->sk_rcvbuf      = sysctl_decnet_rmem[1];
+       sk->sk_sndbuf      = READ_ONCE(sysctl_decnet_wmem[1]);
+       sk->sk_rcvbuf      = READ_ONCE(sysctl_decnet_rmem[1]);
 
        /* Initialization of DECnet Session Control Port                */
        scp = DN_SK(sk);
index 3738f2d..2dd76eb 100644 (file)
@@ -248,6 +248,7 @@ static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
        struct netlink_ext_ack extack = {0};
        bool change_vlan_filtering = false;
        struct dsa_switch *ds = dp->ds;
+       struct dsa_port *other_dp;
        bool vlan_filtering;
        int err;
 
@@ -270,8 +271,8 @@ static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
         * VLAN-aware bridge.
         */
        if (change_vlan_filtering && ds->vlan_filtering_is_global) {
-               dsa_switch_for_each_port(dp, ds) {
-                       struct net_device *br = dsa_port_bridge_dev_get(dp);
+               dsa_switch_for_each_port(other_dp, ds) {
+                       struct net_device *br = dsa_port_bridge_dev_get(other_dp);
 
                        if (br && br_vlan_enabled(br)) {
                                change_vlan_filtering = false;
@@ -799,7 +800,7 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
                ds->vlan_filtering = vlan_filtering;
 
                dsa_switch_for_each_user_port(other_dp, ds) {
-                       struct net_device *slave = dp->slave;
+                       struct net_device *slave = other_dp->slave;
 
                        /* We might be called in the unbind path, so not
                         * all slave devices might still be registered.
index 2b56218..4dfd68c 100644 (file)
@@ -344,6 +344,7 @@ static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
 
        ether_addr_copy(a->addr, addr);
        a->vid = vid;
+       a->db = db;
        refcount_set(&a->refcount, 1);
        list_add_tail(&a->list, &lag->fdbs);
 
index 7e6b37a..1c94bb8 100644 (file)
@@ -36,7 +36,7 @@ static int fallback_set_params(struct eeprom_req_info *request,
        if (request->page)
                offset = request->page * ETH_MODULE_EEPROM_PAGE_LEN + offset;
 
-       if (modinfo->type == ETH_MODULE_SFF_8079 &&
+       if (modinfo->type == ETH_MODULE_SFF_8472 &&
            request->i2c_address == 0x51)
                offset += ETH_MODULE_EEPROM_PAGE_LEN * 2;
 
index 93da9f7..252c8bc 100644 (file)
@@ -217,7 +217,7 @@ int inet_listen(struct socket *sock, int backlog)
                 * because the socket was in TCP_LISTEN state previously but
                 * was shutdown() rather than close().
                 */
-               tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
+               tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
                if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
                    (tcp_fastopen & TFO_SERVER_ENABLE) &&
                    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
@@ -335,7 +335,7 @@ lookup_protocol:
                        inet->hdrincl = 1;
        }
 
-       if (net->ipv4.sysctl_ip_no_pmtu_disc)
+       if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
                inet->pmtudisc = IP_PMTUDISC_DONT;
        else
                inet->pmtudisc = IP_PMTUDISC_WANT;
@@ -1246,7 +1246,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
        if (new_saddr == old_saddr)
                return 0;
 
-       if (sock_net(sk)->ipv4.sysctl_ip_dynaddr > 1) {
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) {
                pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
                        __func__, &old_saddr, &new_saddr);
        }
@@ -1301,7 +1301,7 @@ int inet_sk_rebuild_header(struct sock *sk)
                 * Other protocols have to map its equivalent state to TCP_SYN_SENT.
                 * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
                 */
-               if (!sock_net(sk)->ipv4.sysctl_ip_dynaddr ||
+               if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) ||
                    sk->sk_state != TCP_SYN_SENT ||
                    (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
                    (err = inet_sk_reselect_saddr(sk)) != 0)
@@ -1710,24 +1710,14 @@ static const struct net_protocol igmp_protocol = {
 };
 #endif
 
-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct net_protocol tcp_protocol = {
-       .early_demux    =       tcp_v4_early_demux,
-       .early_demux_handler =  tcp_v4_early_demux,
+static const struct net_protocol tcp_protocol = {
        .handler        =       tcp_v4_rcv,
        .err_handler    =       tcp_v4_err,
        .no_policy      =       1,
        .icmp_strict_tag_validation = 1,
 };
 
-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct net_protocol udp_protocol = {
-       .early_demux =  udp_v4_early_demux,
-       .early_demux_handler =  udp_v4_early_demux,
+static const struct net_protocol udp_protocol = {
        .handler =      udp_rcv,
        .err_handler =  udp_err,
        .no_policy =    1,
index 6eea1e9..f8ad044 100644 (file)
@@ -507,7 +507,7 @@ static int ah_init_state(struct xfrm_state *x)
 
        if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
            crypto_ahash_digestsize(ahash)) {
-               pr_info("%s: %s digestsize %u != %hu\n",
+               pr_info("%s: %s digestsize %u != %u\n",
                        __func__, x->aalg->alg_name,
                        crypto_ahash_digestsize(ahash),
                        aalg_desc->uinfo.auth.icv_fullbits / 8);
index 62d5f99..6cd3b6c 100644 (file)
@@ -239,7 +239,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
        struct cipso_v4_map_cache_entry *prev_entry = NULL;
        u32 hash;
 
-       if (!cipso_v4_cache_enabled)
+       if (!READ_ONCE(cipso_v4_cache_enabled))
                return -ENOENT;
 
        hash = cipso_v4_map_cache_hash(key, key_len);
@@ -296,13 +296,14 @@ static int cipso_v4_cache_check(const unsigned char *key,
 int cipso_v4_cache_add(const unsigned char *cipso_ptr,
                       const struct netlbl_lsm_secattr *secattr)
 {
+       int bkt_size = READ_ONCE(cipso_v4_cache_bucketsize);
        int ret_val = -EPERM;
        u32 bkt;
        struct cipso_v4_map_cache_entry *entry = NULL;
        struct cipso_v4_map_cache_entry *old_entry = NULL;
        u32 cipso_ptr_len;
 
-       if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0)
+       if (!READ_ONCE(cipso_v4_cache_enabled) || bkt_size <= 0)
                return 0;
 
        cipso_ptr_len = cipso_ptr[1];
@@ -322,7 +323,7 @@ int cipso_v4_cache_add(const unsigned char *cipso_ptr,
 
        bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);
        spin_lock_bh(&cipso_v4_cache[bkt].lock);
-       if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) {
+       if (cipso_v4_cache[bkt].size < bkt_size) {
                list_add(&entry->list, &cipso_v4_cache[bkt].list);
                cipso_v4_cache[bkt].size += 1;
        } else {
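Beyond the annotation itself, cipso_v4_cache_add() now loads the bucket-size tunable once into a local. That matters because the value is used twice — once to decide whether the cache is usable, once as the per-bucket limit — and two independent READ_ONCE() loads could legitimately return different values if the sysctl is rewritten in between. A sketch of the rule, with the type and knob name hypothetical:

static bool bucket_has_room(const struct cache_bucket *b)
{
        int limit = READ_ONCE(cache_bucketsize);  /* one snapshot */

        if (limit <= 0)         /* caching disabled or nonsensical */
                return false;
        return b->size < limit; /* compares against the same snapshot */
}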
@@ -1199,7 +1200,8 @@ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
                /* This will send packets using the "optimized" format when
                 * possible as specified in  section 3.4.2.6 of the
                 * CIPSO draft. */
-               if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10)
+               if (READ_ONCE(cipso_v4_rbm_optfmt) && ret_val > 0 &&
+                   ret_val <= 10)
                        tag_len = 14;
                else
                        tag_len = 4 + ret_val;
@@ -1603,7 +1605,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
                         * all the CIPSO validations here but it doesn't
                         * really specify _exactly_ what we need to validate
                         * ... so, just make it a sysctl tunable. */
-                       if (cipso_v4_rbm_strictvalid) {
+                       if (READ_ONCE(cipso_v4_rbm_strictvalid)) {
                                if (cipso_v4_map_lvl_valid(doi_def,
                                                           tag[3]) < 0) {
                                        err_offset = opt_iter + 3;
index b21238d..b694f35 100644 (file)
@@ -1108,7 +1108,7 @@ static int esp_init_authenc(struct xfrm_state *x)
                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_aead_authsize(aead)) {
-                       pr_info("ESP: %s digestsize %u != %hu\n",
+                       pr_info("ESP: %s digestsize %u != %u\n",
                                x->aalg->alg_name,
                                crypto_aead_authsize(aead),
                                aalg_desc->uinfo.auth.icv_fullbits / 8);
index a57ba23..db7b250 100644 (file)
@@ -1230,7 +1230,7 @@ static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh,
 
        nh->fib_nh_dev = in_dev->dev;
        dev_hold_track(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
-       nh->fib_nh_scope = RT_SCOPE_HOST;
+       nh->fib_nh_scope = RT_SCOPE_LINK;
        if (!netif_carrier_ok(nh->fib_nh_dev))
                nh->fib_nh_flags |= RTNH_F_LINKDOWN;
        err = 0;
@@ -1811,7 +1811,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                        goto nla_put_failure;
                if (nexthop_is_blackhole(fi->nh))
                        rtm->rtm_type = RTN_BLACKHOLE;
-               if (!fi->fib_net->ipv4.sysctl_nexthop_compat_mode)
+               if (!READ_ONCE(fi->fib_net->ipv4.sysctl_nexthop_compat_mode))
                        goto offload;
        }
 
@@ -2216,7 +2216,7 @@ void fib_select_multipath(struct fib_result *res, int hash)
        }
 
        change_nexthops(fi) {
-               if (net->ipv4.sysctl_fib_multipath_use_neigh) {
+               if (READ_ONCE(net->ipv4.sysctl_fib_multipath_use_neigh)) {
                        if (!fib_good_nh(nexthop_nh))
                                continue;
                        if (!first) {
index 2734c3a..452ff17 100644 (file)
@@ -498,7 +498,7 @@ static void tnode_free(struct key_vector *tn)
                tn = container_of(head, struct tnode, rcu)->kv;
        }
 
-       if (tnode_free_size >= sysctl_fib_sync_mem) {
+       if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) {
                tnode_free_size = 0;
                synchronize_rcu();
        }
@@ -1042,6 +1042,7 @@ fib_find_matching_alias(struct net *net, const struct fib_rt_info *fri)
 
 void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
 {
+       u8 fib_notify_on_flag_change;
        struct fib_alias *fa_match;
        struct sk_buff *skb;
        int err;
@@ -1063,14 +1064,16 @@ void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
        WRITE_ONCE(fa_match->offload, fri->offload);
        WRITE_ONCE(fa_match->trap, fri->trap);
 
+       fib_notify_on_flag_change = READ_ONCE(net->ipv4.sysctl_fib_notify_on_flag_change);
+
        /* 2 means send notifications only if offload_failed was changed. */
-       if (net->ipv4.sysctl_fib_notify_on_flag_change == 2 &&
+       if (fib_notify_on_flag_change == 2 &&
            READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
                goto out;
 
        WRITE_ONCE(fa_match->offload_failed, fri->offload_failed);
 
-       if (!net->ipv4.sysctl_fib_notify_on_flag_change)
+       if (!fib_notify_on_flag_change)
                goto out;
 
        skb = nlmsg_new(fib_nlmsg_size(fa_match->fa_info), GFP_ATOMIC);
index efea0e7..d5d745c 100644 (file)
@@ -253,11 +253,12 @@ bool icmp_global_allow(void)
        spin_lock(&icmp_global.lock);
        delta = min_t(u32, now - icmp_global.stamp, HZ);
        if (delta >= HZ / 50) {
-               incr = sysctl_icmp_msgs_per_sec * delta / HZ ;
+               incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ;
                if (incr)
                        WRITE_ONCE(icmp_global.stamp, now);
        }
-       credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
+       credit = min_t(u32, icmp_global.credit + incr,
+                      READ_ONCE(sysctl_icmp_msgs_burst));
        if (credit) {
                /* We want to use a credit of one in average, but need to randomize
                 * it for security reasons.
@@ -281,7 +282,7 @@ static bool icmpv4_mask_allow(struct net *net, int type, int code)
                return true;
 
        /* Limit if icmp type is enabled in ratemask. */
-       if (!((1 << type) & net->ipv4.sysctl_icmp_ratemask))
+       if (!((1 << type) & READ_ONCE(net->ipv4.sysctl_icmp_ratemask)))
                return true;
 
        return false;
@@ -319,7 +320,8 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
 
        vif = l3mdev_master_ifindex(dst->dev);
        peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
-       rc = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit);
+       rc = inet_peer_xrlim_allow(peer,
+                                  READ_ONCE(net->ipv4.sysctl_icmp_ratelimit));
        if (peer)
                inet_putpeer(peer);
 out:
@@ -692,7 +694,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
 
                rcu_read_lock();
                if (rt_is_input_route(rt) &&
-                   net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
+                   READ_ONCE(net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr))
                        dev = dev_get_by_index_rcu(net, inet_iif(skb_in));
 
                if (dev)
@@ -879,7 +881,7 @@ static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
                         * values please see
                         * Documentation/networking/ip-sysctl.rst
                         */
-                       switch (net->ipv4.sysctl_ip_no_pmtu_disc) {
+                       switch (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) {
                        default:
                                net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n",
                                                    &iph->daddr);
@@ -932,7 +934,7 @@ static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
         *      get the other vendor to fix their kit.
         */
 
-       if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
+       if (!READ_ONCE(net->ipv4.sysctl_icmp_ignore_bogus_error_responses) &&
            inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) {
                net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
                                     &ip_hdr(skb)->saddr,
@@ -992,7 +994,7 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
 
        net = dev_net(skb_dst(skb)->dev);
        /* should there be an ICMP stat for ignored echos? */
-       if (net->ipv4.sysctl_icmp_echo_ignore_all)
+       if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all))
                return SKB_NOT_DROPPED_YET;
 
        icmp_param.data.icmph      = *icmp_hdr(skb);
@@ -1027,7 +1029,7 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
        u16 ident_len;
        u8 status;
 
-       if (!net->ipv4.sysctl_icmp_echo_enable_probe)
+       if (!READ_ONCE(net->ipv4.sysctl_icmp_echo_enable_probe))
                return false;
 
        /* We currently only support probing interfaces on the proxy node
@@ -1248,7 +1250,7 @@ int icmp_rcv(struct sk_buff *skb)
                 */
                if ((icmph->type == ICMP_ECHO ||
                     icmph->type == ICMP_TIMESTAMP) &&
-                   net->ipv4.sysctl_icmp_echo_ignore_broadcasts) {
+                   READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_broadcasts)) {
                        reason = SKB_DROP_REASON_INVALID_PROTO;
                        goto error;
                }
index b65d074..e3ab0cb 100644 (file)
@@ -467,7 +467,8 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
 
        if (pmc->multiaddr == IGMP_ALL_HOSTS)
                return skb;
-       if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
+       if (ipv4_is_local_multicast(pmc->multiaddr) &&
+           !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
                return skb;
 
        mtu = READ_ONCE(dev->mtu);
@@ -593,7 +594,7 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
                        if (pmc->multiaddr == IGMP_ALL_HOSTS)
                                continue;
                        if (ipv4_is_local_multicast(pmc->multiaddr) &&
-                            !net->ipv4.sysctl_igmp_llm_reports)
+                           !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
                                continue;
                        spin_lock_bh(&pmc->lock);
                        if (pmc->sfcount[MCAST_EXCLUDE])
@@ -736,7 +737,8 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
        if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
                return igmpv3_send_report(in_dev, pmc);
 
-       if (ipv4_is_local_multicast(group) && !net->ipv4.sysctl_igmp_llm_reports)
+       if (ipv4_is_local_multicast(group) &&
+           !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
                return 0;
 
        if (type == IGMP_HOST_LEAVE_MESSAGE)
@@ -825,7 +827,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
        struct net *net = dev_net(in_dev->dev);
        if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
                return;
-       WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv);
+       WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv));
        igmp_ifc_start_timer(in_dev, 1);
 }
 
@@ -920,7 +922,8 @@ static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
 
        if (group == IGMP_ALL_HOSTS)
                return false;
-       if (ipv4_is_local_multicast(group) && !net->ipv4.sysctl_igmp_llm_reports)
+       if (ipv4_is_local_multicast(group) &&
+           !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
                return false;
 
        rcu_read_lock();
@@ -1006,7 +1009,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                 * received value was zero, use the default or statically
                 * configured value.
                 */
-               in_dev->mr_qrv = ih3->qrv ?: net->ipv4.sysctl_igmp_qrv;
+               in_dev->mr_qrv = ih3->qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
                in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL;
 
                /* RFC3376, 8.3. Query Response Interval:
@@ -1045,7 +1048,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                if (im->multiaddr == IGMP_ALL_HOSTS)
                        continue;
                if (ipv4_is_local_multicast(im->multiaddr) &&
-                   !net->ipv4.sysctl_igmp_llm_reports)
+                   !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
                        continue;
                spin_lock_bh(&im->lock);
                if (im->tm_running)
@@ -1186,7 +1189,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
        pmc->interface = im->interface;
        in_dev_hold(in_dev);
        pmc->multiaddr = im->multiaddr;
-       pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+       pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
        pmc->sfmode = im->sfmode;
        if (pmc->sfmode == MCAST_INCLUDE) {
                struct ip_sf_list *psf;
@@ -1237,9 +1240,11 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
                        swap(im->tomb, pmc->tomb);
                        swap(im->sources, pmc->sources);
                        for (psf = im->sources; psf; psf = psf->sf_next)
-                               psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+                               psf->sf_crcount = in_dev->mr_qrv ?:
+                                       READ_ONCE(net->ipv4.sysctl_igmp_qrv);
                } else {
-                       im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+                       im->crcount = in_dev->mr_qrv ?:
+                               READ_ONCE(net->ipv4.sysctl_igmp_qrv);
                }
                in_dev_put(pmc->interface);
                kfree_pmc(pmc);
@@ -1296,7 +1301,8 @@ static void __igmp_group_dropped(struct ip_mc_list *im, gfp_t gfp)
 #ifdef CONFIG_IP_MULTICAST
        if (im->multiaddr == IGMP_ALL_HOSTS)
                return;
-       if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
+       if (ipv4_is_local_multicast(im->multiaddr) &&
+           !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
                return;
 
        reporter = im->reporter;
@@ -1338,13 +1344,14 @@ static void igmp_group_added(struct ip_mc_list *im)
 #ifdef CONFIG_IP_MULTICAST
        if (im->multiaddr == IGMP_ALL_HOSTS)
                return;
-       if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
+       if (ipv4_is_local_multicast(im->multiaddr) &&
+           !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
                return;
 
        if (in_dev->dead)
                return;
 
-       im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
+       im->unsolicit_count = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
        if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
                spin_lock_bh(&im->lock);
                igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
@@ -1358,7 +1365,7 @@ static void igmp_group_added(struct ip_mc_list *im)
         * IN() to IN(A).
         */
        if (im->sfmode == MCAST_EXCLUDE)
-               im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+               im->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
 
        igmp_ifc_event(in_dev);
 #endif
@@ -1642,7 +1649,7 @@ static void ip_mc_rejoin_groups(struct in_device *in_dev)
                if (im->multiaddr == IGMP_ALL_HOSTS)
                        continue;
                if (ipv4_is_local_multicast(im->multiaddr) &&
-                   !net->ipv4.sysctl_igmp_llm_reports)
+                   !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
                        continue;
 
                /* a failover is happening and switches
@@ -1749,7 +1756,7 @@ static void ip_mc_reset(struct in_device *in_dev)
 
        in_dev->mr_qi = IGMP_QUERY_INTERVAL;
        in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL;
-       in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
+       in_dev->mr_qrv = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
 }
 #else
 static void ip_mc_reset(struct in_device *in_dev)
@@ -1883,7 +1890,7 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
 #ifdef CONFIG_IP_MULTICAST
                if (psf->sf_oldin &&
                    !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
-                       psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+                       psf->sf_crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
                        psf->sf_next = pmc->tomb;
                        pmc->tomb = psf;
                        rv = 1;
@@ -1947,7 +1954,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
                /* filter mode change */
                pmc->sfmode = MCAST_INCLUDE;
 #ifdef CONFIG_IP_MULTICAST
-               pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+               pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
                WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
                for (psf = pmc->sources; psf; psf = psf->sf_next)
                        psf->sf_crcount = 0;
@@ -2126,7 +2133,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 #ifdef CONFIG_IP_MULTICAST
                /* else no filters; keep old mode for reports */
 
-               pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+               pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
                WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
                for (psf = pmc->sources; psf; psf = psf->sf_next)
                        psf->sf_crcount = 0;
@@ -2192,7 +2199,7 @@ static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
                count++;
        }
        err = -ENOBUFS;
-       if (count >= net->ipv4.sysctl_igmp_max_memberships)
+       if (count >= READ_ONCE(net->ipv4.sysctl_igmp_max_memberships))
                goto done;
        iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
        if (!iml)
@@ -2379,7 +2386,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
        }
        /* else, add a new source to the filter */
 
-       if (psl && psl->sl_count >= net->ipv4.sysctl_igmp_max_msf) {
+       if (psl && psl->sl_count >= READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
                err = -ENOBUFS;
                goto done;
        }
index c0b7e6c..eb31c71 100644 (file)
@@ -117,32 +117,6 @@ bool inet_rcv_saddr_any(const struct sock *sk)
        return !sk->sk_rcv_saddr;
 }
 
-static bool use_bhash2_on_bind(const struct sock *sk)
-{
-#if IS_ENABLED(CONFIG_IPV6)
-       int addr_type;
-
-       if (sk->sk_family == AF_INET6) {
-               addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
-               return addr_type != IPV6_ADDR_ANY &&
-                       addr_type != IPV6_ADDR_MAPPED;
-       }
-#endif
-       return sk->sk_rcv_saddr != htonl(INADDR_ANY);
-}
-
-static u32 get_bhash2_nulladdr_hash(const struct sock *sk, struct net *net,
-                                   int port)
-{
-#if IS_ENABLED(CONFIG_IPV6)
-       struct in6_addr nulladdr = {};
-
-       if (sk->sk_family == AF_INET6)
-               return ipv6_portaddr_hash(net, &nulladdr, port);
-#endif
-       return ipv4_portaddr_hash(net, 0, port);
-}
-
 void inet_get_local_port_range(struct net *net, int *low, int *high)
 {
        unsigned int seq;
@@ -156,71 +130,16 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
 
-static bool bind_conflict_exist(const struct sock *sk, struct sock *sk2,
-                               kuid_t sk_uid, bool relax,
-                               bool reuseport_cb_ok, bool reuseport_ok)
-{
-       int bound_dev_if2;
-
-       if (sk == sk2)
-               return false;
-
-       bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);
-
-       if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
-           sk->sk_bound_dev_if == bound_dev_if2) {
-               if (sk->sk_reuse && sk2->sk_reuse &&
-                   sk2->sk_state != TCP_LISTEN) {
-                       if (!relax || (!reuseport_ok && sk->sk_reuseport &&
-                                      sk2->sk_reuseport && reuseport_cb_ok &&
-                                      (sk2->sk_state == TCP_TIME_WAIT ||
-                                       uid_eq(sk_uid, sock_i_uid(sk2)))))
-                               return true;
-               } else if (!reuseport_ok || !sk->sk_reuseport ||
-                          !sk2->sk_reuseport || !reuseport_cb_ok ||
-                          (sk2->sk_state != TCP_TIME_WAIT &&
-                           !uid_eq(sk_uid, sock_i_uid(sk2)))) {
-                       return true;
-               }
-       }
-       return false;
-}
-
-static bool check_bhash2_conflict(const struct sock *sk,
-                                 struct inet_bind2_bucket *tb2, kuid_t sk_uid,
-                                 bool relax, bool reuseport_cb_ok,
-                                 bool reuseport_ok)
-{
-       struct sock *sk2;
-
-       sk_for_each_bound_bhash2(sk2, &tb2->owners) {
-               if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
-                       continue;
-
-               if (bind_conflict_exist(sk, sk2, sk_uid, relax,
-                                       reuseport_cb_ok, reuseport_ok))
-                       return true;
-       }
-       return false;
-}
-
-/* This should be called only when the corresponding inet_bind_bucket spinlock
- * is held
- */
-static int inet_csk_bind_conflict(const struct sock *sk, int port,
-                                 struct inet_bind_bucket *tb,
-                                 struct inet_bind2_bucket *tb2, /* may be null */
+static int inet_csk_bind_conflict(const struct sock *sk,
+                                 const struct inet_bind_bucket *tb,
                                  bool relax, bool reuseport_ok)
 {
-       struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
-       kuid_t uid = sock_i_uid((struct sock *)sk);
-       struct sock_reuseport *reuseport_cb;
-       struct inet_bind2_hashbucket *head2;
-       bool reuseport_cb_ok;
        struct sock *sk2;
-       struct net *net;
-       int l3mdev;
-       u32 hash;
+       bool reuseport_cb_ok;
+       bool reuse = sk->sk_reuse;
+       bool reuseport = !!sk->sk_reuseport;
+       struct sock_reuseport *reuseport_cb;
+       kuid_t uid = sock_i_uid((struct sock *)sk);
 
        rcu_read_lock();
        reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
@@ -231,42 +150,40 @@ static int inet_csk_bind_conflict(const struct sock *sk, int port,
        /*
         * Unlike other sk lookup places we do not check
         * for sk_net here, since _all_ the socks listed
-        * in tb->owners and tb2->owners list belong
-        * to the same net
+        * in tb->owners list belong to the same net - the
+        * one this bucket belongs to.
         */
 
-       if (!use_bhash2_on_bind(sk)) {
-               sk_for_each_bound(sk2, &tb->owners)
-                       if (bind_conflict_exist(sk, sk2, uid, relax,
-                                               reuseport_cb_ok, reuseport_ok) &&
-                           inet_rcv_saddr_equal(sk, sk2, true))
-                               return true;
+       sk_for_each_bound(sk2, &tb->owners) {
+               int bound_dev_if2;
 
-               return false;
+               if (sk == sk2)
+                       continue;
+               bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);
+               if ((!sk->sk_bound_dev_if ||
+                    !bound_dev_if2 ||
+                    sk->sk_bound_dev_if == bound_dev_if2)) {
+                       if (reuse && sk2->sk_reuse &&
+                           sk2->sk_state != TCP_LISTEN) {
+                               if ((!relax ||
+                                    (!reuseport_ok &&
+                                     reuseport && sk2->sk_reuseport &&
+                                     reuseport_cb_ok &&
+                                     (sk2->sk_state == TCP_TIME_WAIT ||
+                                      uid_eq(uid, sock_i_uid(sk2))))) &&
+                                   inet_rcv_saddr_equal(sk, sk2, true))
+                                       break;
+                       } else if (!reuseport_ok ||
+                                  !reuseport || !sk2->sk_reuseport ||
+                                  !reuseport_cb_ok ||
+                                  (sk2->sk_state != TCP_TIME_WAIT &&
+                                   !uid_eq(uid, sock_i_uid(sk2)))) {
+                               if (inet_rcv_saddr_equal(sk, sk2, true))
+                                       break;
+                       }
+               }
        }
-
-       if (tb2 && check_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
-                                        reuseport_ok))
-               return true;
-
-       net = sock_net(sk);
-
-       /* check there's no conflict with an existing IPV6_ADDR_ANY (if ipv6) or
-        * INADDR_ANY (if ipv4) socket.
-        */
-       hash = get_bhash2_nulladdr_hash(sk, net, port);
-       head2 = &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
-
-       l3mdev = inet_sk_bound_l3mdev(sk);
-       inet_bind_bucket_for_each(tb2, &head2->chain)
-               if (check_bind2_bucket_match_nulladdr(tb2, net, port, l3mdev, sk))
-                       break;
-
-       if (tb2 && check_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
-                                        reuseport_ok))
-               return true;
-
-       return false;
+       return sk2 != NULL;
 }
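This restores the pre-bhash2 conflict walk (the bhash2 series is reverted here); the loop breaks at the first conflicting owner, so the final sk2 != NULL encodes "conflict found". Stripped of the relax, uid, and TIME_WAIT refinements — which the real loop does apply — the per-pair rule is roughly the following sketch, not a drop-in replacement:

/* Simplified sketch only: the real code additionally lets a
 * SO_REUSEPORT pair share when the owners' uids match or sk2 is in
 * TIME_WAIT, and it only declares a conflict after
 * inet_rcv_saddr_equal() confirms the addresses overlap.
 */
static bool pair_conflicts(const struct sock *sk, const struct sock *sk2,
                           bool reuseport_ok)
{
        if (sk->sk_bound_dev_if && sk2->sk_bound_dev_if &&
            sk->sk_bound_dev_if != sk2->sk_bound_dev_if)
                return false;   /* bound to different devices */

        if (sk->sk_reuse && sk2->sk_reuse && sk2->sk_state != TCP_LISTEN)
                return false;   /* compatible SO_REUSEADDR pair */

        return !(reuseport_ok && sk->sk_reuseport && sk2->sk_reuseport);
}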
 
 /*
@@ -274,20 +191,16 @@ static int inet_csk_bind_conflict(const struct sock *sk, int port,
  * inet_bind_hashbucket lock held.
  */
 static struct inet_bind_hashbucket *
-inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret,
-                       struct inet_bind2_bucket **tb2_ret,
-                       struct inet_bind2_hashbucket **head2_ret, int *port_ret)
+inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret)
 {
        struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
-       struct inet_bind2_hashbucket *head2;
+       int port = 0;
        struct inet_bind_hashbucket *head;
        struct net *net = sock_net(sk);
+       bool relax = false;
        int i, low, high, attempt_half;
-       struct inet_bind2_bucket *tb2;
        struct inet_bind_bucket *tb;
        u32 remaining, offset;
-       bool relax = false;
-       int port = 0;
        int l3mdev;
 
        l3mdev = inet_sk_bound_l3mdev(sk);
@@ -326,12 +239,10 @@ other_parity_scan:
                head = &hinfo->bhash[inet_bhashfn(net, port,
                                                  hinfo->bhash_size)];
                spin_lock_bh(&head->lock);
-               tb2 = inet_bind2_bucket_find(hinfo, net, port, l3mdev, sk,
-                                            &head2);
                inet_bind_bucket_for_each(tb, &head->chain)
-                       if (check_bind_bucket_match(tb, net, port, l3mdev)) {
-                               if (!inet_csk_bind_conflict(sk, port, tb, tb2,
-                                                           relax, false))
+                       if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+                           tb->port == port) {
+                               if (!inet_csk_bind_conflict(sk, tb, relax, false))
                                        goto success;
                                goto next_port;
                        }
@@ -352,7 +263,7 @@ next_port:
                goto other_half_scan;
        }
 
-       if (net->ipv4.sysctl_ip_autobind_reuse && !relax) {
+       if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
                /* We still have a chance to connect to different destinations */
                relax = true;
                goto ports_exhausted;
@@ -361,8 +272,6 @@ next_port:
 success:
        *port_ret = port;
        *tb_ret = tb;
-       *tb2_ret = tb2;
-       *head2_ret = head2;
        return head;
 }
 
@@ -458,81 +367,54 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
 {
        bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
        struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
-       bool bhash_created = false, bhash2_created = false;
-       struct inet_bind2_bucket *tb2 = NULL;
-       struct inet_bind2_hashbucket *head2;
-       struct inet_bind_bucket *tb = NULL;
+       int ret = 1, port = snum;
        struct inet_bind_hashbucket *head;
        struct net *net = sock_net(sk);
-       int ret = 1, port = snum;
-       bool found_port = false;
+       struct inet_bind_bucket *tb = NULL;
        int l3mdev;
 
        l3mdev = inet_sk_bound_l3mdev(sk);
 
        if (!port) {
-               head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
+               head = inet_csk_find_open_port(sk, &tb, &port);
                if (!head)
                        return ret;
-               if (tb && tb2)
-                       goto success;
-               found_port = true;
-       } else {
-               head = &hinfo->bhash[inet_bhashfn(net, port,
-                                                 hinfo->bhash_size)];
-               spin_lock_bh(&head->lock);
-               inet_bind_bucket_for_each(tb, &head->chain)
-                       if (check_bind_bucket_match(tb, net, port, l3mdev))
-                               break;
-
-               tb2 = inet_bind2_bucket_find(hinfo, net, port, l3mdev, sk,
-                                            &head2);
-       }
-
-       if (!tb) {
-               tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
-                                            head, port, l3mdev);
                if (!tb)
-                       goto fail_unlock;
-               bhash_created = true;
-       }
-
-       if (!tb2) {
-               tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
-                                              net, head2, port, l3mdev, sk);
-               if (!tb2)
-                       goto fail_unlock;
-               bhash2_created = true;
+                       goto tb_not_found;
+               goto success;
        }
-
-       /* If we had to find an open port, we already checked for conflicts */
-       if (!found_port && !hlist_empty(&tb->owners)) {
+       head = &hinfo->bhash[inet_bhashfn(net, port,
+                                         hinfo->bhash_size)];
+       spin_lock_bh(&head->lock);
+       inet_bind_bucket_for_each(tb, &head->chain)
+               if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+                   tb->port == port)
+                       goto tb_found;
+tb_not_found:
+       tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
+                                    net, head, port, l3mdev);
+       if (!tb)
+               goto fail_unlock;
+tb_found:
+       if (!hlist_empty(&tb->owners)) {
                if (sk->sk_reuse == SK_FORCE_REUSE)
                        goto success;
 
                if ((tb->fastreuse > 0 && reuse) ||
                    sk_reuseport_match(tb, sk))
                        goto success;
-               if (inet_csk_bind_conflict(sk, port, tb, tb2, true, true))
+               if (inet_csk_bind_conflict(sk, tb, true, true))
                        goto fail_unlock;
        }
 success:
        inet_csk_update_fastreuse(tb, sk);
 
        if (!inet_csk(sk)->icsk_bind_hash)
-               inet_bind_hash(sk, tb, tb2, port);
+               inet_bind_hash(sk, tb, port);
        WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
-       WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
        ret = 0;
 
 fail_unlock:
-       if (ret) {
-               if (bhash_created)
-                       inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
-               if (bhash2_created)
-                       inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
-                                                 tb2);
-       }
        spin_unlock_bh(&head->lock);
        return ret;
 }
@@ -951,7 +833,8 @@ static void reqsk_timer_handler(struct timer_list *t)
 
        icsk = inet_csk(sk_listener);
        net = sock_net(sk_listener);
-       max_syn_ack_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
+       max_syn_ack_retries = icsk->icsk_syn_retries ? :
+               READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
        /* Normally all the openreqs are young and become mature
         * (i.e. converted to established socket) for first timeout.
         * If synack was not acknowledged for 1 second, it means
@@ -1079,7 +962,6 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
 
                inet_sk_set_state(newsk, TCP_SYN_RECV);
                newicsk->icsk_bind_hash = NULL;
-               newicsk->icsk_bind2_hash = NULL;
 
                inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
                inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
index 545f91b..b9d995b 100644 (file)
@@ -81,41 +81,6 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
        return tb;
 }
 
-struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
-                                                  struct net *net,
-                                                  struct inet_bind2_hashbucket *head,
-                                                  const unsigned short port,
-                                                  int l3mdev,
-                                                  const struct sock *sk)
-{
-       struct inet_bind2_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
-
-       if (tb) {
-               write_pnet(&tb->ib_net, net);
-               tb->l3mdev    = l3mdev;
-               tb->port      = port;
-#if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == AF_INET6)
-                       tb->v6_rcv_saddr = sk->sk_v6_rcv_saddr;
-               else
-#endif
-                       tb->rcv_saddr = sk->sk_rcv_saddr;
-               INIT_HLIST_HEAD(&tb->owners);
-               hlist_add_head(&tb->node, &head->chain);
-       }
-       return tb;
-}
-
-static bool bind2_bucket_addr_match(struct inet_bind2_bucket *tb2, struct sock *sk)
-{
-#if IS_ENABLED(CONFIG_IPV6)
-       if (sk->sk_family == AF_INET6)
-               return ipv6_addr_equal(&tb2->v6_rcv_saddr,
-                                      &sk->sk_v6_rcv_saddr);
-#endif
-       return tb2->rcv_saddr == sk->sk_rcv_saddr;
-}
-
 /*
  * Caller must hold hashbucket lock for this tb with local BH disabled
  */
@@ -127,25 +92,12 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket
        }
 }
 
-/* Caller must hold the lock for the corresponding hashbucket in the bhash table
- * with local BH disabled
- */
-void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
-{
-       if (hlist_empty(&tb->owners)) {
-               __hlist_del(&tb->node);
-               kmem_cache_free(cachep, tb);
-       }
-}
-
 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
-                   struct inet_bind2_bucket *tb2, const unsigned short snum)
+                   const unsigned short snum)
 {
        inet_sk(sk)->inet_num = snum;
        sk_add_bind_node(sk, &tb->owners);
        inet_csk(sk)->icsk_bind_hash = tb;
-       sk_add_bind2_node(sk, &tb2->owners);
-       inet_csk(sk)->icsk_bind2_hash = tb2;
 }
 
 /*
@@ -157,7 +109,6 @@ static void __inet_put_port(struct sock *sk)
        const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
                        hashinfo->bhash_size);
        struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
-       struct inet_bind2_bucket *tb2;
        struct inet_bind_bucket *tb;
 
        spin_lock(&head->lock);
@@ -166,13 +117,6 @@ static void __inet_put_port(struct sock *sk)
        inet_csk(sk)->icsk_bind_hash = NULL;
        inet_sk(sk)->inet_num = 0;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
-
-       if (inet_csk(sk)->icsk_bind2_hash) {
-               tb2 = inet_csk(sk)->icsk_bind2_hash;
-               __sk_del_bind2_node(sk);
-               inet_csk(sk)->icsk_bind2_hash = NULL;
-               inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
-       }
        spin_unlock(&head->lock);
 }
 
@@ -189,19 +133,14 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
        struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
        unsigned short port = inet_sk(child)->inet_num;
        const int bhash = inet_bhashfn(sock_net(sk), port,
-                                      table->bhash_size);
+                       table->bhash_size);
        struct inet_bind_hashbucket *head = &table->bhash[bhash];
-       struct inet_bind2_hashbucket *head_bhash2;
-       bool created_inet_bind_bucket = false;
-       struct net *net = sock_net(sk);
-       struct inet_bind2_bucket *tb2;
        struct inet_bind_bucket *tb;
        int l3mdev;
 
        spin_lock(&head->lock);
        tb = inet_csk(sk)->icsk_bind_hash;
-       tb2 = inet_csk(sk)->icsk_bind2_hash;
-       if (unlikely(!tb || !tb2)) {
+       if (unlikely(!tb)) {
                spin_unlock(&head->lock);
                return -ENOENT;
        }
@@ -214,45 +153,25 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
                 * as that of the child socket. We have to look up or
                 * create a new bind bucket for the child here. */
                inet_bind_bucket_for_each(tb, &head->chain) {
-                       if (check_bind_bucket_match(tb, net, port, l3mdev))
+                       if (net_eq(ib_net(tb), sock_net(sk)) &&
+                           tb->l3mdev == l3mdev && tb->port == port)
                                break;
                }
                if (!tb) {
                        tb = inet_bind_bucket_create(table->bind_bucket_cachep,
-                                                    net, head, port, l3mdev);
+                                                    sock_net(sk), head, port,
+                                                    l3mdev);
                        if (!tb) {
                                spin_unlock(&head->lock);
                                return -ENOMEM;
                        }
-                       created_inet_bind_bucket = true;
                }
                inet_csk_update_fastreuse(tb, child);
-
-               goto bhash2_find;
-       } else if (!bind2_bucket_addr_match(tb2, child)) {
-               l3mdev = inet_sk_bound_l3mdev(sk);
-
-bhash2_find:
-               tb2 = inet_bind2_bucket_find(table, net, port, l3mdev, child,
-                                            &head_bhash2);
-               if (!tb2) {
-                       tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep,
-                                                      net, head_bhash2, port,
-                                                      l3mdev, child);
-                       if (!tb2)
-                               goto error;
-               }
        }
-       inet_bind_hash(child, tb, tb2, port);
+       inet_bind_hash(child, tb, port);
        spin_unlock(&head->lock);
 
        return 0;
-
-error:
-       if (created_inet_bind_bucket)
-               inet_bind_bucket_destroy(table->bind_bucket_cachep, tb);
-       spin_unlock(&head->lock);
-       return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(__inet_inherit_port);
 
@@ -756,76 +675,6 @@ void inet_unhash(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(inet_unhash);
 
-static bool check_bind2_bucket_match(struct inet_bind2_bucket *tb,
-                                    struct net *net, unsigned short port,
-                                    int l3mdev, struct sock *sk)
-{
-#if IS_ENABLED(CONFIG_IPV6)
-       if (sk->sk_family == AF_INET6)
-               return net_eq(ib2_net(tb), net) && tb->port == port &&
-                       tb->l3mdev == l3mdev &&
-                       ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
-       else
-#endif
-               return net_eq(ib2_net(tb), net) && tb->port == port &&
-                       tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr;
-}
-
-bool check_bind2_bucket_match_nulladdr(struct inet_bind2_bucket *tb,
-                                      struct net *net, const unsigned short port,
-                                      int l3mdev, const struct sock *sk)
-{
-#if IS_ENABLED(CONFIG_IPV6)
-       struct in6_addr nulladdr = {};
-
-       if (sk->sk_family == AF_INET6)
-               return net_eq(ib2_net(tb), net) && tb->port == port &&
-                       tb->l3mdev == l3mdev &&
-                       ipv6_addr_equal(&tb->v6_rcv_saddr, &nulladdr);
-       else
-#endif
-               return net_eq(ib2_net(tb), net) && tb->port == port &&
-                       tb->l3mdev == l3mdev && tb->rcv_saddr == 0;
-}
-
-static struct inet_bind2_hashbucket *
-inet_bhashfn_portaddr(struct inet_hashinfo *hinfo, const struct sock *sk,
-                     const struct net *net, unsigned short port)
-{
-       u32 hash;
-
-#if IS_ENABLED(CONFIG_IPV6)
-       if (sk->sk_family == AF_INET6)
-               hash = ipv6_portaddr_hash(net, &sk->sk_v6_rcv_saddr, port);
-       else
-#endif
-               hash = ipv4_portaddr_hash(net, sk->sk_rcv_saddr, port);
-       return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
-}
-
-/* This should only be called when the spinlock for the socket's corresponding
- * bind_hashbucket is held
- */
-struct inet_bind2_bucket *
-inet_bind2_bucket_find(struct inet_hashinfo *hinfo, struct net *net,
-                      const unsigned short port, int l3mdev, struct sock *sk,
-                      struct inet_bind2_hashbucket **head)
-{
-       struct inet_bind2_bucket *bhash2 = NULL;
-       struct inet_bind2_hashbucket *h;
-
-       h = inet_bhashfn_portaddr(hinfo, sk, net, port);
-       inet_bind_bucket_for_each(bhash2, &h->chain) {
-               if (check_bind2_bucket_match(bhash2, net, port, l3mdev, sk))
-                       break;
-       }
-
-       if (head)
-               *head = h;
-
-       return bhash2;
-}
-
 /* RFC 6056 3.3.4.  Algorithm 4: Double-Hash Port Selection Algorithm
  * Note that we use 32bit integers (vs RFC 'short integers')
  * because 2^16 is not a multiple of num_ephemeral and this
@@ -846,13 +695,10 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 {
        struct inet_hashinfo *hinfo = death_row->hashinfo;
        struct inet_timewait_sock *tw = NULL;
-       struct inet_bind2_hashbucket *head2;
        struct inet_bind_hashbucket *head;
        int port = inet_sk(sk)->inet_num;
        struct net *net = sock_net(sk);
-       struct inet_bind2_bucket *tb2;
        struct inet_bind_bucket *tb;
-       bool tb_created = false;
        u32 remaining, offset;
        int ret, i, low, high;
        int l3mdev;
@@ -909,7 +755,8 @@ other_parity_scan:
                 * the established check is already unique enough.
                 */
                inet_bind_bucket_for_each(tb, &head->chain) {
-                       if (check_bind_bucket_match(tb, net, port, l3mdev)) {
+                       if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+                           tb->port == port) {
                                if (tb->fastreuse >= 0 ||
                                    tb->fastreuseport >= 0)
                                        goto next_port;
@@ -927,7 +774,6 @@ other_parity_scan:
                        spin_unlock_bh(&head->lock);
                        return -ENOMEM;
                }
-               tb_created = true;
                tb->fastreuse = -1;
                tb->fastreuseport = -1;
                goto ok;
@@ -943,17 +789,6 @@ next_port:
        return -EADDRNOTAVAIL;
 
 ok:
-       /* Find the corresponding tb2 bucket since we need to
-        * add the socket to the bhash2 table as well
-        */
-       tb2 = inet_bind2_bucket_find(hinfo, net, port, l3mdev, sk, &head2);
-       if (!tb2) {
-               tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net,
-                                              head2, port, l3mdev, sk);
-               if (!tb2)
-                       goto error;
-       }
-
        /* Here we want to add a little bit of randomness to the next source
         * port that will be chosen. We use a max() with a random here so that
         * on low contention the randomness is maximal and on high contention
@@ -963,7 +798,7 @@ ok:
        WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
 
        /* Head lock still held and bh's disabled */
-       inet_bind_hash(sk, tb, tb2, port);
+       inet_bind_hash(sk, tb, port);
        if (sk_unhashed(sk)) {
                inet_sk(sk)->inet_sport = htons(port);
                inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
@@ -975,12 +810,6 @@ ok:
                inet_twsk_deschedule_put(tw);
        local_bh_enable();
        return 0;
-
-error:
-       if (tb_created)
-               inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
-       spin_unlock_bh(&head->lock);
-       return -ENOMEM;
 }
 
 /*
index 0ec5018..47ccc34 100644 (file)
@@ -156,7 +156,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
 {
        struct inet_timewait_sock *tw;
 
-       if (refcount_read(&dr->tw_refcount) - 1 >= dr->sysctl_max_tw_buckets)
+       if (refcount_read(&dr->tw_refcount) - 1 >=
+           READ_ONCE(dr->sysctl_max_tw_buckets))
                return NULL;
 
        tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
index da21dfc..e9fed83 100644 (file)
@@ -141,16 +141,20 @@ static void inet_peer_gc(struct inet_peer_base *base,
                         struct inet_peer *gc_stack[],
                         unsigned int gc_cnt)
 {
+       int peer_threshold, peer_maxttl, peer_minttl;
        struct inet_peer *p;
        __u32 delta, ttl;
        int i;
 
-       if (base->total >= inet_peer_threshold)
+       peer_threshold = READ_ONCE(inet_peer_threshold);
+       peer_maxttl = READ_ONCE(inet_peer_maxttl);
+       peer_minttl = READ_ONCE(inet_peer_minttl);
+
+       if (base->total >= peer_threshold)
                ttl = 0; /* be aggressive */
        else
-               ttl = inet_peer_maxttl
-                               - (inet_peer_maxttl - inet_peer_minttl) / HZ *
-                                       base->total / inet_peer_threshold * HZ;
+               ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
+                       base->total / peer_threshold * HZ;
        for (i = 0; i < gc_cnt; i++) {
                p = gc_stack[i];
 
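The rewrite above is behaviour-preserving: the interpolation gives entries the full peer_maxttl when the pool is empty and shades linearly toward peer_minttl as base->total approaches peer_threshold, with the /HZ ... *HZ split keeping the 32-bit intermediate product from overflowing. A worked example using what are believed to be the defaults from inetpeer.c (assumed here, not taken from this diff):

static u32 peer_ttl(u32 total, u32 threshold, u32 minttl, u32 maxttl)
{
        if (total >= threshold)
                return 0;       /* pool under pressure: evict aggressively */
        return maxttl - (maxttl - minttl) / HZ * total / threshold * HZ;
}

/* With minttl = 120 * HZ, maxttl = 600 * HZ, threshold = 65664:
 * a half-full pool (total = 32832) yields 360 * HZ, i.e. 6 minutes.
 */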
index e3aa436..e18931a 100644 (file)
@@ -157,7 +157,7 @@ int ip_forward(struct sk_buff *skb)
            !skb_sec_path(skb))
                ip_rt_send_redirect(skb);
 
-       if (net->ipv4.sysctl_ip_fwd_update_priority)
+       if (READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority))
                skb->priority = rt_tos2priority(iph->tos);
 
        return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
index 3b9cd48..5c58e21 100644 (file)
@@ -524,7 +524,6 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
        int tunnel_hlen;
        int version;
        int nhoff;
-       int thoff;
 
        tun_info = skb_tunnel_info(skb);
        if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
@@ -558,10 +557,16 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
            (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
                truncate = true;
 
-       thoff = skb_transport_header(skb) - skb_mac_header(skb);
-       if (skb->protocol == htons(ETH_P_IPV6) &&
-           (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
-               truncate = true;
+       if (skb->protocol == htons(ETH_P_IPV6)) {
+               int thoff;
+
+               if (skb_transport_header_was_set(skb))
+                       thoff = skb_transport_header(skb) - skb_mac_header(skb);
+               else
+                       thoff = nhoff + sizeof(struct ipv6hdr);
+               if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
+                       truncate = true;
+       }
 
        if (version == 1) {
                erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
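This fix matters because erspan_fb_xmit() runs for flow-based (external-mode) tunnels, where the skb can arrive without a transport header ever having been set; the old unconditional pointer arithmetic then produced a bogus thoff and a spurious (or missed) truncation. The guard generalizes to any path that cannot trust the offset, along these lines:

/* Sketch: payload offset for an IPv6 skb whose transport header may
 * be unset (nhoff = network-header offset from the mac header).
 */
static int ipv6_thoff(const struct sk_buff *skb, int nhoff)
{
        if (skb_transport_header_was_set(skb))
                return skb_transport_header(skb) - skb_mac_header(skb);
        return nhoff + sizeof(struct ipv6hdr);  /* fixed 40-byte header */
}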
index b1165f7..1b51239 100644 (file)
@@ -312,14 +312,13 @@ static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
               ip_hdr(hint)->tos == iph->tos;
 }
 
-INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *));
-INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *));
+int tcp_v4_early_demux(struct sk_buff *skb);
+int udp_v4_early_demux(struct sk_buff *skb);
 static int ip_rcv_finish_core(struct net *net, struct sock *sk,
                              struct sk_buff *skb, struct net_device *dev,
                              const struct sk_buff *hint)
 {
        const struct iphdr *iph = ip_hdr(skb);
-       int (*edemux)(struct sk_buff *skb);
        int err, drop_reason;
        struct rtable *rt;
 
@@ -332,21 +331,29 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
                        goto drop_error;
        }
 
-       if (net->ipv4.sysctl_ip_early_demux &&
+       if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
            !skb_dst(skb) &&
            !skb->sk &&
            !ip_is_fragment(iph)) {
-               const struct net_protocol *ipprot;
-               int protocol = iph->protocol;
-
-               ipprot = rcu_dereference(inet_protos[protocol]);
-               if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
-                       err = INDIRECT_CALL_2(edemux, tcp_v4_early_demux,
-                                             udp_v4_early_demux, skb);
-                       if (unlikely(err))
-                               goto drop_error;
-                       /* must reload iph, skb->head might have changed */
-                       iph = ip_hdr(skb);
+               switch (iph->protocol) {
+               case IPPROTO_TCP:
+                       if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) {
+                               tcp_v4_early_demux(skb);
+
+                               /* must reload iph, skb->head might have changed */
+                               iph = ip_hdr(skb);
+                       }
+                       break;
+               case IPPROTO_UDP:
+                       if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) {
+                               err = udp_v4_early_demux(skb);
+                               if (unlikely(err))
+                                       goto drop_error;
+
+                               /* must reload iph, skb->head might have changed */
+                               iph = ip_hdr(skb);
+                       }
+                       break;
                }
        }
 
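Together with the af_inet.c hunk that made tcp_protocol/udp_protocol const, this replaces a function pointer loaded from a writable structure with static dispatch: no indirect call (and retpoline) per packet, no data race on ->early_demux, and the per-protocol knobs are read with READ_ONCE() at the call site. The shape of the change, compressed into a sketch:

/* before (sketch): racy pointer load + indirect call
 *      edemux = READ_ONCE(ipprot->early_demux);
 *      if (edemux)
 *              edemux(skb);
 */
static void early_demux(struct net *net, struct sk_buff *skb, u8 proto)
{
        switch (proto) {        /* after: direct, predictable calls */
        case IPPROTO_TCP:
                if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
                        tcp_v4_early_demux(skb);
                break;
        case IPPROTO_UDP:
                if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
                        udp_v4_early_demux(skb);
                break;
        }
}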
index 445a9ec..a8a323e 100644 (file)
@@ -782,7 +782,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
        /* numsrc >= (4G-140)/128 overflow in 32 bits */
        err = -ENOBUFS;
        if (gsf->gf_numsrc >= 0x1ffffff ||
-           gsf->gf_numsrc > sock_net(sk)->ipv4.sysctl_igmp_max_msf)
+           gsf->gf_numsrc > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
                goto out_free_gsf;
 
        err = -EINVAL;
@@ -832,7 +832,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
 
        /* numsrc >= (4G-140)/128 overflow in 32 bits */
        err = -ENOBUFS;
-       if (n > sock_net(sk)->ipv4.sysctl_igmp_max_msf)
+       if (n > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
                goto out_free_gsf;
        err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode,
                                 &gf32->gf_group, gf32->gf_slist_flex);
@@ -1244,7 +1244,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname,
                }
                /* numsrc >= (1G-4) overflow in 32 bits */
                if (msf->imsf_numsrc >= 0x3ffffffcU ||
-                   msf->imsf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
+                   msf->imsf_numsrc > READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
                        kfree(msf);
                        err = -ENOBUFS;
                        break;
@@ -1606,7 +1606,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
        {
                struct net *net = sock_net(sk);
                val = (inet->uc_ttl == -1 ?
-                      net->ipv4.sysctl_ip_default_ttl :
+                      READ_ONCE(net->ipv4.sysctl_ip_default_ttl) :
                       inet->uc_ttl);
                break;
        }
index 6b2dc7b..cc1caab 100644 (file)
@@ -410,7 +410,7 @@ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
        u32 mtu = dst_mtu(encap_dst) - headroom;
 
        if ((skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) ||
-           (!skb_is_gso(skb) && (skb->len - skb_mac_header_len(skb)) <= mtu))
+           (!skb_is_gso(skb) && (skb->len - skb_network_offset(skb)) <= mtu))
                return 0;
 
        skb_dst_update_pmtu_no_confirm(skb, mtu);
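skb_mac_header_len() assumes a mac header was set, which is not guaranteed for the locally generated packets that reach skb_tunnel_check_pmtu(); measuring from the network offset yields the L3 length either way. Sketch of the quantity now compared against the route MTU:

/* L3 payload length; valid whether or not
 * skb_mac_header_was_set(skb) is true.
 */
static unsigned int skb_l3_len(const struct sk_buff *skb)
{
        return skb->len - skb_network_offset(skb);
}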
index 918c61f..d640adc 100644 (file)
@@ -62,7 +62,7 @@ struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
 
        skb_reserve(nskb, LL_MAX_HEADER);
        niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
-                                  net->ipv4.sysctl_ip_default_ttl);
+                                  READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
        nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
        niph->tot_len = htons(nskb->len);
        ip_send_check(niph);
@@ -117,7 +117,7 @@ struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
 
        skb_reserve(nskb, LL_MAX_HEADER);
        niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
-                                  net->ipv4.sysctl_ip_default_ttl);
+                                  READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
 
        skb_reset_transport_header(nskb);
        icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
index e459a39..853a75a 100644 (file)
@@ -1858,7 +1858,7 @@ static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
                /* __ip6_del_rt does a release, so do a hold here */
                fib6_info_hold(f6i);
                ipv6_stub->ip6_del_rt(net, f6i,
-                                     !net->ipv4.sysctl_nexthop_compat_mode);
+                                     !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));
        }
 }
 
@@ -2361,7 +2361,8 @@ out:
        if (!rc) {
                nh_base_seq_inc(net);
                nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
-               if (replace_notify && net->ipv4.sysctl_nexthop_compat_mode)
+               if (replace_notify &&
+                   READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))
                        nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
        }
 
index 1a43ca7..3c6101d 100644 (file)
@@ -319,12 +319,16 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
                pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
                         sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
 
+               if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
+                       return 0;
+
                tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
                chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
 
-               if (!inet_addr_valid_or_nonlocal(net, inet_sk(sk),
-                                                addr->sin_addr.s_addr,
-                                                chk_addr_ret))
+               if (chk_addr_ret == RTN_MULTICAST ||
+                   chk_addr_ret == RTN_BROADCAST ||
+                   (chk_addr_ret != RTN_LOCAL &&
+                    !inet_can_nonlocal_bind(net, isk)))
                        return -EADDRNOTAVAIL;
 
 #if IS_ENABLED(CONFIG_IPV6)
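
The ping bind change above makes the policy explicit: binding to INADDR_ANY is always allowed, multicast and broadcast addresses are never valid bind addresses, and a unicast address that is not local requires the nonlocal-bind override. A compact sketch of that decision ladder (the address classes and helper are illustrative):

    #include <stdbool.h>

    /* Illustrative address classes, mirroring the kernel's RTN_* results. */
    enum addr_type { ADDR_LOCAL, ADDR_MULTICAST, ADDR_BROADCAST, ADDR_OTHER };

    #define EADDRNOTAVAIL 99

    static int check_bind_addr(unsigned int s_addr, enum addr_type type,
                               bool can_nonlocal_bind)
    {
            if (s_addr == 0)                 /* INADDR_ANY: always fine */
                    return 0;
            if (type == ADDR_MULTICAST ||    /* never bindable          */
                type == ADDR_BROADCAST)
                    return -EADDRNOTAVAIL;
            if (type != ADDR_LOCAL &&        /* nonlocal needs the knob */
                !can_nonlocal_bind)
                    return -EADDRNOTAVAIL;
            return 0;
    }
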
index 2883607..0088a4c 100644 (file)
@@ -387,7 +387,7 @@ static int snmp_seq_show_ipstats(struct seq_file *seq, void *v)
 
        seq_printf(seq, "\nIp: %d %d",
                   IPV4_DEVCONF_ALL(net, FORWARDING) ? 1 : 2,
-                  net->ipv4.sysctl_ip_default_ttl);
+                  READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
 
        BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
        snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
index 356f535..4702c61 100644 (file)
@@ -1398,7 +1398,7 @@ u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
        struct fib_info *fi = res->fi;
        u32 mtu = 0;
 
-       if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
+       if (READ_ONCE(dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu) ||
            fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
                mtu = fi->fib_mtu;
 
@@ -1929,7 +1929,7 @@ static u32 fib_multipath_custom_hash_outer(const struct net *net,
                                           const struct sk_buff *skb,
                                           bool *p_has_inner)
 {
-       u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+       u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
        struct flow_keys keys, hash_keys;
 
        if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
@@ -1958,7 +1958,7 @@ static u32 fib_multipath_custom_hash_inner(const struct net *net,
                                           const struct sk_buff *skb,
                                           bool has_inner)
 {
-       u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+       u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
        struct flow_keys keys, hash_keys;
 
        /* We assume the packet carries an encapsulation, but if none was
@@ -2018,7 +2018,7 @@ static u32 fib_multipath_custom_hash_skb(const struct net *net,
 static u32 fib_multipath_custom_hash_fl4(const struct net *net,
                                         const struct flowi4 *fl4)
 {
-       u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+       u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
        struct flow_keys hash_keys;
 
        if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
@@ -2048,7 +2048,7 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
        struct flow_keys hash_keys;
        u32 mhash = 0;
 
-       switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
+       switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
        case 0:
                memset(&hash_keys, 0, sizeof(hash_keys));
                hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
index f33c31d..942d2df 100644 (file)
@@ -247,12 +247,12 @@ bool cookie_timestamp_decode(const struct net *net,
                return true;
        }
 
-       if (!net->ipv4.sysctl_tcp_timestamps)
+       if (!READ_ONCE(net->ipv4.sysctl_tcp_timestamps))
                return false;
 
        tcp_opt->sack_ok = (options & TS_OPT_SACK) ? TCP_SACK_SEEN : 0;
 
-       if (tcp_opt->sack_ok && !net->ipv4.sysctl_tcp_sack)
+       if (tcp_opt->sack_ok && !READ_ONCE(net->ipv4.sysctl_tcp_sack))
                return false;
 
        if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK)
@@ -261,7 +261,7 @@ bool cookie_timestamp_decode(const struct net *net,
        tcp_opt->wscale_ok = 1;
        tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK;
 
-       return net->ipv4.sysctl_tcp_window_scaling != 0;
+       return READ_ONCE(net->ipv4.sysctl_tcp_window_scaling) != 0;
 }
 EXPORT_SYMBOL(cookie_timestamp_decode);
 
@@ -273,7 +273,7 @@ bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt,
        if (!ecn_ok)
                return false;
 
-       if (net->ipv4.sysctl_tcp_ecn)
+       if (READ_ONCE(net->ipv4.sysctl_tcp_ecn))
                return true;
 
        return dst_feature(dst, RTAX_FEATURE_ECN);
@@ -340,7 +340,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
        struct flowi4 fl4;
        u32 tsoff = 0;
 
-       if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
+       if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) ||
+           !th->ack || th->rst)
                goto out;
 
        if (tcp_synq_no_recent_overflow(sk))
index cd448cd..5490c28 100644 (file)
@@ -84,7 +84,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
                 * port limit.
                 */
                if ((range[1] < range[0]) ||
-                   (range[0] < net->ipv4.sysctl_ip_prot_sock))
+                   (range[0] < READ_ONCE(net->ipv4.sysctl_ip_prot_sock)))
                        ret = -EINVAL;
                else
                        set_local_port_range(net, range);
@@ -110,7 +110,7 @@ static int ipv4_privileged_ports(struct ctl_table *table, int write,
                .extra2 = &ip_privileged_port_max,
        };
 
-       pports = net->ipv4.sysctl_ip_prot_sock;
+       pports = READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
 
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 
@@ -122,7 +122,7 @@ static int ipv4_privileged_ports(struct ctl_table *table, int write,
                if (range[0] < pports)
                        ret = -EINVAL;
                else
-                       net->ipv4.sysctl_ip_prot_sock = pports;
+                       WRITE_ONCE(net->ipv4.sysctl_ip_prot_sock, pports);
        }
 
        return ret;
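
Note the pairing in the two hunks above: once ipv4_local_port_range() reads sysctl_ip_prot_sock locklessly through READ_ONCE(), the writer in ipv4_privileged_ports() must store through WRITE_ONCE() so both sides of the race are annotated. The shape of the handler, reduced to a sketch with illustrative names:

    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    static int sysctl_ip_prot_sock;   /* read locklessly elsewhere */

    static int set_privileged_ports(int pports, int local_range_low)
    {
            /* Reject if the local port range would dip below the bound. */
            if (local_range_low < pports)
                    return -1;

            /* Paired with READ_ONCE() at the lockless read sites. */
            WRITE_ONCE(sysctl_ip_prot_sock, pports);
            return 0;
    }
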
@@ -350,61 +350,6 @@ bad_key:
        return ret;
 }
 
-static void proc_configure_early_demux(int enabled, int protocol)
-{
-       struct net_protocol *ipprot;
-#if IS_ENABLED(CONFIG_IPV6)
-       struct inet6_protocol *ip6prot;
-#endif
-
-       rcu_read_lock();
-
-       ipprot = rcu_dereference(inet_protos[protocol]);
-       if (ipprot)
-               ipprot->early_demux = enabled ? ipprot->early_demux_handler :
-                                               NULL;
-
-#if IS_ENABLED(CONFIG_IPV6)
-       ip6prot = rcu_dereference(inet6_protos[protocol]);
-       if (ip6prot)
-               ip6prot->early_demux = enabled ? ip6prot->early_demux_handler :
-                                                NULL;
-#endif
-       rcu_read_unlock();
-}
-
-static int proc_tcp_early_demux(struct ctl_table *table, int write,
-                               void *buffer, size_t *lenp, loff_t *ppos)
-{
-       int ret = 0;
-
-       ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos);
-
-       if (write && !ret) {
-               int enabled = init_net.ipv4.sysctl_tcp_early_demux;
-
-               proc_configure_early_demux(enabled, IPPROTO_TCP);
-       }
-
-       return ret;
-}
-
-static int proc_udp_early_demux(struct ctl_table *table, int write,
-                               void *buffer, size_t *lenp, loff_t *ppos)
-{
-       int ret = 0;
-
-       ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos);
-
-       if (write && !ret) {
-               int enabled = init_net.ipv4.sysctl_udp_early_demux;
-
-               proc_configure_early_demux(enabled, IPPROTO_UDP);
-       }
-
-       return ret;
-}
-
 static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
                                             int write, void *buffer,
                                             size_t *lenp, loff_t *ppos)
@@ -599,6 +544,8 @@ static struct ctl_table ipv4_net_table[] = {
                .maxlen         = sizeof(u8),
                .mode           = 0644,
                .proc_handler   = proc_dou8vec_minmax,
+               .extra1         = SYSCTL_ZERO,
+               .extra2         = SYSCTL_ONE
        },
        {
                .procname       = "icmp_echo_enable_probe",
@@ -615,6 +562,8 @@ static struct ctl_table ipv4_net_table[] = {
                .maxlen         = sizeof(u8),
                .mode           = 0644,
                .proc_handler   = proc_dou8vec_minmax,
+               .extra1         = SYSCTL_ZERO,
+               .extra2         = SYSCTL_ONE
        },
        {
                .procname       = "icmp_ignore_bogus_error_responses",
@@ -622,6 +571,8 @@ static struct ctl_table ipv4_net_table[] = {
                .maxlen         = sizeof(u8),
                .mode           = 0644,
                .proc_handler   = proc_dou8vec_minmax,
+               .extra1         = SYSCTL_ZERO,
+               .extra2         = SYSCTL_ONE
        },
        {
                .procname       = "icmp_errors_use_inbound_ifaddr",
@@ -629,6 +580,8 @@ static struct ctl_table ipv4_net_table[] = {
                .maxlen         = sizeof(u8),
                .mode           = 0644,
                .proc_handler   = proc_dou8vec_minmax,
+               .extra1         = SYSCTL_ZERO,
+               .extra2         = SYSCTL_ONE
        },
        {
                .procname       = "icmp_ratelimit",
@@ -668,6 +621,8 @@ static struct ctl_table ipv4_net_table[] = {
                .maxlen         = sizeof(u8),
                .mode           = 0644,
                .proc_handler   = proc_dou8vec_minmax,
+               .extra1         = SYSCTL_ZERO,
+               .extra2         = SYSCTL_TWO,
        },
        {
                .procname       = "tcp_ecn_fallback",
@@ -675,6 +630,8 @@ static struct ctl_table ipv4_net_table[] = {
                .maxlen         = sizeof(u8),
                .mode           = 0644,
                .proc_handler   = proc_dou8vec_minmax,
+               .extra1         = SYSCTL_ZERO,
+               .extra2         = SYSCTL_ONE,
        },
        {
                .procname       = "ip_dynaddr",
@@ -695,14 +652,14 @@ static struct ctl_table ipv4_net_table[] = {
                .data           = &init_net.ipv4.sysctl_udp_early_demux,
                .maxlen         = sizeof(u8),
                .mode           = 0644,
-               .proc_handler   = proc_udp_early_demux
+               .proc_handler   = proc_dou8vec_minmax,
        },
        {
                .procname       = "tcp_early_demux",
                .data           = &init_net.ipv4.sysctl_tcp_early_demux,
                .maxlen         = sizeof(u8),
                .mode           = 0644,
-               .proc_handler   = proc_tcp_early_demux
+               .proc_handler   = proc_dou8vec_minmax,
        },
        {
                .procname       = "nexthop_compat_mode",
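
The extra1/extra2 additions above give each u8 sysctl an explicit 0..1 range (0..2 for tcp_ecn); without bounds, proc_dou8vec_minmax() accepts anything that fits in a byte. The early-demux knobs can also drop their custom handlers for the generic one because, with the sysctl now read (via READ_ONCE) at packet-processing time, there is no longer a need to patch protocol function pointers on every write. A sketch of the bounded-write idea, with stand-in types:

    #include <errno.h>

    /* Illustrative range-checked store, in the spirit of
     * proc_dou8vec_minmax() with .extra1/.extra2 set.
     */
    struct knob {
            unsigned char *data;
            unsigned char  min, max;   /* stand-ins for extra1/extra2 */
    };

    static int knob_write(struct knob *k, int val)
    {
            if (val < k->min || val > k->max)
                    return -EINVAL;    /* e.g. 2 is no longer silently taken */
            *k->data = (unsigned char)val;
            return 0;
    }
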
index 9984d23..7668817 100644 (file)
@@ -441,7 +441,7 @@ void tcp_init_sock(struct sock *sk)
        tp->snd_cwnd_clamp = ~0;
        tp->mss_cache = TCP_MSS_DEFAULT;
 
-       tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
+       tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
        tcp_assign_congestion_control(sk);
 
        tp->tsoffset = 0;
@@ -452,8 +452,8 @@ void tcp_init_sock(struct sock *sk)
 
        icsk->icsk_sync_mss = tcp_sync_mss;
 
-       WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
-       WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
+       WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
+       WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));
 
        sk_sockets_allocated_inc(sk);
 }
@@ -686,7 +686,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
                                int size_goal)
 {
        return skb->len < size_goal &&
-              sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
+              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
               !tcp_rtx_queue_empty(sk) &&
               refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
               tcp_skb_can_collapse_to(skb);
@@ -1150,7 +1150,8 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
        struct sockaddr *uaddr = msg->msg_name;
        int err, flags;
 
-       if (!(sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
+       if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) &
+             TFO_CLIENT_ENABLE) ||
            (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
             uaddr->sa_family == AF_UNSPEC))
                return -EOPNOTSUPP;
@@ -1723,7 +1724,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
        if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
                cap = sk->sk_rcvbuf >> 1;
        else
-               cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
+               cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
        val = min(val, cap);
        WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
 
@@ -2715,7 +2716,8 @@ static void tcp_orphan_update(struct timer_list *unused)
 
 static bool tcp_too_many_orphans(int shift)
 {
-       return READ_ONCE(tcp_orphan_cache) << shift > sysctl_tcp_max_orphans;
+       return READ_ONCE(tcp_orphan_cache) << shift >
+               READ_ONCE(sysctl_tcp_max_orphans);
 }
 
 bool tcp_check_oom(struct sock *sk, int shift)
@@ -3616,7 +3618,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname,
        case TCP_FASTOPEN_CONNECT:
                if (val > 1 || val < 0) {
                        err = -EINVAL;
-               } else if (net->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) {
+               } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) &
+                          TFO_CLIENT_ENABLE) {
                        if (sk->sk_state == TCP_CLOSE)
                                tp->fastopen_connect = val;
                        else
@@ -3966,12 +3969,13 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                val = keepalive_probes(tp);
                break;
        case TCP_SYNCNT:
-               val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
+               val = icsk->icsk_syn_retries ? :
+                       READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
                break;
        case TCP_LINGER2:
                val = tp->linger2;
                if (val >= 0)
-                       val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
+                       val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ;
                break;
        case TCP_DEFER_ACCEPT:
                val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
@@ -4455,9 +4459,18 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
                return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
        }
 
-       /* check the signature */
-       genhash = tp->af_specific->calc_md5_hash(newhash, hash_expected,
-                                                NULL, skb);
+       /* Check the signature.
+        * To support dual stack listeners, we need to handle
+        * IPv4-mapped case.
+        */
+       if (family == AF_INET)
+               genhash = tcp_v4_md5_hash_skb(newhash,
+                                             hash_expected,
+                                             NULL, skb);
+       else
+               genhash = tp->af_specific->calc_md5_hash(newhash,
+                                                        hash_expected,
+                                                        NULL, skb);
 
        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
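
The MD5 fix above matters for dual-stack listeners: an IPv4 segment can land on an AF_INET6 socket whose af_specific ops compute the hash over the IPv6 pseudo-header, so the IPv4-mapped case must be dispatched to tcp_v4_md5_hash_skb() by the packet's family rather than through the socket's ops table. The dispatch, reduced to a sketch with illustrative types:

    enum family { FAM_INET, FAM_INET6 };

    typedef int (*md5_hash_fn)(unsigned char *out, const void *key,
                               const void *skb);

    /* Stand-ins: the real functions hash over v4/v6 pseudo-headers. */
    static int v4_hash(unsigned char *o, const void *k, const void *s)
    { return 0; }

    /* Pick the hash by the *packet's* family, not the socket's ops, so an
     * IPv4 segment hitting a dual-stack (AF_INET6) listener is verified
     * with the IPv4 pseudo-header.
     */
    static md5_hash_fn pick_hash(enum family pkt_family,
                                 md5_hash_fn sock_ops_hash)
    {
            return pkt_family == FAM_INET ? v4_hash : sock_ops_hash;
    }
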
@@ -4604,12 +4617,6 @@ void __init tcp_init(void)
                                  SLAB_HWCACHE_ALIGN | SLAB_PANIC |
                                  SLAB_ACCOUNT,
                                  NULL);
-       tcp_hashinfo.bind2_bucket_cachep =
-               kmem_cache_create("tcp_bind2_bucket",
-                                 sizeof(struct inet_bind2_bucket), 0,
-                                 SLAB_HWCACHE_ALIGN | SLAB_PANIC |
-                                 SLAB_ACCOUNT,
-                                 NULL);
 
        /* Size and allocate the main established and bind bucket
         * hash tables.
@@ -4632,9 +4639,8 @@ void __init tcp_init(void)
        if (inet_ehash_locks_alloc(&tcp_hashinfo))
                panic("TCP: failed to alloc ehash_locks");
        tcp_hashinfo.bhash =
-               alloc_large_system_hash("TCP bind bhash tables",
-                                       sizeof(struct inet_bind_hashbucket) +
-                                       sizeof(struct inet_bind2_hashbucket),
+               alloc_large_system_hash("TCP bind",
+                                       sizeof(struct inet_bind_hashbucket),
                                        tcp_hashinfo.ehash_mask + 1,
                                        17, /* one slot per 128 KB of memory */
                                        0,
@@ -4643,12 +4649,9 @@ void __init tcp_init(void)
                                        0,
                                        64 * 1024);
        tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
-       tcp_hashinfo.bhash2 =
-               (struct inet_bind2_hashbucket *)(tcp_hashinfo.bhash + tcp_hashinfo.bhash_size);
        for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
                spin_lock_init(&tcp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
-               INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain);
        }
 
 
index be3947e..0d3f68b 100644 (file)
@@ -611,9 +611,6 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
                return 0;
        }
 
-       if (inet_csk_has_ulp(sk))
-               return -EINVAL;
-
        if (sk->sk_family == AF_INET6) {
                if (tcp_bpf_assert_proto_ops(psock->sk_proto))
                        return -EINVAL;
index fdbcf2a..825b216 100644 (file)
@@ -332,7 +332,7 @@ static bool tcp_fastopen_no_cookie(const struct sock *sk,
                                   const struct dst_entry *dst,
                                   int flag)
 {
-       return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
+       return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
               tcp_sk(sk)->fastopen_no_cookie ||
               (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
 }
@@ -347,7 +347,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              const struct dst_entry *dst)
 {
        bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
-       int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
+       int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
        struct tcp_fastopen_cookie valid_foc = { .len = -1 };
        struct sock *child;
        int ret = 0;
@@ -489,7 +489,7 @@ void tcp_fastopen_active_disable(struct sock *sk)
 {
        struct net *net = sock_net(sk);
 
-       if (!sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout)
+       if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
                return;
 
        /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
@@ -510,7 +510,8 @@ void tcp_fastopen_active_disable(struct sock *sk)
  */
 bool tcp_fastopen_active_should_disable(struct sock *sk)
 {
-       unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
+       unsigned int tfo_bh_timeout =
+               READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
        unsigned long timeout;
        int tfo_da_times;
        int multiplier;
index 2e2a9ec..b163799 100644 (file)
@@ -426,7 +426,7 @@ static void tcp_sndbuf_expand(struct sock *sk)
 
        if (sk->sk_sndbuf < sndmem)
                WRITE_ONCE(sk->sk_sndbuf,
-                          min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]));
+                          min(sndmem, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[2])));
 }
 
 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -461,7 +461,7 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb,
        struct tcp_sock *tp = tcp_sk(sk);
        /* Optimize this! */
        int truesize = tcp_win_from_space(sk, skbtruesize) >> 1;
-       int window = tcp_win_from_space(sk, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
+       int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1;
 
        while (tp->rcv_ssthresh <= window) {
                if (truesize <= skb->len)
@@ -534,7 +534,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
  */
 static void tcp_init_buffer_space(struct sock *sk)
 {
-       int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win;
+       int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
        struct tcp_sock *tp = tcp_sk(sk);
        int maxwin;
 
@@ -574,16 +574,17 @@ static void tcp_clamp_window(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct net *net = sock_net(sk);
+       int rmem2;
 
        icsk->icsk_ack.quick = 0;
+       rmem2 = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);
 
-       if (sk->sk_rcvbuf < net->ipv4.sysctl_tcp_rmem[2] &&
+       if (sk->sk_rcvbuf < rmem2 &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
            !tcp_under_memory_pressure(sk) &&
            sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
                WRITE_ONCE(sk->sk_rcvbuf,
-                          min(atomic_read(&sk->sk_rmem_alloc),
-                              net->ipv4.sysctl_tcp_rmem[2]));
+                          min(atomic_read(&sk->sk_rmem_alloc), rmem2));
        }
        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
@@ -724,7 +725,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
         * <prev RTT . ><current RTT .. ><next RTT .... >
         */
 
-       if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
                int rcvmem, rcvbuf;
                u64 rcvwin, grow;
@@ -745,7 +746,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
                do_div(rcvwin, tp->advmss);
                rcvbuf = min_t(u64, rcvwin * rcvmem,
-                              sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+                              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
                if (rcvbuf > sk->sk_rcvbuf) {
                        WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
 
@@ -910,9 +911,9 @@ static void tcp_update_pacing_rate(struct sock *sk)
         *       end of slow start and should slow down.
         */
        if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2)
-               rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio;
+               rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio);
        else
-               rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio;
+               rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio);
 
        rate *= max(tcp_snd_cwnd(tp), tp->packets_out);
 
@@ -1051,7 +1052,7 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
                         tp->undo_marker ? tp->undo_retrans : 0);
 #endif
                tp->reordering = min_t(u32, (metric + mss - 1) / mss,
-                                      sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
+                                      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
        }
 
        /* This exciting event is worth to be remembered. 8) */
@@ -2030,7 +2031,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
                return;
 
        tp->reordering = min_t(u32, tp->packets_out + addend,
-                              sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
+                              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
        tp->reord_seen++;
        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
 }
@@ -2095,7 +2096,8 @@ static inline void tcp_init_undo(struct tcp_sock *tp)
 
 static bool tcp_is_rack(const struct sock *sk)
 {
-       return sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION;
+       return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
+               TCP_RACK_LOSS_DETECTION;
 }
 
 /* If we detect SACK reneging, forget all SACK information
@@ -2139,6 +2141,7 @@ void tcp_enter_loss(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
+       u8 reordering;
 
        tcp_timeout_mark_lost(sk);
 
@@ -2159,10 +2162,12 @@ void tcp_enter_loss(struct sock *sk)
        /* Timeout in disordered state after receiving substantial DUPACKs
         * suggests that the degree of reordering is over-estimated.
         */
+       reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
        if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
-           tp->sacked_out >= net->ipv4.sysctl_tcp_reordering)
+           tp->sacked_out >= reordering)
                tp->reordering = min_t(unsigned int, tp->reordering,
-                                      net->ipv4.sysctl_tcp_reordering);
+                                      reordering);
+
        tcp_set_ca_state(sk, TCP_CA_Loss);
        tp->high_seq = tp->snd_nxt;
        tcp_ecn_queue_cwr(tp);
@@ -2171,7 +2176,7 @@ void tcp_enter_loss(struct sock *sk)
         * loss recovery is underway except recurring timeout(s) on
         * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
         */
-       tp->frto = net->ipv4.sysctl_tcp_frto &&
+       tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
                   (new_recovery || icsk->icsk_retransmits) &&
                   !inet_csk(sk)->icsk_mtup.probe_size;
 }
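
tcp_enter_loss() now reads sysctl_tcp_reordering once into a local and uses that snapshot for both the comparison and the clamp; with two separate lockless reads, a concurrent sysctl write could make the min_t() clamp against a different value than the one that passed the >= test. A sketch of the single-snapshot shape, with stand-in fields:

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    static unsigned char sysctl_tcp_reordering = 3;  /* may change under us */

    static void enter_loss(unsigned int *tp_reordering, unsigned int sacked_out)
    {
            /* One snapshot, two uses: the test and the clamp stay consistent. */
            unsigned char reordering = READ_ONCE(sysctl_tcp_reordering);

            if (sacked_out >= reordering && *tp_reordering > reordering)
                    *tp_reordering = reordering;
    }
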
@@ -3054,7 +3059,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 
 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
 {
-       u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ;
+       u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ;
        struct tcp_sock *tp = tcp_sk(sk);
 
        if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
@@ -3464,7 +3469,8 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
         * new SACK or ECE mark may first advance cwnd here and later reduce
         * cwnd in tcp_fastretrans_alert() based on more states.
         */
-       if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
+       if (tcp_sk(sk)->reordering >
+           READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering))
                return flag & FLAG_FORWARD_PROGRESS;
 
        return flag & FLAG_DATA_ACKED;
@@ -3576,7 +3582,8 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
        if (*last_oow_ack_time) {
                s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
 
-               if (0 <= elapsed && elapsed < net->ipv4.sysctl_tcp_invalid_ratelimit) {
+               if (0 <= elapsed &&
+                   elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
                        NET_INC_STATS(net, mib_idx);
                        return true;    /* rate-limited: don't send yet! */
                }
@@ -3624,7 +3631,7 @@ static void tcp_send_challenge_ack(struct sock *sk)
        /* Then check host-wide RFC 5961 rate limit. */
        now = jiffies / HZ;
        if (now != challenge_timestamp) {
-               u32 ack_limit = net->ipv4.sysctl_tcp_challenge_ack_limit;
+               u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
                u32 half = (ack_limit + 1) >> 1;
 
                challenge_timestamp = now;
@@ -4056,7 +4063,7 @@ void tcp_parse_options(const struct net *net,
                                break;
                        case TCPOPT_WINDOW:
                                if (opsize == TCPOLEN_WINDOW && th->syn &&
-                                   !estab && net->ipv4.sysctl_tcp_window_scaling) {
+                                   !estab && READ_ONCE(net->ipv4.sysctl_tcp_window_scaling)) {
                                        __u8 snd_wscale = *(__u8 *)ptr;
                                        opt_rx->wscale_ok = 1;
                                        if (snd_wscale > TCP_MAX_WSCALE) {
@@ -4072,7 +4079,7 @@ void tcp_parse_options(const struct net *net,
                        case TCPOPT_TIMESTAMP:
                                if ((opsize == TCPOLEN_TIMESTAMP) &&
                                    ((estab && opt_rx->tstamp_ok) ||
-                                    (!estab && net->ipv4.sysctl_tcp_timestamps))) {
+                                    (!estab && READ_ONCE(net->ipv4.sysctl_tcp_timestamps)))) {
                                        opt_rx->saw_tstamp = 1;
                                        opt_rx->rcv_tsval = get_unaligned_be32(ptr);
                                        opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -4080,7 +4087,7 @@ void tcp_parse_options(const struct net *net,
                                break;
                        case TCPOPT_SACK_PERM:
                                if (opsize == TCPOLEN_SACK_PERM && th->syn &&
-                                   !estab && net->ipv4.sysctl_tcp_sack) {
+                                   !estab && READ_ONCE(net->ipv4.sysctl_tcp_sack)) {
                                        opt_rx->sack_ok = TCP_SACK_SEEN;
                                        tcp_sack_reset(opt_rx);
                                }
@@ -4421,7 +4428,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
+       if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
                int mib_idx;
 
                if (before(seq, tp->rcv_nxt))
@@ -4468,7 +4475,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
                NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
                tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 
-               if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
+               if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
                        u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
                        tcp_rcv_spurious_retrans(sk, skb);
@@ -5514,7 +5521,7 @@ send_now:
        }
 
        if (!tcp_is_sack(tp) ||
-           tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
+           tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr))
                goto send_now;
 
        if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
@@ -5535,11 +5542,12 @@ send_now:
        if (tp->srtt_us && tp->srtt_us < rtt)
                rtt = tp->srtt_us;
 
-       delay = min_t(unsigned long, sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns,
+       delay = min_t(unsigned long,
+                     READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns),
                      rtt * (NSEC_PER_USEC >> 3)/20);
        sock_hold(sk);
        hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay),
-                              sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns,
+                              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns),
                               HRTIMER_MODE_REL_PINNED_SOFT);
 }
 
@@ -5567,7 +5575,7 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
        struct tcp_sock *tp = tcp_sk(sk);
        u32 ptr = ntohs(th->urg_ptr);
 
-       if (ptr && !sock_net(sk)->ipv4.sysctl_tcp_stdurg)
+       if (ptr && !READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_stdurg))
                ptr--;
        ptr += ntohl(th->seq);
 
@@ -6729,7 +6737,7 @@ static void tcp_ecn_create_request(struct request_sock *req,
 
        ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
        ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK);
-       ecn_ok = net->ipv4.sysctl_tcp_ecn || ecn_ok_dst;
+       ecn_ok = READ_ONCE(net->ipv4.sysctl_tcp_ecn) || ecn_ok_dst;
 
        if (((!ect || th->res1) && ecn_ok) || tcp_ca_needs_ecn(listen_sk) ||
            (ecn_ok_dst & DST_FEATURE_ECN_CA) ||
@@ -6797,11 +6805,14 @@ static bool tcp_syn_flood_action(const struct sock *sk, const char *proto)
 {
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        const char *msg = "Dropping request";
-       bool want_cookie = false;
        struct net *net = sock_net(sk);
+       bool want_cookie = false;
+       u8 syncookies;
+
+       syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
 
 #ifdef CONFIG_SYN_COOKIES
-       if (net->ipv4.sysctl_tcp_syncookies) {
+       if (syncookies) {
                msg = "Sending cookies";
                want_cookie = true;
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
@@ -6809,8 +6820,7 @@ static bool tcp_syn_flood_action(const struct sock *sk, const char *proto)
 #endif
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
-       if (!queue->synflood_warned &&
-           net->ipv4.sysctl_tcp_syncookies != 2 &&
+       if (!queue->synflood_warned && syncookies != 2 &&
            xchg(&queue->synflood_warned, 1) == 0)
                net_info_ratelimited("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
                                     proto, sk->sk_num, msg);
@@ -6859,7 +6869,7 @@ u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
        struct tcp_sock *tp = tcp_sk(sk);
        u16 mss;
 
-       if (sock_net(sk)->ipv4.sysctl_tcp_syncookies != 2 &&
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) != 2 &&
            !inet_csk_reqsk_queue_is_full(sk))
                return 0;
 
@@ -6893,13 +6903,15 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
        bool want_cookie = false;
        struct dst_entry *dst;
        struct flowi fl;
+       u8 syncookies;
+
+       syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
 
        /* TW buckets are converted to open requests without
         * limitations; they conserve resources and the peer is
         * evidently a real one.
         */
-       if ((net->ipv4.sysctl_tcp_syncookies == 2 ||
-            inet_csk_reqsk_queue_is_full(sk)) && !isn) {
+       if ((syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) && !isn) {
                want_cookie = tcp_syn_flood_action(sk, rsk_ops->slab_name);
                if (!want_cookie)
                        goto drop;
@@ -6948,10 +6960,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
 
        if (!want_cookie && !isn) {
+               int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog);
+
                /* Kill the following clause, if you dislike this way. */
-               if (!net->ipv4.sysctl_tcp_syncookies &&
-                   (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-                    (net->ipv4.sysctl_max_syn_backlog >> 2)) &&
+               if (!syncookies &&
+                   (max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+                    (max_syn_backlog >> 2)) &&
                    !tcp_peer_is_proven(req, dst)) {
                        /* Without syncookies last quarter of
                         * backlog is filled with destinations,
index fe8f23b..586c102 100644 (file)
@@ -108,10 +108,10 @@ static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
 
 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 {
+       int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse);
        const struct inet_timewait_sock *tw = inet_twsk(sktw);
        const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
        struct tcp_sock *tp = tcp_sk(sk);
-       int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;
 
        if (reuse == 2) {
                /* Still does not detect *everything* that goes through
@@ -1006,7 +1006,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
-               tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
+               tos = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
                                (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
                                (inet_sk(sk)->tos & INET_ECN_MASK) :
                                inet_sk(sk)->tos;
@@ -1526,7 +1526,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        /* Set ToS of the new socket based upon the value of incoming SYN.
         * ECT bits are set later in tcp_init_transfer().
         */
-       if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
                newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
 
        if (!dst) {
@@ -1964,7 +1964,10 @@ process:
                struct sock *nsk;
 
                sk = req->rsk_listener;
-               drop_reason = tcp_inbound_md5_hash(sk, skb,
+               if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
+                       drop_reason = SKB_DROP_REASON_XFRM_POLICY;
+               else
+                       drop_reason = tcp_inbound_md5_hash(sk, skb,
                                                   &iph->saddr, &iph->daddr,
                                                   AF_INET, dif, sdif);
                if (unlikely(drop_reason)) {
@@ -2016,6 +2019,7 @@ process:
                        }
                        goto discard_and_relse;
                }
+               nf_reset_ct(skb);
                if (nsk == sk) {
                        reqsk_put(req);
                        tcp_v4_restore_cb(skb);
index 7029b0e..d58e672 100644 (file)
@@ -329,7 +329,7 @@ void tcp_update_metrics(struct sock *sk)
        int m;
 
        sk_dst_confirm(sk);
-       if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
+       if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
                return;
 
        rcu_read_lock();
@@ -385,7 +385,7 @@ void tcp_update_metrics(struct sock *sk)
 
        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start still did not finish. */
-               if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+               if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tcp_snd_cwnd(tp) >> 1) > val)
@@ -401,7 +401,7 @@ void tcp_update_metrics(struct sock *sk)
        } else if (!tcp_in_slow_start(tp) &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
-               if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+               if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
@@ -418,7 +418,7 @@ void tcp_update_metrics(struct sock *sk)
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
-               if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+               if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
@@ -428,7 +428,8 @@ void tcp_update_metrics(struct sock *sk)
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
-                           tp->reordering != net->ipv4.sysctl_tcp_reordering)
+                           tp->reordering !=
+                           READ_ONCE(net->ipv4.sysctl_tcp_reordering))
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
@@ -462,7 +463,7 @@ void tcp_init_metrics(struct sock *sk)
        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
 
-       val = net->ipv4.sysctl_tcp_no_ssthresh_metrics_save ?
+       val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ?
              0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
index 6854bb1..cb95d88 100644 (file)
@@ -173,7 +173,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                         * Oh well... nobody has a sufficient solution to this
                         * protocol bug yet.
                         */
-                       if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
+                       if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
 kill:
                                inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
@@ -781,7 +781,7 @@ listen_overflow:
        if (sk != req->rsk_listener)
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
 
-       if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
+       if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
                inet_rsk(req)->acked = 1;
                return NULL;
        }
index 1c05443..4c376b6 100644 (file)
@@ -167,16 +167,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
        if (tcp_packets_in_flight(tp) == 0)
                tcp_ca_event(sk, CA_EVENT_TX_START);
 
-       /* If this is the first data packet sent in response to the
-        * previous received data,
-        * and it is a reply for ato after last received packet,
-        * increase pingpong count.
-        */
-       if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
-           (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
-               inet_csk_inc_pingpong_cnt(sk);
-
        tp->lsndtime = now;
+
+       /* If it is a reply for ato after last received
+        * packet, enter pingpong mode.
+        */
+       if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+               inet_csk_enter_pingpong_mode(sk);
 }
 
 /* Account for an ACK we sent. */
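
The tcp_event_data_sent() hunk above replaces the counted heuristic (only entering pingpong mode after several timely replies) with directly entering it whenever the data we send is a reply within one ATO of the last received packet. Note the `(u32)(now - lrcvtime) < ato` form: subtracting in unsigned arithmetic keeps the comparison correct across jiffies wraparound. A tiny illustration:

    #include <stdio.h>

    /* Wrap-safe "is now within 'window' after 'then'" check, as used above. */
    static int within(unsigned int now, unsigned int then, unsigned int window)
    {
            return (unsigned int)(now - then) < window;
    }

    int main(void)
    {
            /* 'then' just before wraparound, 'now' just after: still close. */
            unsigned int then = 0xfffffff0u, now = 0x00000010u;

            printf("%d\n", within(now, then, 0x40));  /* prints 1 */
            return 0;
    }
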
@@ -230,7 +227,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
         * which we interpret as a sign the remote TCP is not
         * misinterpreting the window field as a signed quantity.
         */
-       if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
                (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
        else
                (*rcv_wnd) = min_t(u32, space, U16_MAX);
@@ -241,7 +238,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
        *rcv_wscale = 0;
        if (wscale_ok) {
                /* Set window scaling on max possible window */
-               space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+               space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
                space = max_t(u32, space, sysctl_rmem_max);
                space = min_t(u32, space, *window_clamp);
                *rcv_wscale = clamp_t(int, ilog2(space) - 15,
@@ -285,7 +282,7 @@ static u16 tcp_select_window(struct sock *sk)
         * scaled window.
         */
        if (!tp->rx_opt.rcv_wscale &&
-           sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+           READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
                new_win = min(new_win, MAX_TCP_WINDOW);
        else
                new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
@@ -324,7 +321,7 @@ static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
-       bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
+       bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 ||
                tcp_ca_needs_ecn(sk) || bpf_needs_ecn;
 
        if (!use_ecn) {
@@ -346,7 +343,7 @@ static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
 
 static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
 {
-       if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback))
                /* tp->ecn_flags are cleared at a later point in time when
                 * SYN ACK is ultimately being received.
                 */
@@ -791,18 +788,18 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
        opts->mss = tcp_advertise_mss(sk);
        remaining -= TCPOLEN_MSS_ALIGNED;
 
-       if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
+       if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps) && !*md5)) {
                opts->options |= OPTION_TS;
                opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
                opts->tsecr = tp->rx_opt.ts_recent;
                remaining -= TCPOLEN_TSTAMP_ALIGNED;
        }
-       if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
+       if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) {
                opts->ws = tp->rx_opt.rcv_wscale;
                opts->options |= OPTION_WSCALE;
                remaining -= TCPOLEN_WSCALE_ALIGNED;
        }
-       if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
+       if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) {
                opts->options |= OPTION_SACK_ADVERTISE;
                if (unlikely(!(OPTION_TS & opts->options)))
                        remaining -= TCPOLEN_SACKPERM_ALIGNED;
@@ -1719,7 +1716,8 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
        mss_now -= icsk->icsk_ext_hdr_len;
 
        /* Then reserve room for full set of TCP options and 8 bytes of data */
-       mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
+       mss_now = max(mss_now,
+                     READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss));
        return mss_now;
 }
 
@@ -1762,10 +1760,10 @@ void tcp_mtup_init(struct sock *sk)
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct net *net = sock_net(sk);
 
-       icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
+       icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1;
        icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
                               icsk->icsk_af_ops->net_header_len;
-       icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
+       icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss));
        icsk->icsk_mtup.probe_size = 0;
        if (icsk->icsk_mtup.enabled)
                icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
@@ -1897,7 +1895,7 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
                if (tp->packets_out > tp->snd_cwnd_used)
                        tp->snd_cwnd_used = tp->packets_out;
 
-               if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
+               if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) &&
                    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
                    !ca_ops->cong_control)
                        tcp_cwnd_application_limited(sk);
@@ -1975,7 +1973,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
 
        bytes = sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift);
 
-       r = tcp_min_rtt(tcp_sk(sk)) >> sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log;
+       r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
        if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
                bytes += sk->sk_gso_max_size >> r;
 
@@ -1994,7 +1992,7 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
 
        min_tso = ca_ops->min_tso_segs ?
                        ca_ops->min_tso_segs(sk) :
-                       sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
+                       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
 
        tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
        return min_t(u32, tso_segs, sk->sk_gso_max_segs);
@@ -2282,7 +2280,7 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
        u32 interval;
        s32 delta;
 
-       interval = net->ipv4.sysctl_tcp_probe_interval;
+       interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval);
        delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
        if (unlikely(delta >= interval * HZ)) {
                int mss = tcp_current_mss(sk);
@@ -2366,7 +2364,7 @@ static int tcp_mtu_probe(struct sock *sk)
         * probing process by not resetting search range to its original.
         */
        if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
-               interval < net->ipv4.sysctl_tcp_probe_threshold) {
+           interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) {
                /* Check whether enough time has elapsed for
                 * another round of probing.
                 */
@@ -2506,7 +2504,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
                      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
        if (sk->sk_pacing_status == SK_PACING_NONE)
                limit = min_t(unsigned long, limit,
-                             sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
+                             READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
        limit <<= factor;
 
        if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
@@ -2740,7 +2738,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
        if (rcu_access_pointer(tp->fastopen_rsk))
                return false;
 
-       early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
+       early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans);
        /* Schedule a loss probe in 2*RTT for SACK capable connections
         * not in loss recovery, that are either limited by cwnd or application.
         */
@@ -3104,7 +3102,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
        struct sk_buff *skb = to, *tmp;
        bool first = true;
 
-       if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
+       if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse))
                return;
        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
                return;
@@ -3646,7 +3644,7 @@ static void tcp_connect_init(struct sock *sk)
         * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
         */
        tp->tcp_header_len = sizeof(struct tcphdr);
-       if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps))
                tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -3682,7 +3680,7 @@ static void tcp_connect_init(struct sock *sk)
                                  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
                                  &tp->rcv_wnd,
                                  &tp->window_clamp,
-                                 sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
+                                 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling),
                                  &rcv_wscale,
                                  rcv_wnd);
 
@@ -4089,7 +4087,7 @@ void tcp_send_probe0(struct sock *sk)
 
        icsk->icsk_probes_out++;
        if (err <= 0) {
-               if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
+               if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2))
                        icsk->icsk_backoff++;
                timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
        } else {
index 48f30e7..50abaa9 100644 (file)
@@ -14,7 +14,8 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk)
                        return 0;
 
                if (tp->sacked_out >= tp->reordering &&
-                   !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
+                   !(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
+                     TCP_RACK_NO_DUPTHRESH))
                        return 0;
        }
 
@@ -187,7 +188,8 @@ void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
+       if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
+            TCP_RACK_STATIC_REO_WND) ||
            !rs->prior_delivered)
                return;
 
index 20cf4a9..50bba37 100644 (file)
@@ -143,7 +143,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
  */
 static int tcp_orphan_retries(struct sock *sk, bool alive)
 {
-       int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */
+       int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */
 
        /* We know from an ICMP that something is wrong. */
        if (sk->sk_err_soft && !alive)
@@ -163,7 +163,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
        int mss;
 
        /* Black hole detection */
-       if (!net->ipv4.sysctl_tcp_mtu_probing)
+       if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
                return;
 
        if (!icsk->icsk_mtup.enabled) {
@@ -171,9 +171,9 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
                icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
        } else {
                mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
-               mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
-               mss = max(mss, net->ipv4.sysctl_tcp_mtu_probe_floor);
-               mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
+               mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
+               mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
+               mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
                icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
        }
        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
@@ -239,17 +239,18 @@ static int tcp_write_timeout(struct sock *sk)
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits)
                        __dst_negative_advice(sk);
-               retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
+               retry_until = icsk->icsk_syn_retries ? :
+                       READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
                expired = icsk->icsk_retransmits >= retry_until;
        } else {
-               if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
+               if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);
 
                        __dst_negative_advice(sk);
                }
 
-               retry_until = net->ipv4.sysctl_tcp_retries2;
+               retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
                if (sock_flag(sk, SOCK_DEAD)) {
                        const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
 
@@ -380,7 +381,7 @@ static void tcp_probe_timer(struct sock *sk)
                 msecs_to_jiffies(icsk->icsk_user_timeout))
                goto abort;
 
-       max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
+       max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
        if (sock_flag(sk, SOCK_DEAD)) {
                const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
 
@@ -406,12 +407,15 @@ abort:            tcp_write_err(sk);
 static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
-       int max_retries = icsk->icsk_syn_retries ? :
-           sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
        struct tcp_sock *tp = tcp_sk(sk);
+       int max_retries;
 
        req->rsk_ops->syn_ack_timeout(req);
 
+       /* add one more retry for fastopen */
+       max_retries = icsk->icsk_syn_retries ? :
+               READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;
+
        if (req->num_timeout >= max_retries) {
                tcp_write_err(sk);
                return;
@@ -574,7 +578,7 @@ out_reset_timer:
         * linear-timeout retransmissions into a black hole
         */
        if (sk->sk_state == TCP_ESTABLISHED &&
-           (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
+           (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
            tcp_stream_is_thin(tp) &&
            icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
                icsk->icsk_backoff = 0;
@@ -585,7 +589,7 @@ out_reset_timer:
        }
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
-       if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
+       if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
                __sk_dst_reset(sk);
 
 out:;
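
The icsk->icsk_syn_retries ? : READ_ONCE(...) lines above use the GNU conditional-with-omitted-operand extension: a ?: b evaluates to a when a is non-zero and to b otherwise, evaluating a only once, which is why it pairs naturally with a per-socket override falling back to a sysctl default. A tiny sketch, assuming GCC or Clang:

#include <stdio.h>

int main(void)
{
        int per_socket_override = 0; /* 0 means "not set" */
        int sysctl_default = 6;

        /* GNU extension: same as a ? a : b, but a is evaluated once */
        int retry_until = per_socket_override ?: sysctl_default;

        printf("retry_until = %d\n", retry_until); /* prints 6 */
        return 0;
}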
index 1b19325..49cc658 100644 (file)
@@ -1109,10 +1109,6 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
                goto out;
        }
 
-       if (net->ipv6.devconf_all->disable_policy ||
-           idev->cnf.disable_policy)
-               f6i->dst_nopolicy = true;
-
        neigh_parms_data_state_setall(idev->nd_parms);
 
        ifa->addr = *cfg->pfx;
@@ -5172,9 +5168,9 @@ next:
                fillargs->event = RTM_GETMULTICAST;
 
                /* multicast address */
-               for (ifmca = rcu_dereference(idev->mc_list);
+               for (ifmca = rtnl_dereference(idev->mc_list);
                     ifmca;
-                    ifmca = rcu_dereference(ifmca->next), ip_idx++) {
+                    ifmca = rtnl_dereference(ifmca->next), ip_idx++) {
                        if (ip_idx < s_ip_idx)
                                continue;
                        err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
index 70564dd..6f354f8 100644 (file)
@@ -226,7 +226,7 @@ lookup_protocol:
        RCU_INIT_POINTER(inet->mc_list, NULL);
        inet->rcv_tos   = 0;
 
-       if (net->ipv4.sysctl_ip_no_pmtu_disc)
+       if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
                inet->pmtudisc = IP_PMTUDISC_DONT;
        else
                inet->pmtudisc = IP_PMTUDISC_WANT;
index 6177022..9d92d51 100644 (file)
@@ -925,7 +925,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
                break;
        case ICMPV6_EXT_ECHO_REQUEST:
                if (!net->ipv6.sysctl.icmpv6_echo_ignore_all &&
-                   net->ipv4.sysctl_icmp_echo_enable_probe)
+                   READ_ONCE(net->ipv4.sysctl_icmp_echo_enable_probe))
                        icmpv6_echo_reply(skb);
                break;
 
index 4e37f7c..a9051df 100644 (file)
@@ -939,7 +939,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
        __be16 proto;
        __u32 mtu;
        int nhoff;
-       int thoff;
 
        if (!pskb_inet_may_pull(skb))
                goto tx_err;
@@ -960,10 +959,16 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
            (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
                truncate = true;
 
-       thoff = skb_transport_header(skb) - skb_mac_header(skb);
-       if (skb->protocol == htons(ETH_P_IPV6) &&
-           (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
-               truncate = true;
+       if (skb->protocol == htons(ETH_P_IPV6)) {
+               int thoff;
+
+               if (skb_transport_header_was_set(skb))
+                       thoff = skb_transport_header(skb) - skb_mac_header(skb);
+               else
+                       thoff = nhoff + sizeof(struct ipv6hdr);
+               if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
+                       truncate = true;
+       }
 
        if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
                goto tx_err;
index 0322cc8..e1ebf5e 100644 (file)
 #include <net/inet_ecn.h>
 #include <net/dst_metadata.h>
 
-INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *));
 static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
                                struct sk_buff *skb)
 {
-       void (*edemux)(struct sk_buff *skb);
-
-       if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
-               const struct inet6_protocol *ipprot;
-
-               ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
-               if (ipprot && (edemux = READ_ONCE(ipprot->early_demux)))
-                       INDIRECT_CALL_2(edemux, tcp_v6_early_demux,
-                                       udp_v6_early_demux, skb);
+       if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
+           !skb_dst(skb) && !skb->sk) {
+               switch (ipv6_hdr(skb)->nexthdr) {
+               case IPPROTO_TCP:
+                       if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
+                               tcp_v6_early_demux(skb);
+                       break;
+               case IPPROTO_UDP:
+                       if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
+                               udp_v6_early_demux(skb);
+                       break;
+               }
        }
+
        if (!skb_valid_dst(skb))
                ip6_route_input(skb);
 }
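
The early-demux rewrite replaces an indirect call through the writable inet6_protos[] hook (kept non-const only so the sysctl could flip the pointer) with a direct switch on the next header that consults the per-protocol sysctl at call time; this is cheaper under retpolines and is what lets tcpv6_protocol and udpv6_protocol become const later in this same series. A userspace sketch of the new dispatch shape, with stand-in handlers:

#include <stdio.h>

enum { IPPROTO_TCP = 6, IPPROTO_UDP = 17 };

static void tcp_v6_early_demux(void) { puts("tcp demux"); }
static void udp_v6_early_demux(void) { puts("udp demux"); }

/* direct calls instead of an indirect early_demux pointer; each branch
 * checks its own enable knob at call time */
static void early_demux(int nexthdr, int tcp_on, int udp_on)
{
        switch (nexthdr) {
        case IPPROTO_TCP:
                if (tcp_on)
                        tcp_v6_early_demux();
                break;
        case IPPROTO_UDP:
                if (udp_on)
                        udp_v6_early_demux();
                break;
        }
}

int main(void)
{
        early_demux(IPPROTO_TCP, 1, 1);
        return 0;
}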
index 7f695c3..87c699d 100644 (file)
@@ -1522,7 +1522,6 @@ static void mld_query_work(struct work_struct *work)
 
                if (++cnt >= MLD_MAX_QUEUE) {
                        rework = true;
-                       schedule_delayed_work(&idev->mc_query_work, 0);
                        break;
                }
        }
@@ -1533,8 +1532,10 @@ static void mld_query_work(struct work_struct *work)
                __mld_query_work(skb);
        mutex_unlock(&idev->mc_lock);
 
-       if (!rework)
-               in6_dev_put(idev);
+       if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
+               return;
+
+       in6_dev_put(idev);
 }
 
 /* called with rcu_read_lock() */
@@ -1624,7 +1625,6 @@ static void mld_report_work(struct work_struct *work)
 
                if (++cnt >= MLD_MAX_QUEUE) {
                        rework = true;
-                       schedule_delayed_work(&idev->mc_report_work, 0);
                        break;
                }
        }
@@ -1635,8 +1635,10 @@ static void mld_report_work(struct work_struct *work)
                __mld_report_work(skb);
        mutex_unlock(&idev->mc_lock);
 
-       if (!rework)
-               in6_dev_put(idev);
+       if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0))
+               return;
+
+       in6_dev_put(idev);
 }
 
 static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
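
Both MLD hunks fix the same reference hand-off: the running work item owns one reference on idev, and the old code scheduled the rework from inside the loop and kept its reference whenever rework was set, but schedule_delayed_work() returns false when the work is already pending, in which case nobody would ever drop that reference. The new form requeues once, after the loop, and transfers the reference only when the requeue actually lands. A simplified userspace sketch of the contract, with stand-in types:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct idev { int refcnt; };

static void in6_dev_put(struct idev *d)
{
        if (--d->refcnt == 0) {
                puts("idev freed");
                free(d);
        }
}

/* false means the work was already pending: no reference is consumed */
static bool queue_delayed_work(struct idev *d)
{
        (void)d;
        return true; /* pretend the requeue succeeded */
}

static void mld_query_work(struct idev *idev, bool rework)
{
        /* ... process up to MLD_MAX_QUEUE queued skbs ... */
        if (rework && queue_delayed_work(idev))
                return;        /* reference moves to the queued run */
        in6_dev_put(idev);     /* done: drop the work's reference */
}

int main(void)
{
        struct idev *d = malloc(sizeof(*d));

        d->refcnt = 1;
        mld_query_work(d, false); /* prints "idev freed" */
        return 0;
}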
index ecf3a55..8c6c2d8 100644 (file)
 #include <linux/proc_fs.h>
 #include <net/ping.h>
 
+static void ping_v6_destroy(struct sock *sk)
+{
+       inet6_destroy_sock(sk);
+}
+
 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
 static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
                                 int *addr_len)
@@ -181,6 +186,7 @@ struct proto pingv6_prot = {
        .owner =        THIS_MODULE,
        .init =         ping_init_sock,
        .close =        ping_close,
+       .destroy =      ping_v6_destroy,
        .connect =      ip6_datagram_connect_v6_only,
        .disconnect =   __udp_disconnect,
        .setsockopt =   ipv6_setsockopt,
index d25dc83..9164179 100644 (file)
@@ -4569,8 +4569,15 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
        }
 
        f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
-       if (!IS_ERR(f6i))
+       if (!IS_ERR(f6i)) {
                f6i->dst_nocount = true;
+
+               if (!anycast &&
+                   (net->ipv6.devconf_all->disable_policy ||
+                    idev->cnf.disable_policy))
+                       f6i->dst_nopolicy = true;
+       }
+
        return f6i;
 }
 
@@ -5734,7 +5741,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                if (nexthop_is_blackhole(rt->nh))
                        rtm->rtm_type = RTN_BLACKHOLE;
 
-               if (net->ipv4.sysctl_nexthop_compat_mode &&
+               if (READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode) &&
                    rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
                        goto nla_put_failure;
 
index 6de0118..d43c50a 100644 (file)
@@ -406,7 +406,6 @@ int __net_init seg6_hmac_net_init(struct net *net)
 
        return rhashtable_init(&sdata->hmac_infos, &rht_params);
 }
-EXPORT_SYMBOL(seg6_hmac_net_init);
 
 void seg6_hmac_exit(void)
 {
index d648550..e756ba7 100644 (file)
@@ -189,6 +189,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
        }
 #endif
 
+       hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
+
        skb_postpush_rcsum(skb, hdr, tot_len);
 
        return 0;
@@ -241,6 +243,8 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
        }
 #endif
 
+       hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
+
        skb_postpush_rcsum(skb, hdr, sizeof(struct ipv6hdr) + hdrlen);
 
        return 0;
@@ -302,7 +306,6 @@ static int seg6_do_srh(struct sk_buff *skb)
                break;
        }
 
-       ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
        nf_reset_ct(skb);
 
index 98a3428..2cd4a8d 100644 (file)
@@ -826,7 +826,6 @@ static int input_action_end_b6(struct sk_buff *skb, struct seg6_local_lwt *slwt)
        if (err)
                goto drop;
 
-       ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 
        seg6_lookup_nexthop(skb, NULL, 0);
@@ -858,7 +857,6 @@ static int input_action_end_b6_encap(struct sk_buff *skb,
        if (err)
                goto drop;
 
-       ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 
        seg6_lookup_nexthop(skb, NULL, 0);
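
The seg6 hunks relocate the payload_len update from the callers (seg6_do_srh() and the two End.B6 input actions) into seg6_do_srh_encap()/seg6_do_srh_inline(), so the field is final before skb_postpush_rcsum() folds the pushed header bytes into skb->csum; updating it afterwards would leave CHECKSUM_COMPLETE skbs with a sum computed over stale header contents. The field itself is simply everything past the fixed 40-byte IPv6 header, in network byte order; a trivial sketch:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define IPV6_HDR_LEN 40 /* fixed IPv6 header size */

/* payload_len = total length minus the fixed header, big-endian */
static uint16_t ipv6_payload_len(size_t pkt_len)
{
        return htons((uint16_t)(pkt_len - IPV6_HDR_LEN));
}

int main(void)
{
        printf("payload_len = 0x%04x\n", ipv6_payload_len(1500));
        return 0;
}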
index c0b138c..6bcd5e4 100644 (file)
@@ -323,8 +323,6 @@ static int ipip6_tunnel_get_prl(struct net_device *dev, struct ip_tunnel_prl __u
                kcalloc(cmax, sizeof(*kp), GFP_KERNEL_ACCOUNT | __GFP_NOWARN) :
                NULL;
 
-       rcu_read_lock();
-
        ca = min(t->prl_count, cmax);
 
        if (!kp) {
@@ -341,7 +339,7 @@ static int ipip6_tunnel_get_prl(struct net_device *dev, struct ip_tunnel_prl __u
                }
        }
 
-       c = 0;
+       rcu_read_lock();
        for_each_prl_rcu(t->prl) {
                if (c >= cmax)
                        break;
@@ -353,7 +351,7 @@ static int ipip6_tunnel_get_prl(struct net_device *dev, struct ip_tunnel_prl __u
                if (kprl.addr != htonl(INADDR_ANY))
                        break;
        }
-out:
+
        rcu_read_unlock();
 
        len = sizeof(*kp) * c;
@@ -362,7 +360,7 @@ out:
                ret = -EFAULT;
 
        kfree(kp);
-
+out:
        return ret;
 }
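
The sit hunks shrink the RCU read-side critical section to just the for_each_prl_rcu() walk, so everything that can sleep (allocation retries, the final copy_to_user() and kfree()) happens outside it, and the out: label moves past the unlock so the error path exits without touching the lock. A userspace analogue of the corrected shape, with a pthread rwlock standing in for RCU:

#include <pthread.h>
#include <stdlib.h>

struct prl_entry { int addr; };

static pthread_rwlock_t prl_lock = PTHREAD_RWLOCK_INITIALIZER;

static int get_prl(const struct prl_entry *table, size_t n, size_t cmax)
{
        /* may sleep (GFP_KERNEL in the kernel): allocate before
         * entering the read-side critical section */
        struct prl_entry *kp = calloc(cmax, sizeof(*kp));
        size_t c = 0;

        if (!kp)
                return -1; /* early error never touches the lock */

        pthread_rwlock_rdlock(&prl_lock);   /* rcu_read_lock() analogue */
        for (size_t i = 0; i < n && c < cmax; i++)
                kp[c++] = table[i];
        pthread_rwlock_unlock(&prl_lock);   /* rcu_read_unlock() */

        /* copy-out (copy_to_user() in the kernel) may fault and sleep,
         * so it also stays outside the critical section */
        free(kp);
        return (int)c;
}

int main(void)
{
        struct prl_entry table[3] = { {1}, {2}, {3} };

        return get_prl(table, 3, 2) == 2 ? 0 : 1;
}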
 
index 9cc123f..5014aa6 100644 (file)
@@ -141,7 +141,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
        __u8 rcv_wscale;
        u32 tsoff = 0;
 
-       if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
+       if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) ||
+           !th->ack || th->rst)
                goto out;
 
        if (tcp_synq_no_recent_overflow(sk))
index f37dd4a..be09941 100644 (file)
@@ -546,7 +546,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                if (np->repflow && ireq->pktopts)
                        fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
-               tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
+               tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
                                (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
                                (np->tclass & INET_ECN_MASK) :
                                np->tclass;
@@ -1314,7 +1314,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
        /* Set ToS of the new socket based upon the value of incoming SYN.
         * ECT bits are set later in tcp_init_transfer().
         */
-       if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
                newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
 
        /* Clone native IPv6 options from listening socket (if any)
@@ -1822,7 +1822,7 @@ do_time_wait:
        goto discard_it;
 }
 
-INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
+void tcp_v6_early_demux(struct sk_buff *skb)
 {
        const struct ipv6hdr *hdr;
        const struct tcphdr *th;
@@ -2176,12 +2176,7 @@ struct proto tcpv6_prot = {
 };
 EXPORT_SYMBOL_GPL(tcpv6_prot);
 
-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct inet6_protocol tcpv6_protocol = {
-       .early_demux    =       tcp_v6_early_demux,
-       .early_demux_handler =  tcp_v6_early_demux,
+static const struct inet6_protocol tcpv6_protocol = {
        .handler        =       tcp_v6_rcv,
        .err_handler    =       tcp_v6_err,
        .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
index 55afd7f..e2f2e08 100644 (file)
@@ -1052,7 +1052,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
        return NULL;
 }
 
-INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
+void udp_v6_early_demux(struct sk_buff *skb)
 {
        struct net *net = dev_net(skb->dev);
        const struct udphdr *uh;
@@ -1660,12 +1660,7 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname,
        return ipv6_getsockopt(sk, level, optname, optval, optlen);
 }
 
-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct inet6_protocol udpv6_protocol = {
-       .early_demux    =       udp_v6_early_demux,
-       .early_demux_handler =  udp_v6_early_demux,
+static const struct inet6_protocol udpv6_protocol = {
        .handler        =       udpv6_rcv,
        .err_handler    =       udpv6_err,
        .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
index f7896f2..4ddf297 100644 (file)
@@ -4468,14 +4468,14 @@ EXPORT_SYMBOL_GPL(ieee80211_color_change_finish);
 
 void
 ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
-                                      u64 color_bitmap)
+                                      u64 color_bitmap, gfp_t gfp)
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 
        if (sdata->vif.color_change_active || sdata->vif.csa_active)
                return;
 
-       cfg80211_obss_color_collision_notify(sdata->dev, color_bitmap);
+       cfg80211_obss_color_collision_notify(sdata->dev, color_bitmap, gfp);
 }
 EXPORT_SYMBOL_GPL(ieeee80211_obss_color_collision_notify);
 
index 4153147..1a9ada4 100644 (file)
@@ -378,6 +378,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
        struct cfg80211_nan_func *func;
 
        clear_bit(SDATA_STATE_RUNNING, &sdata->state);
+       synchronize_rcu(); /* flush _ieee80211_wake_txqs() */
 
        cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata;
        if (cancel_scan)
index 3c08ae0..1675f8c 100644 (file)
@@ -3217,7 +3217,8 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
                                      IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
                if (color == bss_conf->he_bss_color.color)
                        ieeee80211_obss_color_collision_notify(&rx->sdata->vif,
-                                                              BIT_ULL(color));
+                                                              BIT_ULL(color),
+                                                              GFP_ATOMIC);
        }
 }
 
index 0e4efc0..c425f4f 100644 (file)
@@ -2818,19 +2818,10 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
        /*
         * If the skb is shared we need to obtain our own copy.
         */
-       if (skb_shared(skb)) {
-               struct sk_buff *tmp_skb = skb;
-
-               /* can't happen -- skb is a clone if info_id != 0 */
-               WARN_ON(info_id);
-
-               skb = skb_clone(skb, GFP_ATOMIC);
-               kfree_skb(tmp_skb);
-
-               if (!skb) {
-                       ret = -ENOMEM;
-                       goto free;
-               }
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (unlikely(!skb)) {
+               ret = -ENOMEM;
+               goto free;
        }
 
        hdr.frame_control = fc;
@@ -3539,15 +3530,9 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
 
        /* after this point (skb is modified) we cannot return false */
 
-       if (skb_shared(skb)) {
-               struct sk_buff *tmp_skb = skb;
-
-               skb = skb_clone(skb, GFP_ATOMIC);
-               kfree_skb(tmp_skb);
-
-               if (!skb)
-                       return true;
-       }
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (unlikely(!skb))
+               return true;
 
        if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) &&
            ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb))
@@ -4437,7 +4422,7 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
                                struct net_device *dev, struct sta_info *sta,
                                struct ieee80211_key *key, struct sk_buff *skb)
 {
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_info *info;
        struct ieee80211_local *local = sdata->local;
        struct tid_ampdu_tx *tid_tx;
        u8 tid;
@@ -4452,6 +4437,11 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
            test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
                goto out_free;
 
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (unlikely(!skb))
+               return;
+
+       info = IEEE80211_SKB_CB(skb);
        memset(info, 0, sizeof(*info));
 
        ieee80211_aggr_check(sdata, sta, skb);
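
All three tx.c hunks replace open-coded clone-if-shared logic with skb_share_check(), which returns the skb unchanged when it is exclusively owned and otherwise hands back a private clone while dropping the caller's reference to the shared one; in the 802.3 path the info pointer is re-read afterwards because a clone carries its own control buffer. A simplified userspace sketch of the helper's contract (the real helper also takes a gfp_t):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct skb { int users; char cb[48]; };

static struct skb *skb_clone(const struct skb *skb)
{
        struct skb *n = malloc(sizeof(*n));

        if (n) {
                memcpy(n, skb, sizeof(*n));
                n->users = 1;
        }
        return n;
}

static void kfree_skb(struct skb *skb)
{
        if (--skb->users == 0)
                free(skb);
}

/* clone only when shared; either way the caller ends up owning a
 * private skb, or NULL on allocation failure */
static struct skb *skb_share_check(struct skb *skb)
{
        if (skb->users > 1) {
                struct skb *n = skb_clone(skb);

                kfree_skb(skb); /* drop our ref on the shared one */
                return n;
        }
        return skb;
}

int main(void)
{
        struct skb *orig = malloc(sizeof(*orig));
        struct skb *skb;

        orig->users = 2;             /* us plus one other holder */
        skb = skb_share_check(orig); /* returns a private clone */
        if (!skb)
                return 1;
        printf("cloned = %d, orig users now %d\n", skb != orig, orig->users);
        kfree_skb(skb);  /* our private copy */
        kfree_skb(orig); /* the other holder's reference */
        return 0;
}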
index 1e26b52..dad42d4 100644 (file)
@@ -301,6 +301,9 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
        local_bh_disable();
        spin_lock(&fq->lock);
 
+       if (!test_bit(SDATA_STATE_RUNNING, &sdata->state))
+               goto out;
+
        if (sdata->vif.type == NL80211_IFTYPE_AP)
                ps = &sdata->bss->ps;
 
index 62c6733..d50480b 100644 (file)
@@ -147,8 +147,8 @@ u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
        bool qos;
 
        /* all mesh/ocb stations are required to support WME */
-       if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
-           sdata->vif.type == NL80211_IFTYPE_OCB)
+       if (sta && (sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
+                   sdata->vif.type == NL80211_IFTYPE_OCB))
                qos = true;
        else if (sta)
                qos = sta->sta.wme;
index be3b918..30d2890 100644 (file)
@@ -765,6 +765,7 @@ static noinline bool mptcp_established_options_rst(struct sock *sk, struct sk_bu
        opts->suboptions |= OPTION_MPTCP_RST;
        opts->reset_transient = subflow->reset_transient;
        opts->reset_reason = subflow->reset_reason;
+       MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPRSTTX);
 
        return true;
 }
@@ -788,6 +789,7 @@ static bool mptcp_established_options_fastclose(struct sock *sk,
        opts->rcvr_key = msk->remote_key;
 
        pr_debug("FASTCLOSE key=%llu", opts->rcvr_key);
+       MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);
        return true;
 }
 
@@ -809,6 +811,7 @@ static bool mptcp_established_options_mp_fail(struct sock *sk,
        opts->fail_seq = subflow->map_seq;
 
        pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq);
+       MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
 
        return true;
 }
@@ -833,13 +836,11 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
                    mptcp_established_options_mp_fail(sk, &opt_size, remaining, opts)) {
                        *size += opt_size;
                        remaining -= opt_size;
-                       MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);
                }
                /* MP_RST can be used with MP_FASTCLOSE and MP_FAIL if there is room */
                if (mptcp_established_options_rst(sk, skb, &opt_size, remaining, opts)) {
                        *size += opt_size;
                        remaining -= opt_size;
-                       MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPRSTTX);
                }
                return true;
        }
@@ -966,7 +967,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
                        goto reset;
                subflow->mp_capable = 0;
                pr_fallback(msk);
-               __mptcp_do_fallback(msk);
+               mptcp_do_fallback(ssk);
                return false;
        }
 
@@ -1270,7 +1271,7 @@ raise_win:
                if (unlikely(th->syn))
                        new_win = min(new_win, 65535U) << tp->rx_opt.rcv_wscale;
                if (!tp->rx_opt.rcv_wscale &&
-                   sock_net(ssk)->ipv4.sysctl_tcp_workaround_signed_windows)
+                   READ_ONCE(sock_net(ssk)->ipv4.sysctl_tcp_workaround_signed_windows))
                        new_win = min(new_win, MAX_TCP_WINDOW);
                else
                        new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
@@ -1583,6 +1584,9 @@ mp_rst:
                *ptr++ = mptcp_option(MPTCPOPT_MP_PRIO,
                                      TCPOLEN_MPTCP_PRIO,
                                      opts->backup, TCPOPT_NOP);
+
+               MPTCP_INC_STATS(sock_net((const struct sock *)tp),
+                               MPTCP_MIB_MPPRIOTX);
        }
 
 mp_capable_done:
index 59a8522..45e2a48 100644 (file)
@@ -299,23 +299,21 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);
-       struct sock *s = (struct sock *)msk;
 
        pr_debug("fail_seq=%llu", fail_seq);
 
        if (!READ_ONCE(msk->allow_infinite_fallback))
                return;
 
-       if (!READ_ONCE(subflow->mp_fail_response_expect)) {
+       if (!subflow->fail_tout) {
                pr_debug("send MP_FAIL response and infinite map");
 
                subflow->send_mp_fail = 1;
-               MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
                subflow->send_infinite_map = 1;
-       } else if (!sock_flag(sk, SOCK_DEAD)) {
+               tcp_send_ack(sk);
+       } else {
                pr_debug("MP_FAIL response received");
-
-               sk_stop_timer(s, &s->sk_timer);
+               WRITE_ONCE(subflow->fail_tout, 0);
        }
 }
 
index e099f2a..7c7395b 100644 (file)
@@ -717,9 +717,10 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
        }
 }
 
-static int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
-                                       struct mptcp_addr_info *addr,
-                                       u8 bkup)
+int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+                                struct mptcp_addr_info *addr,
+                                struct mptcp_addr_info *rem,
+                                u8 bkup)
 {
        struct mptcp_subflow_context *subflow;
 
@@ -727,24 +728,29 @@ static int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
 
        mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-               struct sock *sk = (struct sock *)msk;
-               struct mptcp_addr_info local;
+               struct mptcp_addr_info local, remote;
+               bool slow;
 
                local_address((struct sock_common *)ssk, &local);
                if (!mptcp_addresses_equal(&local, addr, addr->port))
                        continue;
 
+               if (rem && rem->family != AF_UNSPEC) {
+                       remote_address((struct sock_common *)ssk, &remote);
+                       if (!mptcp_addresses_equal(&remote, rem, rem->port))
+                               continue;
+               }
+
+               slow = lock_sock_fast(ssk);
                if (subflow->backup != bkup)
                        msk->last_snd = NULL;
                subflow->backup = bkup;
                subflow->send_mp_prio = 1;
                subflow->request_bkup = bkup;
-               __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPPRIOTX);
 
-               spin_unlock_bh(&msk->pm.lock);
                pr_debug("send ack for mp_prio");
-               mptcp_subflow_send_ack(ssk);
-               spin_lock_bh(&msk->pm.lock);
+               __mptcp_subflow_send_ack(ssk);
+               unlock_sock_fast(ssk, slow);
 
                return 0;
        }
@@ -801,7 +807,8 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
                        removed = true;
                        __MPTCP_INC_STATS(sock_net(sk), rm_type);
                }
-               __set_bit(rm_list->ids[i], msk->pm.id_avail_bitmap);
+               if (rm_type == MPTCP_MIB_RMSUBFLOW)
+                       __set_bit(rm_list->ids[i], msk->pm.id_avail_bitmap);
                if (!removed)
                        continue;
 
@@ -1816,8 +1823,10 @@ static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
 
        list.ids[list.nr++] = addr->id;
 
+       spin_lock_bh(&msk->pm.lock);
        mptcp_pm_nl_rm_subflow_received(msk, &list);
        mptcp_pm_create_subflow_or_signal_addr(msk);
+       spin_unlock_bh(&msk->pm.lock);
 }
 
 static int mptcp_nl_set_flags(struct net *net,
@@ -1835,12 +1844,10 @@ static int mptcp_nl_set_flags(struct net *net,
                        goto next;
 
                lock_sock(sk);
-               spin_lock_bh(&msk->pm.lock);
                if (changed & MPTCP_PM_ADDR_FLAG_BACKUP)
-                       ret = mptcp_pm_nl_mp_prio_send_ack(msk, addr, bkup);
+                       ret = mptcp_pm_nl_mp_prio_send_ack(msk, addr, NULL, bkup);
                if (changed & MPTCP_PM_ADDR_FLAG_FULLMESH)
                        mptcp_pm_nl_fullmesh(msk, addr);
-               spin_unlock_bh(&msk->pm.lock);
                release_sock(sk);
 
 next:
@@ -1854,6 +1861,9 @@ next:
 static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
 {
        struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, }, *entry;
+       struct mptcp_pm_addr_entry remote = { .addr = { .family = AF_UNSPEC }, };
+       struct nlattr *attr_rem = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
+       struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
        struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
        struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
        u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP |
@@ -1866,6 +1876,12 @@ static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
        if (ret < 0)
                return ret;
 
+       if (attr_rem) {
+               ret = mptcp_pm_parse_entry(attr_rem, info, false, &remote);
+               if (ret < 0)
+                       return ret;
+       }
+
        if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
                bkup = 1;
        if (addr.addr.family == AF_UNSPEC) {
@@ -1874,6 +1890,10 @@ static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
                        return -EOPNOTSUPP;
        }
 
+       if (token)
+               return mptcp_userspace_pm_set_flags(sock_net(skb->sk),
+                                                   token, &addr, &remote, bkup);
+
        spin_lock_bh(&pernet->lock);
        entry = __lookup_addr(pernet, &addr.addr, lookup_by_id);
        if (!entry) {
index f56378e..9e82250 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include "protocol.h"
+#include "mib.h"
 
 void mptcp_free_local_addr_list(struct mptcp_sock *msk)
 {
@@ -306,15 +307,11 @@ static struct sock *mptcp_nl_find_ssk(struct mptcp_sock *msk,
                                      const struct mptcp_addr_info *local,
                                      const struct mptcp_addr_info *remote)
 {
-       struct sock *sk = &msk->sk.icsk_inet.sk;
        struct mptcp_subflow_context *subflow;
-       struct sock *found = NULL;
 
        if (local->family != remote->family)
                return NULL;
 
-       lock_sock(sk);
-
        mptcp_for_each_subflow(msk, subflow) {
                const struct inet_sock *issk;
                struct sock *ssk;
@@ -347,16 +344,11 @@ static struct sock *mptcp_nl_find_ssk(struct mptcp_sock *msk,
                }
 
                if (issk->inet_sport == local->port &&
-                   issk->inet_dport == remote->port) {
-                       found = ssk;
-                       goto found;
-               }
+                   issk->inet_dport == remote->port)
+                       return ssk;
        }
 
-found:
-       release_sock(sk);
-
-       return found;
+       return NULL;
 }
 
 int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info)
@@ -412,18 +404,51 @@ int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info)
        }
 
        sk = &msk->sk.icsk_inet.sk;
+       lock_sock(sk);
        ssk = mptcp_nl_find_ssk(msk, &addr_l, &addr_r);
        if (ssk) {
                struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 
                mptcp_subflow_shutdown(sk, ssk, RCV_SHUTDOWN | SEND_SHUTDOWN);
                mptcp_close_ssk(sk, ssk, subflow);
+               MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMSUBFLOW);
                err = 0;
        } else {
                err = -ESRCH;
        }
+       release_sock(sk);
 
- destroy_err:
+destroy_err:
        sock_put((struct sock *)msk);
        return err;
 }
+
+int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token,
+                                struct mptcp_pm_addr_entry *loc,
+                                struct mptcp_pm_addr_entry *rem, u8 bkup)
+{
+       struct mptcp_sock *msk;
+       int ret = -EINVAL;
+       u32 token_val;
+
+       token_val = nla_get_u32(token);
+
+       msk = mptcp_token_get_sock(net, token_val);
+       if (!msk)
+               return ret;
+
+       if (!mptcp_pm_is_userspace(msk))
+               goto set_flags_err;
+
+       if (loc->addr.family == AF_UNSPEC ||
+           rem->addr.family == AF_UNSPEC)
+               goto set_flags_err;
+
+       lock_sock((struct sock *)msk);
+       ret = mptcp_pm_nl_mp_prio_send_ack(msk, &loc->addr, &rem->addr, bkup);
+       release_sock((struct sock *)msk);
+
+set_flags_err:
+       sock_put((struct sock *)msk);
+       return ret;
+}
index 17e1339..7e1518b 100644 (file)
@@ -500,19 +500,24 @@ static void mptcp_set_timeout(struct sock *sk)
        __mptcp_set_timeout(sk, tout);
 }
 
-static bool tcp_can_send_ack(const struct sock *ssk)
+static inline bool tcp_can_send_ack(const struct sock *ssk)
 {
        return !((1 << inet_sk_state_load(ssk)) &
               (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
 }
 
+void __mptcp_subflow_send_ack(struct sock *ssk)
+{
+       if (tcp_can_send_ack(ssk))
+               tcp_send_ack(ssk);
+}
+
 void mptcp_subflow_send_ack(struct sock *ssk)
 {
        bool slow;
 
        slow = lock_sock_fast(ssk);
-       if (tcp_can_send_ack(ssk))
-               tcp_send_ack(ssk);
+       __mptcp_subflow_send_ack(ssk);
        unlock_sock_fast(ssk, slow);
 }
 
@@ -1245,7 +1250,7 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
        MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX);
        mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
        pr_fallback(msk);
-       __mptcp_do_fallback(msk);
+       mptcp_do_fallback(ssk);
 }
 
 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
@@ -1903,7 +1908,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
        if (msk->rcvq_space.copied <= msk->rcvq_space.space)
                goto new_measure;
 
-       if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
                int rcvmem, rcvbuf;
                u64 rcvwin, grow;
@@ -1921,7 +1926,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
 
                do_div(rcvwin, advmss);
                rcvbuf = min_t(u64, rcvwin * rcvmem,
-                              sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+                              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
 
                if (rcvbuf > sk->sk_rcvbuf) {
                        u32 window_clamp;
@@ -2175,21 +2180,6 @@ static void mptcp_retransmit_timer(struct timer_list *t)
        sock_put(sk);
 }
 
-static struct mptcp_subflow_context *
-mp_fail_response_expect_subflow(struct mptcp_sock *msk)
-{
-       struct mptcp_subflow_context *subflow, *ret = NULL;
-
-       mptcp_for_each_subflow(msk, subflow) {
-               if (READ_ONCE(subflow->mp_fail_response_expect)) {
-                       ret = subflow;
-                       break;
-               }
-       }
-
-       return ret;
-}
-
 static void mptcp_timeout_timer(struct timer_list *t)
 {
        struct sock *sk = from_timer(sk, t, sk_timer);
@@ -2346,6 +2336,11 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
                kfree_rcu(subflow, rcu);
        } else {
                /* otherwise tcp will dispose of the ssk and subflow ctx */
+               if (ssk->sk_state == TCP_LISTEN) {
+                       tcp_set_state(ssk, TCP_CLOSE);
+                       mptcp_subflow_queue_clean(ssk);
+                       inet_csk_listen_stop(ssk);
+               }
                __tcp_close(ssk, 0);
 
                /* close acquired an extra ref */
@@ -2518,27 +2513,50 @@ reset_timer:
                mptcp_reset_timer(sk);
 }
 
+/* schedule the timeout timer for the relevant event: either close timeout
+ * or mp_fail timeout. The close timeout takes precedence over the mp_fail one
+ */
+void mptcp_reset_timeout(struct mptcp_sock *msk, unsigned long fail_tout)
+{
+       struct sock *sk = (struct sock *)msk;
+       unsigned long timeout, close_timeout;
+
+       if (!fail_tout && !sock_flag(sk, SOCK_DEAD))
+               return;
+
+       close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies + TCP_TIMEWAIT_LEN;
+
+       /* the close timeout takes precedence over the fail one, and here at least one of
+        * them is active
+        */
+       timeout = sock_flag(sk, SOCK_DEAD) ? close_timeout : fail_tout;
+
+       sk_reset_timer(sk, &sk->sk_timer, timeout);
+}
+
 static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
 {
-       struct mptcp_subflow_context *subflow;
-       struct sock *ssk;
+       struct sock *ssk = msk->first;
        bool slow;
 
-       subflow = mp_fail_response_expect_subflow(msk);
-       if (subflow) {
-               pr_debug("MP_FAIL doesn't respond, reset the subflow");
+       if (!ssk)
+               return;
 
-               ssk = mptcp_subflow_tcp_sock(subflow);
-               slow = lock_sock_fast(ssk);
-               mptcp_subflow_reset(ssk);
-               unlock_sock_fast(ssk, slow);
-       }
+       pr_debug("MP_FAIL doesn't respond, reset the subflow");
+
+       slow = lock_sock_fast(ssk);
+       mptcp_subflow_reset(ssk);
+       WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
+       unlock_sock_fast(ssk, slow);
+
+       mptcp_reset_timeout(msk, 0);
 }
 
 static void mptcp_worker(struct work_struct *work)
 {
        struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
        struct sock *sk = &msk->sk.icsk_inet.sk;
+       unsigned long fail_tout;
        int state;
 
        lock_sock(sk);
@@ -2575,7 +2593,9 @@ static void mptcp_worker(struct work_struct *work)
        if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
                __mptcp_retrans(sk);
 
-       mptcp_mp_fail_no_response(msk);
+       fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0;
+       if (fail_tout && time_after(jiffies, fail_tout))
+               mptcp_mp_fail_no_response(msk);
 
 unlock:
        release_sock(sk);
@@ -2649,8 +2669,8 @@ static int mptcp_init_sock(struct sock *sk)
        mptcp_ca_reset(sk);
 
        sk_sockets_allocated_inc(sk);
-       sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
-       sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
+       sk->sk_rcvbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
+       sk->sk_sndbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
 
        return 0;
 }
@@ -2822,6 +2842,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
 static void mptcp_close(struct sock *sk, long timeout)
 {
        struct mptcp_subflow_context *subflow;
+       struct mptcp_sock *msk = mptcp_sk(sk);
        bool do_cancel_work = false;
 
        lock_sock(sk);
@@ -2840,10 +2861,16 @@ static void mptcp_close(struct sock *sk, long timeout)
 cleanup:
        /* orphan all the subflows */
        inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
-       mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
+       mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
                bool slow = lock_sock_fast_nested(ssk);
 
+               /* since the close timeout takes precedence over the fail one,
+                * cancel the latter
+                */
+               if (ssk == msk->first)
+                       subflow->fail_tout = 0;
+
                sock_orphan(ssk);
                unlock_sock_fast(ssk, slow);
        }
@@ -2852,13 +2879,13 @@ cleanup:
        sock_hold(sk);
        pr_debug("msk=%p state=%d", sk, sk->sk_state);
        if (mptcp_sk(sk)->token)
-               mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
+               mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
 
        if (sk->sk_state == TCP_CLOSE) {
                __mptcp_destroy_sock(sk);
                do_cancel_work = true;
        } else {
-               sk_reset_timer(sk, &sk->sk_timer, jiffies + TCP_TIMEWAIT_LEN);
+               mptcp_reset_timeout(msk, 0);
        }
        release_sock(sk);
        if (do_cancel_work)
@@ -2892,12 +2919,12 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
 
 static int mptcp_disconnect(struct sock *sk, int flags)
 {
-       struct mptcp_subflow_context *subflow;
+       struct mptcp_subflow_context *subflow, *tmp;
        struct mptcp_sock *msk = mptcp_sk(sk);
 
        inet_sk_state_store(sk, TCP_CLOSE);
 
-       mptcp_for_each_subflow(msk, subflow) {
+       list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
                __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_FASTCLOSE);
index 200f89f..480c532 100644 (file)
@@ -306,6 +306,7 @@ struct mptcp_sock {
 
        u32 setsockopt_seq;
        char            ca_name[TCP_CA_NAME_MAX];
+       struct mptcp_sock       *dl_next;
 };
 
 #define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
@@ -468,7 +469,6 @@ struct mptcp_subflow_context {
                local_id_valid : 1, /* local_id is correctly initialized */
                valid_csum_seen : 1;        /* at least one csum validated */
        enum mptcp_data_avail data_avail;
-       bool    mp_fail_response_expect;
        u32     remote_nonce;
        u64     thmac;
        u32     local_nonce;
@@ -482,6 +482,7 @@ struct mptcp_subflow_context {
        u8      stale_count;
 
        long    delegated_status;
+       unsigned long   fail_tout;
 
        );
 
@@ -606,8 +607,10 @@ void __init mptcp_subflow_init(void);
 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how);
 void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
                     struct mptcp_subflow_context *subflow);
+void __mptcp_subflow_send_ack(struct sock *ssk);
 void mptcp_subflow_send_ack(struct sock *ssk);
 void mptcp_subflow_reset(struct sock *ssk);
+void mptcp_subflow_queue_clean(struct sock *ssk);
 void mptcp_sock_graft(struct sock *sk, struct socket *parent);
 struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
 
@@ -662,6 +665,7 @@ void mptcp_get_options(const struct sk_buff *skb,
 
 void mptcp_finish_connect(struct sock *sk);
 void __mptcp_set_connected(struct sock *sk);
+void mptcp_reset_timeout(struct mptcp_sock *msk, unsigned long fail_tout);
 static inline bool mptcp_is_fully_established(struct sock *sk)
 {
        return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
@@ -768,6 +772,10 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
                               const struct mptcp_rm_list *rm_list);
 void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup);
 void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq);
+int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+                                struct mptcp_addr_info *addr,
+                                struct mptcp_addr_info *rem,
+                                u8 bkup);
 bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
                              const struct mptcp_pm_addr_entry *entry);
 void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
@@ -784,7 +792,9 @@ int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
 int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
                                                   unsigned int id,
                                                   u8 *flags, int *ifindex);
-
+int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token,
+                                struct mptcp_pm_addr_entry *loc,
+                                struct mptcp_pm_addr_entry *rem, u8 bkup);
 int mptcp_pm_announce_addr(struct mptcp_sock *msk,
                           const struct mptcp_addr_info *addr,
                           bool echo);
@@ -926,12 +936,25 @@ static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
        set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
 }
 
-static inline void mptcp_do_fallback(struct sock *sk)
+static inline void mptcp_do_fallback(struct sock *ssk)
 {
-       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
-       struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+       struct sock *sk = subflow->conn;
+       struct mptcp_sock *msk;
 
+       msk = mptcp_sk(sk);
        __mptcp_do_fallback(msk);
+       if (READ_ONCE(msk->snd_data_fin_enable) && !(ssk->sk_shutdown & SEND_SHUTDOWN)) {
+               gfp_t saved_allocation = ssk->sk_allocation;
+
+               /* we are in an atomic (BH) scope, override ssk default for data
+                * fin allocation
+                */
+               ssk->sk_allocation = GFP_ATOMIC;
+               ssk->sk_shutdown |= SEND_SHUTDOWN;
+               tcp_shutdown(ssk, SEND_SHUTDOWN);
+               ssk->sk_allocation = saved_allocation;
+       }
 }
 
 #define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
index 8841e8c..af28f3b 100644 (file)
@@ -843,7 +843,8 @@ enum mapping_status {
        MAPPING_INVALID,
        MAPPING_EMPTY,
        MAPPING_DATA_FIN,
-       MAPPING_DUMMY
+       MAPPING_DUMMY,
+       MAPPING_BAD_CSUM
 };
 
 static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
@@ -958,11 +959,7 @@ static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *
                                 subflow->map_data_csum);
        if (unlikely(csum)) {
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
-               if (subflow->mp_join || subflow->valid_csum_seen) {
-                       subflow->send_mp_fail = 1;
-                       MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPFAILTX);
-               }
-               return subflow->mp_join ? MAPPING_INVALID : MAPPING_DUMMY;
+               return MAPPING_BAD_CSUM;
        }
 
        subflow->valid_csum_seen = 1;
@@ -974,7 +971,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        bool csum_reqd = READ_ONCE(msk->csum_enabled);
-       struct sock *sk = (struct sock *)msk;
        struct mptcp_ext *mpext;
        struct sk_buff *skb;
        u16 data_len;
@@ -1016,9 +1012,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
                pr_debug("infinite mapping received");
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
                subflow->map_data_len = 0;
-               if (!sock_flag(ssk, SOCK_DEAD))
-                       sk_stop_timer(sk, &sk->sk_timer);
-
                return MAPPING_INVALID;
        }
 
@@ -1165,6 +1158,33 @@ static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
                return !subflow->fully_established;
 }
 
+static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
+{
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+       unsigned long fail_tout;
+
+       /* graceful failure can happen only on the MPC subflow */
+       if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
+               return;
+
+       /* since the close timeout takes precedence over the fail one,
+        * no need to start the latter when the first is already set
+        */
+       if (sock_flag((struct sock *)msk, SOCK_DEAD))
+               return;
+
+       /* we don't need extreme accuracy here; a zero fail_tout is the special
+        * value meaning no fail timeout at all
+        */
+       fail_tout = jiffies + TCP_RTO_MAX;
+       if (!fail_tout)
+               fail_tout = 1;
+       WRITE_ONCE(subflow->fail_tout, fail_tout);
+       tcp_send_ack(ssk);
+
+       mptcp_reset_timeout(msk, subflow->fail_tout);
+}
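
Two details of mptcp_subflow_fail() deserve a note: fail_tout == 0 is the "no fail timeout armed" sentinel, so a jiffies sum that happens to wrap to 0 is nudged to 1, and the worker later tests expiry with time_after(), which stays correct across wraparound. A standalone sketch using the kernel's time_after() definition:

#include <limits.h>
#include <stdio.h>

/* kernel definition: wrap-safe while a and b are within LONG_MAX ticks */
#define time_after(a, b) ((long)((b) - (a)) < 0)

#define TCP_RTO_MAX 120UL /* illustrative tick count, not the real value */

int main(void)
{
        unsigned long jiffies = ULONG_MAX - 119;         /* about to wrap */
        unsigned long fail_tout = jiffies + TCP_RTO_MAX; /* wraps to 0 */

        if (!fail_tout)
                fail_tout = 1; /* keep 0 reserved as "no timeout" */

        printf("fail_tout = %lu\n", fail_tout);
        /* 200 ticks later (post-wrap), the timeout reads as expired */
        printf("expired = %d\n", time_after(jiffies + 200, fail_tout));
        return 0;
}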
+
 static bool subflow_check_data_avail(struct sock *ssk)
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
@@ -1184,10 +1204,8 @@ static bool subflow_check_data_avail(struct sock *ssk)
 
                status = get_mapping_status(ssk, msk);
                trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
-               if (unlikely(status == MAPPING_INVALID))
-                       goto fallback;
-
-               if (unlikely(status == MAPPING_DUMMY))
+               if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
+                            status == MAPPING_BAD_CSUM))
                        goto fallback;
 
                if (status != MAPPING_OK)
@@ -1229,22 +1247,17 @@ no_data:
 fallback:
        if (!__mptcp_check_fallback(msk)) {
                /* RFC 8684 section 3.7. */
-               if (subflow->send_mp_fail) {
+               if (status == MAPPING_BAD_CSUM &&
+                   (subflow->mp_join || subflow->valid_csum_seen)) {
+                       subflow->send_mp_fail = 1;
+
                        if (!READ_ONCE(msk->allow_infinite_fallback)) {
-                               ssk->sk_err = EBADMSG;
-                               tcp_set_state(ssk, TCP_CLOSE);
                                subflow->reset_transient = 0;
                                subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
-                               tcp_send_active_reset(ssk, GFP_ATOMIC);
-                               while ((skb = skb_peek(&ssk->sk_receive_queue)))
-                                       sk_eat_skb(ssk, skb);
-                       } else if (!sock_flag(ssk, SOCK_DEAD)) {
-                               WRITE_ONCE(subflow->mp_fail_response_expect, true);
-                               sk_reset_timer((struct sock *)msk,
-                                              &((struct sock *)msk)->sk_timer,
-                                              jiffies + TCP_RTO_MAX);
+                               goto reset;
                        }
-                       WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
+                       mptcp_subflow_fail(msk, ssk);
+                       WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
                        return true;
                }
 
@@ -1252,16 +1265,20 @@ fallback:
                        /* fatal protocol error, close the socket.
                         * subflow_error_report() will introduce the appropriate barriers
                         */
-                       ssk->sk_err = EBADMSG;
-                       tcp_set_state(ssk, TCP_CLOSE);
                        subflow->reset_transient = 0;
                        subflow->reset_reason = MPTCP_RST_EMPTCP;
+
+reset:
+                       ssk->sk_err = EBADMSG;
+                       tcp_set_state(ssk, TCP_CLOSE);
+                       while ((skb = skb_peek(&ssk->sk_receive_queue)))
+                               sk_eat_skb(ssk, skb);
                        tcp_send_active_reset(ssk, GFP_ATOMIC);
                        WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
                        return false;
                }
 
-               __mptcp_do_fallback(msk);
+               mptcp_do_fallback(ssk);
        }
 
        skb = skb_peek(&ssk->sk_receive_queue);
@@ -1516,7 +1533,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
        mptcp_sock_graft(ssk, sk->sk_socket);
        iput(SOCK_INODE(sf));
        WRITE_ONCE(msk->allow_infinite_fallback, false);
-       return err;
+       return 0;
 
 failed_unlink:
        list_del(&subflow->node);
@@ -1706,6 +1723,58 @@ static void subflow_state_change(struct sock *sk)
        }
 }
 
+void mptcp_subflow_queue_clean(struct sock *listener_ssk)
+{
+       struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
+       struct mptcp_sock *msk, *next, *head = NULL;
+       struct request_sock *req;
+
+       /* build a list of all unaccepted mptcp sockets */
+       spin_lock_bh(&queue->rskq_lock);
+       for (req = queue->rskq_accept_head; req; req = req->dl_next) {
+               struct mptcp_subflow_context *subflow;
+               struct sock *ssk = req->sk;
+               struct mptcp_sock *msk;
+
+               if (!sk_is_mptcp(ssk))
+                       continue;
+
+               subflow = mptcp_subflow_ctx(ssk);
+               if (!subflow || !subflow->conn)
+                       continue;
+
+               /* skip if already in list */
+               msk = mptcp_sk(subflow->conn);
+               if (msk->dl_next || msk == head)
+                       continue;
+
+               msk->dl_next = head;
+               head = msk;
+       }
+       spin_unlock_bh(&queue->rskq_lock);
+       if (!head)
+               return;
+
+       /* can't acquire the msk socket lock under the subflow one,
+        * or it will cause an ABBA deadlock
+        */
+       release_sock(listener_ssk);
+
+       for (msk = head; msk; msk = next) {
+               struct sock *sk = (struct sock *)msk;
+               bool slow;
+
+               slow = lock_sock_fast_nested(sk);
+               next = msk->dl_next;
+               msk->first = NULL;
+               msk->dl_next = NULL;
+               unlock_sock_fast(sk, slow);
+       }
+
+       /* we are still under the listener msk socket lock */
+       lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
+}
+
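
mptcp_subflow_queue_clean() drops the listener subflow lock before taking each msk socket lock because the rest of MPTCP nests them the other way around (msk first, then subflow); acquiring them here in subflow-then-msk order could deadlock against that path, the classic ABBA pattern the comment mentions. A minimal pthread sketch of the discipline, assuming the same two-level hierarchy:

#include <pthread.h>

static pthread_mutex_t msk_lock = PTHREAD_MUTEX_INITIALIZER;      /* outer */
static pthread_mutex_t listener_lock = PTHREAD_MUTEX_INITIALIZER; /* inner */

/* called with listener_lock held: taking msk_lock now would invert the
 * msk -> listener order used everywhere else */
static void queue_clean(void)
{
        pthread_mutex_unlock(&listener_lock);

        pthread_mutex_lock(&msk_lock);
        /* ... detach the unaccepted msk sockets ... */
        pthread_mutex_unlock(&msk_lock);

        /* restore the caller's locking state before returning */
        pthread_mutex_lock(&listener_lock);
}

int main(void)
{
        pthread_mutex_lock(&listener_lock);
        queue_clean();
        pthread_mutex_unlock(&listener_lock);
        return 0;
}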
 static int subflow_ulp_init(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
index 7881441..80713fe 100644 (file)
@@ -1803,7 +1803,8 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
        pdev = to_platform_device(dev->dev.parent);
        if (pdev) {
                np = pdev->dev.of_node;
-               if (np && of_get_property(np, "mlx,multi-host", NULL))
+               if (np && (of_get_property(np, "mellanox,multi-host", NULL) ||
+                          of_get_property(np, "mlx,multi-host", NULL)))
                        ndp->mlx_multi_host = true;
        }
 
index 082a2fd..369aeab 100644 (file)
@@ -729,6 +729,9 @@ static void nf_ct_gc_expired(struct nf_conn *ct)
        if (!refcount_inc_not_zero(&ct->ct_general.use))
                return;
 
+       /* load ->status after refcount increase */
+       smp_acquire__after_ctrl_dep();
+
        if (nf_ct_should_gc(ct))
                nf_ct_kill(ct);
 
@@ -795,6 +798,9 @@ __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                 */
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (likely(refcount_inc_not_zero(&ct->ct_general.use))) {
+                       /* re-check key after refcount */
+                       smp_acquire__after_ctrl_dep();
+
                        if (likely(nf_ct_key_equal(h, tuple, zone, net)))
                                goto found;
 
@@ -1387,6 +1393,9 @@ static unsigned int early_drop_list(struct net *net,
                if (!refcount_inc_not_zero(&tmp->ct_general.use))
                        continue;
 
+               /* load ->ct_net and ->status after refcount increase */
+               smp_acquire__after_ctrl_dep();
+
                /* kill only if still in same netns -- might have moved due to
                 * SLAB_TYPESAFE_BY_RCU rules.
                 *
@@ -1536,6 +1545,9 @@ static void gc_worker(struct work_struct *work)
                        if (!refcount_inc_not_zero(&tmp->ct_general.use))
                                continue;
 
+                       /* load ->status after refcount increase */
+                       smp_acquire__after_ctrl_dep();
+
                        if (gc_worker_skip_ct(tmp)) {
                                nf_ct_put(tmp);
                                continue;
@@ -1775,6 +1787,16 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
        if (!exp)
                __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
 
+       /* Another CPU might have obtained a pointer to this object before it
+        * was released.  Because refcount is 0, refcount_inc_not_zero() will
+        * fail.
+        *
+        * After refcount_set(1) it will succeed; ensure that zeroing of
+        * ct->status and the correct ct->net pointer are visible; else another
+        * core might observe the CONFIRMED bit, which means the entry is valid
+        * and in the hash table, but it's not (anymore).
+        */
+       smp_wmb();
+
        /* Now it is going to be associated with an sk_buff, set refcount to 1. */
        refcount_set(&ct->ct_general.use, 1);
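
The conntrack hunks above pair two barriers around SLAB_TYPESAFE_BY_RCU lookups: the writer orders its initializing stores before refcount_set(1) with smp_wmb(), and each reader issues smp_acquire__after_ctrl_dep() after a successful refcount_inc_not_zero() so that loads of ->status cannot be reordered before the refcount check. A hedged sketch of the pairing with a placeholder object, not the real nf_conn layout:

#include <linux/refcount.h>
#include <linux/bitops.h>
#include <asm/barrier.h>

#define OBJ_DYING       0       /* placeholder status bit */

struct obj {
        unsigned long status;
        refcount_t use;
};

static void obj_publish(struct obj *o)
{
        o->status = 0;                  /* initialize the payload first */
        smp_wmb();                      /* order stores before the refcount */
        refcount_set(&o->use, 1);       /* lookups may now succeed */
}

static bool obj_get_valid(struct obj *o)
{
        if (!refcount_inc_not_zero(&o->use))
                return false;
        /* pairs with smp_wmb(): ->status is read only after the inc */
        smp_acquire__after_ctrl_dep();
        return !test_bit(OBJ_DYING, &o->status);
}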
 
index 722af5e..f5905b5 100644 (file)
@@ -1203,6 +1203,7 @@ restart:
                                           hnnode) {
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (nf_ct_is_expired(ct)) {
+                               /* need to defer nf_ct_kill() until lock is released */
                                if (i < ARRAY_SIZE(nf_ct_evict) &&
                                    refcount_inc_not_zero(&ct->ct_general.use))
                                        nf_ct_evict[i++] = ct;
index 6ad7bbc..0589587 100644 (file)
@@ -306,6 +306,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
        if (unlikely(!refcount_inc_not_zero(&ct->ct_general.use)))
                return 0;
 
+       /* load ->status after refcount increase */
+       smp_acquire__after_ctrl_dep();
+
        if (nf_ct_should_gc(ct)) {
                nf_ct_kill(ct);
                goto release;
index 7873bd1..a8e2425 100644 (file)
 #include <net/netfilter/nf_tables_offload.h>
 #include <net/netfilter/nf_dup_netdev.h>
 
-static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev)
+#define NF_RECURSION_LIMIT     2
+
+static DEFINE_PER_CPU(u8, nf_dup_skb_recursion);
+
+static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev,
+                               enum nf_dev_hooks hook)
 {
-       if (skb_mac_header_was_set(skb))
+       if (__this_cpu_read(nf_dup_skb_recursion) > NF_RECURSION_LIMIT)
+               goto err;
+
+       if (hook == NF_NETDEV_INGRESS && skb_mac_header_was_set(skb)) {
+               if (skb_cow_head(skb, skb->mac_len))
+                       goto err;
+
                skb_push(skb, skb->mac_len);
+       }
 
        skb->dev = dev;
        skb_clear_tstamp(skb);
+       __this_cpu_inc(nf_dup_skb_recursion);
        dev_queue_xmit(skb);
+       __this_cpu_dec(nf_dup_skb_recursion);
+       return;
+err:
+       kfree_skb(skb);
 }
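
A duplicated packet handed to dev_queue_xmit() can traverse the same egress hook again and duplicate itself indefinitely; the per-CPU counter above caps that depth. A plain per-CPU variable suffices because the re-entry happens synchronously on the local CPU. A minimal sketch of the guard, with hypothetical names:

#include <linux/percpu.h>

#define DUP_RECURSION_LIMIT     2

static DEFINE_PER_CPU(u8, dup_depth);

/* Sketch: the recursion is synchronous on this CPU with BHs disabled,
 * so no atomics are needed and the counter cannot migrate mid-count. */
static bool dup_enter(void)
{
        if (__this_cpu_read(dup_depth) > DUP_RECURSION_LIMIT)
                return false;           /* caller drops the skb instead */
        __this_cpu_inc(dup_depth);
        return true;
}

static void dup_exit(void)
{
        __this_cpu_dec(dup_depth);
}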
 
 void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif)
@@ -33,7 +50,7 @@ void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif)
                return;
        }
 
-       nf_do_netdev_egress(pkt->skb, dev);
+       nf_do_netdev_egress(pkt->skb, dev, nft_hook(pkt));
 }
 EXPORT_SYMBOL_GPL(nf_fwd_netdev_egress);
 
@@ -48,7 +65,7 @@ void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif)
 
        skb = skb_clone(pkt->skb, GFP_ATOMIC);
        if (skb)
-               nf_do_netdev_egress(skb, dev);
+               nf_do_netdev_egress(skb, dev, nft_hook(pkt));
 }
 EXPORT_SYMBOL_GPL(nf_dup_netdev_egress);
 
index 77bcb10..cb894f0 100644 (file)
@@ -67,7 +67,7 @@ dump_arp_packet(struct nf_log_buf *m,
        unsigned int logflags;
        struct arphdr _arph;
 
-       ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
+       ah = skb_header_pointer(skb, nhoff, sizeof(_arph), &_arph);
        if (!ah) {
                nf_log_buf_add(m, "TRUNCATED");
                return;
@@ -96,7 +96,7 @@ dump_arp_packet(struct nf_log_buf *m,
            ah->ar_pln != sizeof(__be32))
                return;
 
-       ap = skb_header_pointer(skb, sizeof(_arph), sizeof(_arpp), &_arpp);
+       ap = skb_header_pointer(skb, nhoff + sizeof(_arph), sizeof(_arpp), &_arpp);
        if (!ap) {
                nf_log_buf_add(m, " INCOMPLETE [%zu bytes]",
                               skb->len - sizeof(_arph));
@@ -149,7 +149,7 @@ static void nf_log_arp_packet(struct net *net, u_int8_t pf,
 
        nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
                                  prefix);
-       dump_arp_packet(m, loginfo, skb, 0);
+       dump_arp_packet(m, loginfo, skb, skb_network_offset(skb));
 
        nf_log_buf_close(m);
 }
@@ -850,7 +850,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
        if (in)
                dump_mac_header(m, loginfo, skb);
 
-       dump_ipv4_packet(net, m, loginfo, skb, 0);
+       dump_ipv4_packet(net, m, loginfo, skb, skb_network_offset(skb));
 
        nf_log_buf_close(m);
 }
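
Both log fixes stop assuming the protocol header sits at byte 0 of the skb data and instead parse relative to skb_network_offset(), which matters when the packet is inspected at a point where the header has not been pulled. A small illustrative helper (not a kernel API) showing the pattern:

#include <linux/ip.h>
#include <linux/skbuff.h>

/* Sketch: always parse at the network-header offset rather than
 * assuming the header starts at skb->data. */
static const struct iphdr *peek_iphdr(const struct sk_buff *skb,
                                      struct iphdr *buf)
{
        return skb_header_pointer(skb, skb_network_offset(skb),
                                  sizeof(*buf), buf);
}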
index e479dd0..16915f8 100644 (file)
@@ -405,7 +405,7 @@ synproxy_build_ip(struct net *net, struct sk_buff *skb, __be32 saddr,
        iph->tos        = 0;
        iph->id         = 0;
        iph->frag_off   = htons(IP_DF);
-       iph->ttl        = net->ipv4.sysctl_ip_default_ttl;
+       iph->ttl        = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
        iph->protocol   = IPPROTO_TCP;
        iph->check      = 0;
        iph->saddr      = saddr;
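
Several hunks in this series wrap lockless sysctl reads in READ_ONCE(), because the values can be rewritten concurrently from /proc; the annotation prevents torn or repeated loads and documents the intentional data race. A hedged sketch with a placeholder config struct:

#include <linux/compiler.h>

struct cfg { int default_ttl; };        /* placeholder for a sysctl */

static int sample_ttl(const struct cfg *c)
{
        return READ_ONCE(c->default_ttl);      /* lockless reader */
}

static void set_ttl(struct cfg *c, int v)
{
        WRITE_ONCE(c->default_ttl, v);          /* pairing writer */
}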
index 51144fc..9f976b1 100644 (file)
@@ -3340,6 +3340,8 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
                        if (err < 0)
                                return err;
                }
+
+               cond_resched();
        }
 
        return 0;
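
nft_chain_validate() and nf_tables_check_loops() walk user-controlled rule lists that can be made arbitrarily long, so the loops now yield with cond_resched() to avoid soft lockups on non-preemptible kernels. A schematic version with a hypothetical rule type:

#include <linux/list.h>
#include <linux/sched.h>

struct rule {                   /* hypothetical element type */
        struct list_head list;
};

static int validate_all(struct list_head *rules)
{
        struct rule *r;

        list_for_each_entry(r, rules, list) {
                /* ... per-rule validation of unbounded total cost ... */
                cond_resched(); /* process context: yielding is safe */
        }
        return 0;
}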
@@ -5213,13 +5215,20 @@ static int nft_setelem_parse_data(struct nft_ctx *ctx, struct nft_set *set,
                                  struct nft_data *data,
                                  struct nlattr *attr)
 {
+       u32 dtype;
        int err;
 
        err = nft_data_init(ctx, data, NFT_DATA_VALUE_MAXLEN, desc, attr);
        if (err < 0)
                return err;
 
-       if (desc->type != NFT_DATA_VERDICT && desc->len != set->dlen) {
+       if (set->dtype == NFT_DATA_VERDICT)
+               dtype = NFT_DATA_VERDICT;
+       else
+               dtype = NFT_DATA_VALUE;
+
+       if (dtype != desc->type ||
+           set->dlen != desc->len) {
                nft_data_release(data, desc->type);
                return -EINVAL;
        }
@@ -5826,8 +5835,11 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
        if (!nla[NFTA_SET_ELEM_KEY] && !(flags & NFT_SET_ELEM_CATCHALL))
                return -EINVAL;
 
-       if (flags != 0)
-               nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
+       if (flags != 0) {
+               err = nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
+               if (err < 0)
+                       return err;
+       }
 
        if (set->flags & NFT_SET_MAP) {
                if (nla[NFTA_SET_ELEM_DATA] == NULL &&
@@ -5936,7 +5948,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                if (err < 0)
                        goto err_set_elem_expr;
 
-               nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+               err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+               if (err < 0)
+                       goto err_parse_key;
        }
 
        if (nla[NFTA_SET_ELEM_KEY_END]) {
@@ -5945,22 +5959,31 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                if (err < 0)
                        goto err_parse_key;
 
-               nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen);
+               err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen);
+               if (err < 0)
+                       goto err_parse_key_end;
        }
 
        if (timeout > 0) {
-               nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION);
-               if (timeout != set->timeout)
-                       nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
+               err = nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION);
+               if (err < 0)
+                       goto err_parse_key_end;
+
+               if (timeout != set->timeout) {
+                       err = nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
+                       if (err < 0)
+                               goto err_parse_key_end;
+               }
        }
 
        if (num_exprs) {
                for (i = 0; i < num_exprs; i++)
                        size += expr_array[i]->ops->size;
 
-               nft_set_ext_add_length(&tmpl, NFT_SET_EXT_EXPRESSIONS,
-                                      sizeof(struct nft_set_elem_expr) +
-                                      size);
+               err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_EXPRESSIONS,
+                                            sizeof(struct nft_set_elem_expr) + size);
+               if (err < 0)
+                       goto err_parse_key_end;
        }
 
        if (nla[NFTA_SET_ELEM_OBJREF] != NULL) {
@@ -5975,7 +5998,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                        err = PTR_ERR(obj);
                        goto err_parse_key_end;
                }
-               nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF);
+               err = nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF);
+               if (err < 0)
+                       goto err_parse_key_end;
        }
 
        if (nla[NFTA_SET_ELEM_DATA] != NULL) {
@@ -6009,7 +6034,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                                                          NFT_VALIDATE_NEED);
                }
 
-               nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, desc.len);
+               err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, desc.len);
+               if (err < 0)
+                       goto err_parse_data;
        }
 
        /* The full maximum length of userdata can exceed the maximum
@@ -6019,9 +6046,12 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
        ulen = 0;
        if (nla[NFTA_SET_ELEM_USERDATA] != NULL) {
                ulen = nla_len(nla[NFTA_SET_ELEM_USERDATA]);
-               if (ulen > 0)
-                       nft_set_ext_add_length(&tmpl, NFT_SET_EXT_USERDATA,
-                                              ulen);
+               if (ulen > 0) {
+                       err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_USERDATA,
+                                                    ulen);
+                       if (err < 0)
+                               goto err_parse_data;
+               }
        }
 
        err = -ENOMEM;
@@ -6249,8 +6279,11 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
 
        nft_set_ext_prepare(&tmpl);
 
-       if (flags != 0)
-               nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
+       if (flags != 0) {
+               err = nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
+               if (err < 0)
+                       return err;
+       }
 
        if (nla[NFTA_SET_ELEM_KEY]) {
                err = nft_setelem_parse_key(ctx, set, &elem.key.val,
@@ -6258,16 +6291,20 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
                if (err < 0)
                        return err;
 
-               nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+               err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+               if (err < 0)
+                       goto fail_elem;
        }
 
        if (nla[NFTA_SET_ELEM_KEY_END]) {
                err = nft_setelem_parse_key(ctx, set, &elem.key_end.val,
                                            nla[NFTA_SET_ELEM_KEY_END]);
                if (err < 0)
-                       return err;
+                       goto fail_elem;
 
-               nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen);
+               err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen);
+               if (err < 0)
+                       goto fail_elem_key_end;
        }
 
        err = -ENOMEM;
@@ -6275,7 +6312,7 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
                                      elem.key_end.val.data, NULL, 0, 0,
                                      GFP_KERNEL_ACCOUNT);
        if (elem.priv == NULL)
-               goto fail_elem;
+               goto fail_elem_key_end;
 
        ext = nft_set_elem_ext(set, elem.priv);
        if (flags)
@@ -6299,6 +6336,8 @@ fail_ops:
        kfree(trans);
 fail_trans:
        kfree(elem.priv);
+fail_elem_key_end:
+       nft_data_release(&elem.key_end.val, NFT_DATA_VALUE);
 fail_elem:
        nft_data_release(&elem.key.val, NFT_DATA_VALUE);
        return err;
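
The nft_set_ext_add()/nft_set_ext_add_length() calls above can now fail, so every call site needs an unwind path, and nft_del_setelem() gains a fail_elem_key_end label so the key-end data is released as well. The general shape of this goto-based unwind, with hypothetical alloc/undo helpers, is that labels fall through in reverse order of setup:

static int alloc_a(void), alloc_b(void), alloc_c(void);
static void undo_a(void), undo_b(void);

static int setup_all(void)
{
        int err;

        err = alloc_a();
        if (err)
                return err;     /* nothing to undo yet */

        err = alloc_b();
        if (err)
                goto err_a;

        err = alloc_c();
        if (err)
                goto err_b;

        return 0;

err_b:
        undo_b();               /* release in reverse order of setup */
err_a:
        undo_a();
        return err;
}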
@@ -9330,9 +9369,13 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
                                break;
                        }
                }
+
+               cond_resched();
        }
 
        list_for_each_entry(set, &ctx->table->sets, list) {
+               cond_resched();
+
                if (!nft_is_active_next(ctx->net, set))
                        continue;
                if (!(set->flags & NFT_SET_MAP) ||
index 53f40e4..3ddce24 100644 (file)
@@ -25,9 +25,7 @@ static noinline void __nft_trace_packet(struct nft_traceinfo *info,
                                        const struct nft_chain *chain,
                                        enum nft_trace_types type)
 {
-       const struct nft_pktinfo *pkt = info->pkt;
-
-       if (!info->trace || !pkt->skb->nf_trace)
+       if (!info->trace || !info->nf_trace)
                return;
 
        info->chain = chain;
@@ -42,11 +40,24 @@ static inline void nft_trace_packet(struct nft_traceinfo *info,
                                    enum nft_trace_types type)
 {
        if (static_branch_unlikely(&nft_trace_enabled)) {
+               const struct nft_pktinfo *pkt = info->pkt;
+
+               info->nf_trace = pkt->skb->nf_trace;
                info->rule = rule;
                __nft_trace_packet(info, chain, type);
        }
 }
 
+static inline void nft_trace_copy_nftrace(struct nft_traceinfo *info)
+{
+       if (static_branch_unlikely(&nft_trace_enabled)) {
+               const struct nft_pktinfo *pkt = info->pkt;
+
+               if (info->trace)
+                       info->nf_trace = pkt->skb->nf_trace;
+       }
+}
+
 static void nft_bitwise_fast_eval(const struct nft_expr *expr,
                                  struct nft_regs *regs)
 {
@@ -85,6 +96,7 @@ static noinline void __nft_trace_verdict(struct nft_traceinfo *info,
                                         const struct nft_chain *chain,
                                         const struct nft_regs *regs)
 {
+       const struct nft_pktinfo *pkt = info->pkt;
        enum nft_trace_types type;
 
        switch (regs->verdict.code) {
@@ -92,8 +104,13 @@ static noinline void __nft_trace_verdict(struct nft_traceinfo *info,
        case NFT_RETURN:
                type = NFT_TRACETYPE_RETURN;
                break;
+       case NF_STOLEN:
+               type = NFT_TRACETYPE_RULE;
+               /* can't access skb->nf_trace; use copy */
+               break;
        default:
                type = NFT_TRACETYPE_RULE;
+               info->nf_trace = pkt->skb->nf_trace;
                break;
        }
 
@@ -254,6 +271,7 @@ next_rule:
                switch (regs.verdict.code) {
                case NFT_BREAK:
                        regs.verdict.code = NFT_CONTINUE;
+                       nft_trace_copy_nftrace(&info);
                        continue;
                case NFT_CONTINUE:
                        nft_trace_packet(&info, chain, rule,
index 5041725..1163ba9 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/module.h>
 #include <linux/static_key.h>
 #include <linux/hash.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <linux/if_vlan.h>
 #include <linux/init.h>
 #include <linux/skbuff.h>
 DEFINE_STATIC_KEY_FALSE(nft_trace_enabled);
 EXPORT_SYMBOL_GPL(nft_trace_enabled);
 
-static int trace_fill_id(struct sk_buff *nlskb, struct sk_buff *skb)
-{
-       __be32 id;
-
-       /* using skb address as ID results in a limited number of
-        * values (and quick reuse).
-        *
-        * So we attempt to use as many skb members that will not
-        * change while skb is with netfilter.
-        */
-       id = (__be32)jhash_2words(hash32_ptr(skb), skb_get_hash(skb),
-                                 skb->skb_iif);
-
-       return nla_put_be32(nlskb, NFTA_TRACE_ID, id);
-}
-
 static int trace_fill_header(struct sk_buff *nlskb, u16 type,
                             const struct sk_buff *skb,
                             int off, unsigned int len)
@@ -186,6 +170,7 @@ void nft_trace_notify(struct nft_traceinfo *info)
        struct nlmsghdr *nlh;
        struct sk_buff *skb;
        unsigned int size;
+       u32 mark = 0;
        u16 event;
 
        if (!nfnetlink_has_listeners(nft_net(pkt), NFNLGRP_NFTRACE))
@@ -229,7 +214,7 @@ void nft_trace_notify(struct nft_traceinfo *info)
        if (nla_put_be32(skb, NFTA_TRACE_TYPE, htonl(info->type)))
                goto nla_put_failure;
 
-       if (trace_fill_id(skb, pkt->skb))
+       if (nla_put_u32(skb, NFTA_TRACE_ID, info->skbid))
                goto nla_put_failure;
 
        if (nla_put_string(skb, NFTA_TRACE_CHAIN, info->chain->name))
@@ -249,16 +234,24 @@ void nft_trace_notify(struct nft_traceinfo *info)
        case NFT_TRACETYPE_RULE:
                if (nft_verdict_dump(skb, NFTA_TRACE_VERDICT, info->verdict))
                        goto nla_put_failure;
+
+               /* pkt->skb undefined iff NF_STOLEN, disable dump */
+               if (info->verdict->code == NF_STOLEN)
+                       info->packet_dumped = true;
+               else
+                       mark = pkt->skb->mark;
+
                break;
        case NFT_TRACETYPE_POLICY:
+               mark = pkt->skb->mark;
+
                if (nla_put_be32(skb, NFTA_TRACE_POLICY,
                                 htonl(info->basechain->policy)))
                        goto nla_put_failure;
                break;
        }
 
-       if (pkt->skb->mark &&
-           nla_put_be32(skb, NFTA_TRACE_MARK, htonl(pkt->skb->mark)))
+       if (mark && nla_put_be32(skb, NFTA_TRACE_MARK, htonl(mark)))
                goto nla_put_failure;
 
        if (!info->packet_dumped) {
@@ -283,9 +276,20 @@ void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
                    const struct nft_verdict *verdict,
                    const struct nft_chain *chain)
 {
+       static siphash_key_t trace_key __read_mostly;
+       struct sk_buff *skb = pkt->skb;
+
        info->basechain = nft_base_chain(chain);
        info->trace = true;
+       info->nf_trace = pkt->skb->nf_trace;
        info->packet_dumped = false;
        info->pkt = pkt;
        info->verdict = verdict;
+
+       net_get_random_once(&trace_key, sizeof(trace_key));
+
+       info->skbid = (u32)siphash_3u32(hash32_ptr(skb),
+                                       skb_get_hash(skb),
+                                       skb->skb_iif,
+                                       &trace_key);
 }
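
The trace ID is now derived with siphash keyed by a boot-time secret instead of jhash, so IDs are still stable for an skb's lifetime but can no longer be predicted or used to infer kernel pointer values. A hedged sketch of the keyed-ID pattern:

#include <linux/cache.h>
#include <linux/net.h>          /* net_get_random_once() */
#include <linux/siphash.h>
#include <linux/types.h>

/* Sketch: the key is drawn once at first use, so IDs are stable
 * within a boot but unpredictable to an observer. */
static u32 object_id(u32 a, u32 b, u32 c)
{
        static siphash_key_t id_key __read_mostly;

        net_get_random_once(&id_key, sizeof(id_key));
        return (u32)siphash_3u32(a, b, c, &id_key);
}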
index af15102..f466af4 100644 (file)
@@ -614,7 +614,7 @@ static void __net_exit cttimeout_net_exit(struct net *net)
 
        nf_ct_untimeout(net, NULL);
 
-       list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_freelist, head) {
+       list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_freelist, free_head) {
                list_del(&cur->free_head);
 
                if (refcount_dec_and_test(&cur->refcnt))
index a364f8e..87a9009 100644 (file)
@@ -843,11 +843,16 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 }
 
 static int
-nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
+nfqnl_mangle(void *data, unsigned int data_len, struct nf_queue_entry *e, int diff)
 {
        struct sk_buff *nskb;
 
        if (diff < 0) {
+               unsigned int min_len = skb_transport_offset(e->skb);
+
+               if (data_len < min_len)
+                       return -EINVAL;
+
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
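
nfqnl_mangle() previously took data_len as a signed int; mixing signed and unsigned lengths in comparisons invites promotion surprises, and nothing stopped a verdict from trimming the packet below its transport header. The fix makes the length unsigned and bounds it explicitly. A sketch of the hardened shape (illustrative, not the full function):

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Sketch: keep lengths unsigned and check the lower bound before
 * trimming, so a verdict can shrink the payload but never cut into
 * the protocol headers. */
static int trim_payload(struct sk_buff *skb, unsigned int data_len)
{
        unsigned int min_len = skb_transport_offset(skb);

        if (data_len < min_len)
                return -EINVAL;

        return pskb_trim(skb, data_len) ? -ENOMEM : 0;
}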
index ac48592..55d2d49 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <linux/random.h>
 #include <linux/smp.h>
 #include <linux/static_key.h>
 #include <net/dst.h>
@@ -32,8 +33,6 @@
 #define NFT_META_SECS_PER_DAY          86400
 #define NFT_META_DAYS_PER_WEEK         7
 
-static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state);
-
 static u8 nft_meta_weekday(void)
 {
        time64_t secs = ktime_get_real_seconds();
@@ -271,13 +270,6 @@ static bool nft_meta_get_eval_ifname(enum nft_meta_keys key, u32 *dest,
        return true;
 }
 
-static noinline u32 nft_prandom_u32(void)
-{
-       struct rnd_state *state = this_cpu_ptr(&nft_prandom_state);
-
-       return prandom_u32_state(state);
-}
-
 #ifdef CONFIG_IP_ROUTE_CLASSID
 static noinline bool
 nft_meta_get_eval_rtclassid(const struct sk_buff *skb, u32 *dest)
@@ -389,7 +381,7 @@ void nft_meta_get_eval(const struct nft_expr *expr,
                break;
 #endif
        case NFT_META_PRANDOM:
-               *dest = nft_prandom_u32();
+               *dest = get_random_u32();
                break;
 #ifdef CONFIG_XFRM
        case NFT_META_SECPATH:
@@ -518,7 +510,6 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
                len = IFNAMSIZ;
                break;
        case NFT_META_PRANDOM:
-               prandom_init_once(&nft_prandom_state);
                len = sizeof(u32);
                break;
 #ifdef CONFIG_XFRM
index 81b40c6..45d3dc9 100644 (file)
@@ -9,12 +9,11 @@
 #include <linux/netlink.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter/nf_tables.h>
+#include <linux/random.h>
 #include <linux/static_key.h>
 #include <net/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables_core.h>
 
-static DEFINE_PER_CPU(struct rnd_state, nft_numgen_prandom_state);
-
 struct nft_ng_inc {
        u8                      dreg;
        u32                     modulus;
@@ -135,12 +134,9 @@ struct nft_ng_random {
        u32                     offset;
 };
 
-static u32 nft_ng_random_gen(struct nft_ng_random *priv)
+static u32 nft_ng_random_gen(const struct nft_ng_random *priv)
 {
-       struct rnd_state *state = this_cpu_ptr(&nft_numgen_prandom_state);
-
-       return reciprocal_scale(prandom_u32_state(state), priv->modulus) +
-              priv->offset;
+       return reciprocal_scale(get_random_u32(), priv->modulus) + priv->offset;
 }
 
 static void nft_ng_random_eval(const struct nft_expr *expr,
@@ -168,8 +164,6 @@ static int nft_ng_random_init(const struct nft_ctx *ctx,
        if (priv->offset + priv->modulus - 1 < priv->offset)
                return -EOVERFLOW;
 
-       prandom_init_once(&nft_numgen_prandom_state);
-
        return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg,
                                        NULL, NFT_DATA_VALUE, sizeof(u32));
 }
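
nft_meta and nft_numgen drop their per-CPU prandom state in favor of get_random_u32(), since prandom output is predictable; reciprocal_scale() then maps the 32-bit value into the requested range without a division. The whole replacement fits in one line:

#include <linux/kernel.h>       /* reciprocal_scale() */
#include <linux/random.h>

/* Sketch: map a uniform 32-bit random value into
 * [offset, offset + modulus) without a division. */
static u32 random_in_range(u32 modulus, u32 offset)
{
        return reciprocal_scale(get_random_u32(), modulus) + offset;
}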
index 15e4b76..da29e92 100644 (file)
@@ -68,6 +68,31 @@ static void nft_queue_sreg_eval(const struct nft_expr *expr,
        regs->verdict.code = ret;
 }
 
+static int nft_queue_validate(const struct nft_ctx *ctx,
+                             const struct nft_expr *expr,
+                             const struct nft_data **data)
+{
+       static const unsigned int supported_hooks = ((1 << NF_INET_PRE_ROUTING) |
+                                                    (1 << NF_INET_LOCAL_IN) |
+                                                    (1 << NF_INET_FORWARD) |
+                                                    (1 << NF_INET_LOCAL_OUT) |
+                                                    (1 << NF_INET_POST_ROUTING));
+
+       switch (ctx->family) {
+       case NFPROTO_IPV4:
+       case NFPROTO_IPV6:
+       case NFPROTO_INET:
+       case NFPROTO_BRIDGE:
+               break;
+       case NFPROTO_NETDEV: /* lacks okfn */
+               fallthrough;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return nft_chain_validate_hooks(ctx->chain, supported_hooks);
+}
+
 static const struct nla_policy nft_queue_policy[NFTA_QUEUE_MAX + 1] = {
        [NFTA_QUEUE_NUM]        = { .type = NLA_U16 },
        [NFTA_QUEUE_TOTAL]      = { .type = NLA_U16 },
@@ -164,6 +189,7 @@ static const struct nft_expr_ops nft_queue_ops = {
        .eval           = nft_queue_eval,
        .init           = nft_queue_init,
        .dump           = nft_queue_dump,
+       .validate       = nft_queue_validate,
        .reduce         = NFT_REDUCE_READONLY,
 };
 
@@ -173,6 +199,7 @@ static const struct nft_expr_ops nft_queue_sreg_ops = {
        .eval           = nft_queue_sreg_eval,
        .init           = nft_queue_sreg_init,
        .dump           = nft_queue_sreg_dump,
+       .validate       = nft_queue_validate,
        .reduce         = NFT_REDUCE_READONLY,
 };
 
index df40314..76de6c8 100644 (file)
@@ -143,6 +143,7 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key,
        /* Another cpu may race to insert the element with the same key */
        if (prev) {
                nft_set_elem_destroy(set, he, true);
+               atomic_dec(&set->nelems);
                he = prev;
        }
 
@@ -152,6 +153,7 @@ out:
 
 err2:
        nft_set_elem_destroy(set, he, true);
+       atomic_dec(&set->nelems);
 err1:
        return false;
 }
index 2c8051d..4f9299b 100644 (file)
@@ -2125,6 +2125,32 @@ out_scratch:
 }
 
 /**
+ * nft_set_pipapo_match_destroy() - Destroy elements from key mapping array
+ * @set:       nftables API set representation
+ * @m:         matching data pointing to key mapping array
+ */
+static void nft_set_pipapo_match_destroy(const struct nft_set *set,
+                                        struct nft_pipapo_match *m)
+{
+       struct nft_pipapo_field *f;
+       int i, r;
+
+       for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
+               ;
+
+       for (r = 0; r < f->rules; r++) {
+               struct nft_pipapo_elem *e;
+
+               if (r < f->rules - 1 && f->mt[r + 1].e == f->mt[r].e)
+                       continue;
+
+               e = f->mt[r].e;
+
+               nft_set_elem_destroy(set, e, true);
+       }
+}
+
+/**
  * nft_pipapo_destroy() - Free private data for set and all committed elements
  * @set:       nftables API set representation
  */
@@ -2132,26 +2158,13 @@ static void nft_pipapo_destroy(const struct nft_set *set)
 {
        struct nft_pipapo *priv = nft_set_priv(set);
        struct nft_pipapo_match *m;
-       struct nft_pipapo_field *f;
-       int i, r, cpu;
+       int cpu;
 
        m = rcu_dereference_protected(priv->match, true);
        if (m) {
                rcu_barrier();
 
-               for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
-                       ;
-
-               for (r = 0; r < f->rules; r++) {
-                       struct nft_pipapo_elem *e;
-
-                       if (r < f->rules - 1 && f->mt[r + 1].e == f->mt[r].e)
-                               continue;
-
-                       e = f->mt[r].e;
-
-                       nft_set_elem_destroy(set, e, true);
-               }
+               nft_set_pipapo_match_destroy(set, m);
 
 #ifdef NFT_PIPAPO_ALIGN
                free_percpu(m->scratch_aligned);
@@ -2165,6 +2178,11 @@ static void nft_pipapo_destroy(const struct nft_set *set)
        }
 
        if (priv->clone) {
+               m = priv->clone;
+
+               if (priv->dirty)
+                       nft_set_pipapo_match_destroy(set, m);
+
 #ifdef NFT_PIPAPO_ALIGN
                free_percpu(priv->clone->scratch_aligned);
 #endif
index 372bf54..e20d1a9 100644 (file)
@@ -407,7 +407,7 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
        if (flags & IP6_FH_F_FRAG) {
                if (frag_off) {
                        key->ip.frag = OVS_FRAG_TYPE_LATER;
-                       key->ip.proto = nexthdr;
+                       key->ip.proto = NEXTHDR_FRAGMENT;
                        return 0;
                }
                key->ip.frag = OVS_FRAG_TYPE_FIRST;
index fee6409..eb0b819 100644 (file)
@@ -227,8 +227,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
 {
        struct rose_neigh *s;
 
-       rose_stop_ftimer(rose_neigh);
-       rose_stop_t0timer(rose_neigh);
+       del_timer_sync(&rose_neigh->ftimer);
+       del_timer_sync(&rose_neigh->t0timer);
 
        skb_queue_purge(&rose_neigh->queue);
 
index b3138fc..f06ddbe 100644 (file)
@@ -31,89 +31,89 @@ static void rose_idletimer_expiry(struct timer_list *);
 
 void rose_start_heartbeat(struct sock *sk)
 {
-       del_timer(&sk->sk_timer);
+       sk_stop_timer(sk, &sk->sk_timer);
 
        sk->sk_timer.function = rose_heartbeat_expiry;
        sk->sk_timer.expires  = jiffies + 5 * HZ;
 
-       add_timer(&sk->sk_timer);
+       sk_reset_timer(sk, &sk->sk_timer, sk->sk_timer.expires);
 }
 
 void rose_start_t1timer(struct sock *sk)
 {
        struct rose_sock *rose = rose_sk(sk);
 
-       del_timer(&rose->timer);
+       sk_stop_timer(sk, &rose->timer);
 
        rose->timer.function = rose_timer_expiry;
        rose->timer.expires  = jiffies + rose->t1;
 
-       add_timer(&rose->timer);
+       sk_reset_timer(sk, &rose->timer, rose->timer.expires);
 }
 
 void rose_start_t2timer(struct sock *sk)
 {
        struct rose_sock *rose = rose_sk(sk);
 
-       del_timer(&rose->timer);
+       sk_stop_timer(sk, &rose->timer);
 
        rose->timer.function = rose_timer_expiry;
        rose->timer.expires  = jiffies + rose->t2;
 
-       add_timer(&rose->timer);
+       sk_reset_timer(sk, &rose->timer, rose->timer.expires);
 }
 
 void rose_start_t3timer(struct sock *sk)
 {
        struct rose_sock *rose = rose_sk(sk);
 
-       del_timer(&rose->timer);
+       sk_stop_timer(sk, &rose->timer);
 
        rose->timer.function = rose_timer_expiry;
        rose->timer.expires  = jiffies + rose->t3;
 
-       add_timer(&rose->timer);
+       sk_reset_timer(sk, &rose->timer, rose->timer.expires);
 }
 
 void rose_start_hbtimer(struct sock *sk)
 {
        struct rose_sock *rose = rose_sk(sk);
 
-       del_timer(&rose->timer);
+       sk_stop_timer(sk, &rose->timer);
 
        rose->timer.function = rose_timer_expiry;
        rose->timer.expires  = jiffies + rose->hb;
 
-       add_timer(&rose->timer);
+       sk_reset_timer(sk, &rose->timer, rose->timer.expires);
 }
 
 void rose_start_idletimer(struct sock *sk)
 {
        struct rose_sock *rose = rose_sk(sk);
 
-       del_timer(&rose->idletimer);
+       sk_stop_timer(sk, &rose->idletimer);
 
        if (rose->idle > 0) {
                rose->idletimer.function = rose_idletimer_expiry;
                rose->idletimer.expires  = jiffies + rose->idle;
 
-               add_timer(&rose->idletimer);
+               sk_reset_timer(sk, &rose->idletimer, rose->idletimer.expires);
        }
 }
 
 void rose_stop_heartbeat(struct sock *sk)
 {
-       del_timer(&sk->sk_timer);
+       sk_stop_timer(sk, &sk->sk_timer);
 }
 
 void rose_stop_timer(struct sock *sk)
 {
-       del_timer(&rose_sk(sk)->timer);
+       sk_stop_timer(sk, &rose_sk(sk)->timer);
 }
 
 void rose_stop_idletimer(struct sock *sk)
 {
-       del_timer(&rose_sk(sk)->idletimer);
+       sk_stop_timer(sk, &rose_sk(sk)->idletimer);
 }
 
 static void rose_heartbeat_expiry(struct timer_list *t)
@@ -130,6 +130,7 @@ static void rose_heartbeat_expiry(struct timer_list *t)
                    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
                        bh_unlock_sock(sk);
                        rose_destroy_socket(sk);
+                       sock_put(sk);
                        return;
                }
                break;
@@ -152,6 +153,7 @@ static void rose_heartbeat_expiry(struct timer_list *t)
 
        rose_start_heartbeat(sk);
        bh_unlock_sock(sk);
+       sock_put(sk);
 }
 
 static void rose_timer_expiry(struct timer_list *t)
@@ -181,6 +183,7 @@ static void rose_timer_expiry(struct timer_list *t)
                break;
        }
        bh_unlock_sock(sk);
+       sock_put(sk);
 }
 
 static void rose_idletimer_expiry(struct timer_list *t)
@@ -205,4 +208,5 @@ static void rose_idletimer_expiry(struct timer_list *t)
                sock_set_flag(sk, SOCK_DEAD);
        }
        bh_unlock_sock(sk);
+       sock_put(sk);
 }
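
The rose timer fixes switch to sk_reset_timer()/sk_stop_timer(), which hold a socket reference while a timer is pending, and add the matching sock_put() in each expiry handler, closing a use-after-free when the socket is released with a timer still armed. A minimal sketch of that contract:

#include <net/sock.h>

/* Sketch: sk_reset_timer() takes a reference on the socket and
 * sk_stop_timer() releases it, so the socket cannot be freed while a
 * timer is pending; the expiry handler owns one ref and must drop it. */
static void sample_timer_expiry(struct timer_list *t)
{
        struct sock *sk = from_timer(sk, t, sk_timer);

        bh_lock_sock(sk);
        /* ... protocol work ... */
        bh_unlock_sock(sk);
        sock_put(sk);           /* pairs with sk_reset_timer() */
}

static void sample_start_timer(struct sock *sk)
{
        sk->sk_timer.function = sample_timer_expiry;
        sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
}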
index da9733d..817065a 100644 (file)
@@ -588,7 +588,8 @@ static int tcf_idr_release_unsafe(struct tc_action *p)
 }
 
 static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
-                         const struct tc_action_ops *ops)
+                         const struct tc_action_ops *ops,
+                         struct netlink_ext_ack *extack)
 {
        struct nlattr *nest;
        int n_i = 0;
@@ -604,20 +605,25 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
        if (nla_put_string(skb, TCA_KIND, ops->kind))
                goto nla_put_failure;
 
+       ret = 0;
        mutex_lock(&idrinfo->lock);
        idr_for_each_entry_ul(idr, p, tmp, id) {
                if (IS_ERR(p))
                        continue;
                ret = tcf_idr_release_unsafe(p);
-               if (ret == ACT_P_DELETED) {
+               if (ret == ACT_P_DELETED)
                        module_put(ops->owner);
-                       n_i++;
-               } else if (ret < 0) {
-                       mutex_unlock(&idrinfo->lock);
-                       goto nla_put_failure;
-               }
+               else if (ret < 0)
+                       break;
+               n_i++;
        }
        mutex_unlock(&idrinfo->lock);
+       if (ret < 0) {
+               if (n_i)
+                       NL_SET_ERR_MSG(extack, "Unable to flush all TC actions");
+               else
+                       goto nla_put_failure;
+       }
 
        ret = nla_put_u32(skb, TCA_FCNT, n_i);
        if (ret)
@@ -638,7 +644,7 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
 
        if (type == RTM_DELACTION) {
-               return tcf_del_walker(idrinfo, skb, ops);
+               return tcf_del_walker(idrinfo, skb, ops, extack);
        } else if (type == RTM_GETACTION) {
                return tcf_dump_walker(idrinfo, skb, cb);
        } else {
index 79c8901..b759628 100644 (file)
@@ -442,7 +442,7 @@ static int tcf_police_act_to_flow_act(int tc_act, u32 *extval,
                act_id = FLOW_ACTION_JUMP;
                *extval = tc_act & TC_ACT_EXT_VAL_MASK;
        } else if (tc_act == TC_ACT_UNSPEC) {
-               NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform/exceed action is \"continue\"");
+               act_id = FLOW_ACTION_CONTINUE;
        } else {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload");
        }
index 9bb4d3d..ac366c9 100644 (file)
@@ -3533,7 +3533,7 @@ int tc_setup_action(struct flow_action *flow_action,
                    struct tc_action *actions[],
                    struct netlink_ext_ack *extack)
 {
-       int i, j, index, err = 0;
+       int i, j, k, index, err = 0;
        struct tc_action *act;
 
        BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
@@ -3553,14 +3553,18 @@ int tc_setup_action(struct flow_action *flow_action,
                if (err)
                        goto err_out_locked;
 
-               entry->hw_stats = tc_act_hw_stats(act->hw_stats);
-               entry->hw_index = act->tcfa_index;
                index = 0;
                err = tc_setup_offload_act(act, entry, &index, extack);
-               if (!err)
-                       j += index;
-               else
+               if (err)
                        goto err_out_locked;
+
+               for (k = 0; k < index; k++) {
+                       entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
+                       entry[k].hw_index = act->tcfa_index;
+               }
+
+               j += index;
+
                spin_unlock_bh(&act->tcfa_lock);
        }
 
index ed4ccef..5449ed1 100644 (file)
@@ -1146,9 +1146,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
        struct tc_netem_rate rate;
        struct tc_netem_slot slot;
 
-       qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
+       qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
                             UINT_MAX);
-       qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
+       qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
                            UINT_MAX);
        qopt.limit = q->limit;
        qopt.loss = q->loss;
index be29da0..3460abc 100644 (file)
@@ -229,9 +229,8 @@ static struct sctp_association *sctp_association_init(
        if (!sctp_ulpq_init(&asoc->ulpq, asoc))
                goto fail_init;
 
-       if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
-                            0, gfp))
-               goto fail_init;
+       if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp))
+               goto stream_free;
 
        /* Initialize default path MTU. */
        asoc->pathmtu = sp->pathmtu;
index 35928fe..1a094b0 100644 (file)
@@ -358,7 +358,7 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
        if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
           ret != RTN_LOCAL &&
           !sp->inet.freebind &&
-          !net->ipv4.sysctl_ip_nonlocal_bind)
+           !READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind))
                return 0;
 
        if (ipv6_only_sock(sctp_opt2sk(sp)))
index 6dc95dc..ef9fcea 100644 (file)
@@ -137,7 +137,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
 
        ret = sctp_stream_alloc_out(stream, outcnt, gfp);
        if (ret)
-               goto out_err;
+               return ret;
 
        for (i = 0; i < stream->outcnt; i++)
                SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
@@ -145,22 +145,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
 handle_in:
        sctp_stream_interleave_init(stream);
        if (!incnt)
-               goto out;
-
-       ret = sctp_stream_alloc_in(stream, incnt, gfp);
-       if (ret)
-               goto in_err;
-
-       goto out;
+               return 0;
 
-in_err:
-       sched->free(stream);
-       genradix_free(&stream->in);
-out_err:
-       genradix_free(&stream->out);
-       stream->outcnt = 0;
-out:
-       return ret;
+       return sctp_stream_alloc_in(stream, incnt, gfp);
 }
 
 int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
index 518b1b9..1ad565e 100644 (file)
@@ -160,7 +160,7 @@ int sctp_sched_set_sched(struct sctp_association *asoc,
                if (!SCTP_SO(&asoc->stream, i)->ext)
                        continue;
 
-               ret = n->init_sid(&asoc->stream, i, GFP_KERNEL);
+               ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC);
                if (ret)
                        goto err;
        }
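
The scheduler-change path swaps GFP_KERNEL for GFP_ATOMIC in init_sid(), evidently because this path can run in a context that must not sleep: GFP_KERNEL may block waiting for reclaim, while GFP_ATOMIC never sleeps at the cost of failing more readily. In short:

#include <linux/slab.h>

/* Sketch: GFP_ATOMIC is for contexts that cannot sleep; it is more
 * likely to fail than GFP_KERNEL, so callers must handle NULL. */
static void *alloc_nosleep(size_t len)
{
        return kmalloc(len, GFP_ATOMIC);
}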
index c4d057b..0bde36b 100644 (file)
@@ -2122,7 +2122,7 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
        init_waitqueue_head(&lgr->llc_flow_waiter);
        init_waitqueue_head(&lgr->llc_msg_waiter);
        mutex_init(&lgr->llc_conf_mutex);
-       lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
+       lgr->llc_testlink_time = READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
 }
 
 /* called after lgr was removed from lgr_list */
index 2bc8773..96300cd 100644 (file)
@@ -2149,10 +2149,13 @@ SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
 int __sys_recvfrom(int fd, void __user *ubuf, size_t size, unsigned int flags,
                   struct sockaddr __user *addr, int __user *addr_len)
 {
+       struct sockaddr_storage address;
+       struct msghdr msg = {
+               /* Save some cycles and don't copy the address if not needed */
+               .msg_name = addr ? (struct sockaddr *)&address : NULL,
+       };
        struct socket *sock;
        struct iovec iov;
-       struct msghdr msg;
-       struct sockaddr_storage address;
        int err, err2;
        int fput_needed;
 
@@ -2163,14 +2166,6 @@ int __sys_recvfrom(int fd, void __user *ubuf, size_t size, unsigned int flags,
        if (!sock)
                goto out;
 
-       msg.msg_control = NULL;
-       msg.msg_controllen = 0;
-       /* Save some cycles and don't copy the address if not needed */
-       msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
-       /* We assume all kernel code knows the size of sockaddr_storage */
-       msg.msg_namelen = 0;
-       msg.msg_iocb = NULL;
-       msg.msg_flags = 0;
        if (sock->file->f_flags & O_NONBLOCK)
                flags |= MSG_DONTWAIT;
        err = sock_recvmsg(sock, &msg, flags);
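
The recvfrom hunk replaces the field-by-field msghdr setup with a designated initializer: in C, members not named in the initializer are zeroed, so the removed msg_control/msg_namelen/msg_flags assignments were redundant and the struct can no longer be left partially initialized. A tiny illustration:

#include <linux/socket.h>

/* Sketch: members not named in a designated initializer start out
 * zeroed, exactly as if assigned 0/NULL one by one. */
static struct msghdr make_msg(struct sockaddr_storage *addr)
{
        struct msghdr msg = {
                /* don't copy the address if the caller didn't ask */
                .msg_name = addr,
        };

        return msg;     /* msg_control, msg_flags, ... are already 0 */
}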
@@ -2375,6 +2370,7 @@ int __copy_msghdr_from_user(struct msghdr *kmsg,
                return -EFAULT;
 
        kmsg->msg_control_is_user = true;
+       kmsg->msg_get_inq = 0;
        kmsg->msg_control_user = msg.msg_control;
        kmsg->msg_controllen = msg.msg_controllen;
        kmsg->msg_flags = msg.msg_flags;
index e2c6eca..b6781ad 100644 (file)
@@ -651,6 +651,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
        new->cl_discrtry = clnt->cl_discrtry;
        new->cl_chatty = clnt->cl_chatty;
        new->cl_principal = clnt->cl_principal;
+       new->cl_max_connect = clnt->cl_max_connect;
        return new;
 
 out_err:
index f87a2d8..5d2b3e6 100644 (file)
@@ -984,7 +984,7 @@ static noinline __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
        p = page_address(*xdr->page_ptr);
        xdr->p = p + frag2bytes;
        space_left = xdr->buf->buflen - xdr->buf->len;
-       if (space_left - nbytes >= PAGE_SIZE)
+       if (space_left - frag1bytes >= PAGE_SIZE)
                xdr->end = p + PAGE_SIZE;
        else
                xdr->end = p + space_left - frag1bytes;
index 3f4542e..434e70e 100644 (file)
@@ -109,10 +109,9 @@ static void __net_exit tipc_exit_net(struct net *net)
        struct tipc_net *tn = tipc_net(net);
 
        tipc_detach_loopback(net);
+       tipc_net_stop(net);
        /* Make sure the tipc_net_finalize_work() finished */
        cancel_work_sync(&tn->work);
-       tipc_net_stop(net);
-
        tipc_bcast_stop(net);
        tipc_nametbl_stop(net);
        tipc_sk_rht_destroy(net);
index 6ef95ce..b48d97c 100644 (file)
@@ -472,8 +472,8 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
                                   bool preliminary)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *l, *snd_l = tipc_bc_sndlink(net);
        struct tipc_node *n, *temp_node;
-       struct tipc_link *l;
        unsigned long intv;
        int bearer_id;
        int i;
@@ -488,6 +488,16 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
                        goto exit;
                /* A preliminary node becomes "real" now, refresh its data */
                tipc_node_write_lock(n);
+               if (!tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
+                                        tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
+                                        n->capabilities, &n->bc_entry.inputq1,
+                                        &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
+                       pr_warn("Broadcast rcv link refresh failed, no memory\n");
+                       tipc_node_write_unlock_fast(n);
+                       tipc_node_put(n);
+                       n = NULL;
+                       goto exit;
+               }
                n->preliminary = false;
                n->addr = addr;
                hlist_del_rcu(&n->hash);
@@ -567,7 +577,16 @@ update:
        n->signature = INVALID_NODE_SIG;
        n->active_links[0] = INVALID_BEARER_ID;
        n->active_links[1] = INVALID_BEARER_ID;
-       n->bc_entry.link = NULL;
+       if (!preliminary &&
+           !tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
+                                tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
+                                n->capabilities, &n->bc_entry.inputq1,
+                                &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
+               pr_warn("Broadcast rcv link creation failed, no memory\n");
+               kfree(n);
+               n = NULL;
+               goto exit;
+       }
        tipc_node_get(n);
        timer_setup(&n->timer, tipc_node_timeout, 0);
        /* Start a slow timer anyway, crypto needs it */
@@ -1155,7 +1174,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
                          bool *respond, bool *dupl_addr)
 {
        struct tipc_node *n;
-       struct tipc_link *l, *snd_l;
+       struct tipc_link *l;
        struct tipc_link_entry *le;
        bool addr_match = false;
        bool sign_match = false;
@@ -1175,22 +1194,6 @@ void tipc_node_check_dest(struct net *net, u32 addr,
                return;
 
        tipc_node_write_lock(n);
-       if (unlikely(!n->bc_entry.link)) {
-               snd_l = tipc_bc_sndlink(net);
-               if (!tipc_link_bc_create(net, tipc_own_addr(net),
-                                        addr, peer_id, U16_MAX,
-                                        tipc_link_min_win(snd_l),
-                                        tipc_link_max_win(snd_l),
-                                        n->capabilities,
-                                        &n->bc_entry.inputq1,
-                                        &n->bc_entry.namedq, snd_l,
-                                        &n->bc_entry.link)) {
-                       pr_warn("Broadcast rcv link creation failed, no mem\n");
-                       tipc_node_write_unlock_fast(n);
-                       tipc_node_put(n);
-                       return;
-               }
-       }
 
        le = &n->links[b->identity];
 
index 17f8c52..f1c3b8e 100644 (file)
@@ -502,6 +502,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        sock_init_data(sock, sk);
        tipc_set_sk_state(sk, TIPC_OPEN);
        if (tipc_sk_insert(tsk)) {
+               sk_free(sk);
                pr_warn("Socket create failed; port number exhausted\n");
                return -EINVAL;
        }
@@ -516,7 +517,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
        sk->sk_shutdown = 0;
        sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
-       sk->sk_rcvbuf = sysctl_tipc_rmem[1];
+       sk->sk_rcvbuf = READ_ONCE(sysctl_tipc_rmem[1]);
        sk->sk_data_ready = tipc_data_ready;
        sk->sk_write_space = tipc_write_space;
        sk->sk_destruct = tipc_sock_destruct;
index ec6f4b6..9975df3 100644 (file)
@@ -97,13 +97,16 @@ static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
        unsigned long flags;
 
        spin_lock_irqsave(&tls_device_lock, flags);
+       if (unlikely(!refcount_dec_and_test(&ctx->refcount)))
+               goto unlock;
+
        list_move_tail(&ctx->list, &tls_device_gc_list);
 
        /* schedule_work inside the spinlock
         * to make sure tls_device_down waits for that work.
         */
        schedule_work(&tls_device_gc_work);
-
+unlock:
        spin_unlock_irqrestore(&tls_device_lock, flags);
 }
 
@@ -194,8 +197,7 @@ void tls_device_sk_destruct(struct sock *sk)
                clean_acked_data_disable(inet_csk(sk));
        }
 
-       if (refcount_dec_and_test(&tls_ctx->refcount))
-               tls_device_queue_ctx_destruction(tls_ctx);
+       tls_device_queue_ctx_destruction(tls_ctx);
 }
 EXPORT_SYMBOL_GPL(tls_device_sk_destruct);
 
@@ -1374,8 +1376,13 @@ static int tls_device_down(struct net_device *netdev)
                 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
                 * Now release the ref taken above.
                 */
-               if (refcount_dec_and_test(&ctx->refcount))
+               if (refcount_dec_and_test(&ctx->refcount)) {
+                       /* sk_destruct ran after tls_device_down took a ref, and
+                        * it returned early. Complete the destruction here.
+                        */
+                       list_del(&ctx->list);
                        tls_device_free_ctx(ctx);
+               }
        }
 
        up_write(&device_offload_lock);
@@ -1419,9 +1426,9 @@ static struct notifier_block tls_dev_notifier = {
        .notifier_call  = tls_dev_event,
 };
 
-void __init tls_device_init(void)
+int __init tls_device_init(void)
 {
-       register_netdevice_notifier(&tls_dev_notifier);
+       return register_netdevice_notifier(&tls_dev_notifier);
 }
 
 void __exit tls_device_cleanup(void)
index da17641..d80ab3d 100644 (file)
@@ -921,6 +921,8 @@ static void tls_update(struct sock *sk, struct proto *p,
 {
        struct tls_context *ctx;
 
+       WARN_ON_ONCE(sk->sk_prot == p);
+
        ctx = tls_get_ctx(sk);
        if (likely(ctx)) {
                ctx->sk_write_space = write_space;
@@ -1046,7 +1048,12 @@ static int __init tls_register(void)
        if (err)
                return err;
 
-       tls_device_init();
+       err = tls_device_init();
+       if (err) {
+               unregister_pernet_subsys(&tls_proc_ops);
+               return err;
+       }
+
        tcp_register_ulp(&tcp_tls_ulp_ops);
 
        return 0;
index 0513f82..e30649f 100644 (file)
@@ -267,9 +267,6 @@ static int tls_do_decryption(struct sock *sk,
        }
        darg->async = false;
 
-       if (ret == -EBADMSG)
-               TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
-
        return ret;
 }
 
@@ -1579,8 +1576,11 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
        }
 
        err = decrypt_internal(sk, skb, dest, NULL, darg);
-       if (err < 0)
+       if (err < 0) {
+               if (err == -EBADMSG)
+                       TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
                return err;
+       }
        if (darg->async)
                goto decrypt_next;
 
index ff4d48f..607a689 100644 (file)
@@ -1031,7 +1031,8 @@ void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *bssid)
 {
        ASSERT_WDEV_LOCK(wdev);
 
-       if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+       if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
+                   wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
                return;
 
        if (WARN_ON(!wdev->current_bss) ||
index 19ac872..0900238 100644 (file)
@@ -538,12 +538,6 @@ static int xsk_generic_xmit(struct sock *sk)
                        goto out;
                }
 
-               skb = xsk_build_skb(xs, &desc);
-               if (IS_ERR(skb)) {
-                       err = PTR_ERR(skb);
-                       goto out;
-               }
-
                /* This is the backpressure mechanism for the Tx path.
                 * Reserve space in the completion queue and only proceed
                 * if there is space in it. This avoids having to implement
@@ -552,11 +546,19 @@ static int xsk_generic_xmit(struct sock *sk)
                spin_lock_irqsave(&xs->pool->cq_lock, flags);
                if (xskq_prod_reserve(xs->pool->cq)) {
                        spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
-                       kfree_skb(skb);
                        goto out;
                }
                spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
 
+               skb = xsk_build_skb(xs, &desc);
+               if (IS_ERR(skb)) {
+                       err = PTR_ERR(skb);
+                       spin_lock_irqsave(&xs->pool->cq_lock, flags);
+                       xskq_prod_cancel(xs->pool->cq);
+                       spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+                       goto out;
+               }
+
                err = __dev_direct_xmit(skb, xs->queue_id);
                if  (err == NETDEV_TX_BUSY) {
                        /* Tell user-space to retry the send */
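
The AF_XDP fix reorders resource acquisition: reserve the completion-queue slot first, then build the skb, and cancel the reservation if the build fails, so a slot is never consumed without a matching skb or vice versa. A schematic of the reserve/cancel pattern with hypothetical helpers (only xskq_prod_reserve()/xskq_prod_cancel() are the real names):

#include <linux/errno.h>

struct cq;                              /* placeholder completion queue */
static int reserve_slot(struct cq *q);  /* hypothetical helpers */
static void cancel_slot(struct cq *q);
static void *make_packet(struct cq *q);
static int transmit(void *pkt);

static int send_one(struct cq *q)
{
        void *pkt;

        if (reserve_slot(q))            /* backpressure: no slot, no send */
                return -EAGAIN;

        pkt = make_packet(q);
        if (!pkt) {
                cancel_slot(q);         /* undo the reservation */
                return -ENOMEM;
        }

        return transmit(pkt);
}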
index 87bdd71..f701121 100644 (file)
@@ -332,6 +332,7 @@ static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
                dma = &dma_map->dma_pages[i];
                if (*dma) {
+                       *dma &= ~XSK_NEXT_PG_CONTIG_MASK;
                        dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
                                             DMA_BIDIRECTIONAL, attrs);
                        *dma = 0;
index f1876ea..f1a0bab 100644 (file)
@@ -2678,8 +2678,10 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family,
                *num_xfrms = 0;
                return 0;
        }
-       if (IS_ERR(pols[0]))
+       if (IS_ERR(pols[0])) {
+               *num_pols = 0;
                return PTR_ERR(pols[0]);
+       }
 
        *num_xfrms = pols[0]->xfrm_nr;
 
@@ -2694,6 +2696,7 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family,
                if (pols[1]) {
                        if (IS_ERR(pols[1])) {
                                xfrm_pols_put(pols, *num_pols);
+                               *num_pols = 0;
                                return PTR_ERR(pols[1]);
                        }
                        (*num_pols)++;
index 08564e0..ccfb172 100644 (file)
@@ -2620,7 +2620,7 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
        int err;
 
        if (family == AF_INET &&
-           xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc)
+           READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc))
                x->props.flags |= XFRM_STATE_NOPMTUDISC;
 
        err = -EPROTONOSUPPORT;
index 24d3cf1..e22da85 100644 (file)
 
 #define BACKTRACE_DEPTH 16
 #define MAX_SYMBOL_LEN 4096
-struct fprobe sample_probe;
+static struct fprobe sample_probe;
+static unsigned long nhit;
 
 static char symbol[MAX_SYMBOL_LEN] = "kernel_clone";
 module_param_string(symbol, symbol, sizeof(symbol), 0644);
+MODULE_PARM_DESC(symbol, "Probed symbol(s), given by comma separated symbols or a wildcard pattern.");
+
 static char nosymbol[MAX_SYMBOL_LEN] = "";
 module_param_string(nosymbol, nosymbol, sizeof(nosymbol), 0644);
+MODULE_PARM_DESC(nosymbol, "Not-probed symbols, given by a wildcard pattern.");
+
 static bool stackdump = true;
 module_param(stackdump, bool, 0644);
+MODULE_PARM_DESC(stackdump, "Enable stackdump.");
+
+static bool use_trace = false;
+module_param(use_trace, bool, 0644);
+MODULE_PARM_DESC(use_trace, "Use trace_printk instead of printk. This is only for debugging.");
 
 static void show_backtrace(void)
 {
@@ -40,7 +50,15 @@ static void show_backtrace(void)
 
 static void sample_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
 {
-       pr_info("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
+       if (use_trace)
+               /*
+                * This is just an example; no kernel code should call
+                * trace_printk() except when actively debugging.
+                */
+               trace_printk("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
+       else
+               pr_info("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
+       nhit++;
        if (stackdump)
                show_backtrace();
 }
@@ -49,8 +67,17 @@ static void sample_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_r
 {
        unsigned long rip = instruction_pointer(regs);
 
-       pr_info("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
-               (void *)ip, (void *)ip, (void *)rip, (void *)rip);
+       if (use_trace)
+               /*
+                * This is just an example; no kernel code should call
+                * trace_printk() except when actively debugging.
+                */
+               trace_printk("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
+                       (void *)ip, (void *)ip, (void *)rip, (void *)rip);
+       else
+               pr_info("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
+                       (void *)ip, (void *)ip, (void *)rip, (void *)rip);
+       nhit++;
        if (stackdump)
                show_backtrace();
 }
@@ -112,7 +139,8 @@ static void __exit fprobe_exit(void)
 {
        unregister_fprobe(&sample_probe);
 
-       pr_info("fprobe at %s unregistered\n", symbol);
+       pr_info("fprobe at %s unregistered. %ld times hit, %ld times missed\n",
+               symbol, nhit, sample_probe.nmissed);
 }
 
 module_init(fprobe_init)
index f991a66..fd346f5 100644 (file)
@@ -16,9 +16,8 @@
 #include <linux/module.h>
 #include <linux/kprobes.h>
 
-#define MAX_SYMBOL_LEN 64
-static char symbol[MAX_SYMBOL_LEN] = "kernel_clone";
-module_param_string(symbol, symbol, sizeof(symbol), 0644);
+static char symbol[KSYM_NAME_LEN] = "kernel_clone";
+module_param_string(symbol, symbol, KSYM_NAME_LEN, 0644);
 
 /* For each probe you need to allocate a kprobe structure */
 static struct kprobe kp = {
index 228321e..cbf1654 100644 (file)
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include <linux/ktime.h>
-#include <linux/limits.h>
 #include <linux/sched.h>
 
-static char func_name[NAME_MAX] = "kernel_clone";
-module_param_string(func, func_name, NAME_MAX, S_IRUGO);
+static char func_name[KSYM_NAME_LEN] = "kernel_clone";
+module_param_string(func, func_name, KSYM_NAME_LEN, 0644);
 MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the"
                        " function's execution time");
 
index d142577..3fb6a99 100644 (file)
@@ -236,6 +236,7 @@ objtool_args =                                                              \
        $(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount)             \
        $(if $(CONFIG_UNWINDER_ORC), --orc)                             \
        $(if $(CONFIG_RETPOLINE), --retpoline)                          \
+       $(if $(CONFIG_RETHUNK), --rethunk)                              \
        $(if $(CONFIG_SLS), --sls)                                      \
        $(if $(CONFIG_STACK_VALIDATION), --stackval)                    \
        $(if $(CONFIG_HAVE_STATIC_CALL_INLINE), --static-call)          \
index c2c43a0..16a02e9 100644 (file)
@@ -28,9 +28,6 @@ modules := $(patsubst $(extmod_prefix)%, $(dst)/%$(suffix-y), $(modules))
 __modinst: $(modules)
        @:
 
-quiet_cmd_none =
-      cmd_none = :
-
 #
 # Installation
 #
index 3c97a15..8401981 100644 (file)
@@ -44,7 +44,7 @@ objtool-enabled := $(or $(delay-objtool),$(CONFIG_NOINSTR_VALIDATION))
 
 objtool_args := \
        $(if $(delay-objtool),$(objtool_args)) \
-       $(if $(CONFIG_NOINSTR_VALIDATION), --noinstr) \
+       $(if $(CONFIG_NOINSTR_VALIDATION), --noinstr $(if $(CONFIG_CPU_UNRET_ENTRY), --unret)) \
        $(if $(CONFIG_GCOV_KERNEL), --no-unreachable) \
        --link
 
index 1d1bde1..47da25b 100755 (executable)
@@ -157,10 +157,10 @@ def cmdfiles_for_modorder(modorder):
             if ext != '.ko':
                 sys.exit('{}: module path must end with .ko'.format(ko))
             mod = base + '.mod'
-           # The first line of *.mod lists the objects that compose the module.
+            # Read from *.mod, to get a list of objects that compose the module.
             with open(mod) as m:
-                for obj in m.readline().split():
-                    yield to_cmdfile(obj)
+                for mod_line in m:
+                    yield to_cmdfile(mod_line.rstrip())
 
 
 def process_line(root_directory, command_prefix, file_path):
index 0e6268d..94ed98d 100755 (executable)
@@ -95,17 +95,25 @@ __faddr2line() {
        local print_warnings=$4
 
        local sym_name=${func_addr%+*}
-       local offset=${func_addr#*+}
-       offset=${offset%/*}
+       local func_offset=${func_addr#*+}
+       func_offset=${func_offset%/*}
        local user_size=
+       local file_type
+       local is_vmlinux=0
        [[ $func_addr =~ "/" ]] && user_size=${func_addr#*/}
 
-       if [[ -z $sym_name ]] || [[ -z $offset ]] || [[ $sym_name = $func_addr ]]; then
+       if [[ -z $sym_name ]] || [[ -z $func_offset ]] || [[ $sym_name = $func_addr ]]; then
                warn "bad func+offset $func_addr"
                DONE=1
                return
        fi
 
+       # vmlinux uses absolute addresses in the section table rather than
+       # section offsets.
+       local file_type=$(${READELF} --file-header $objfile |
+               ${AWK} '$1 == "Type:" { print $2; exit }')
+       [[ $file_type = "EXEC" ]] && is_vmlinux=1
+
        # Go through each of the object's symbols which match the func name.
        # In rare cases there might be duplicates, in which case we print all
        # matches.
@@ -114,9 +122,11 @@ __faddr2line() {
                local sym_addr=0x${fields[1]}
                local sym_elf_size=${fields[2]}
                local sym_sec=${fields[6]}
+               local sec_size
+               local sec_name
 
                # Get the section size:
-               local sec_size=$(${READELF} --section-headers --wide $objfile |
+               sec_size=$(${READELF} --section-headers --wide $objfile |
                        sed 's/\[ /\[/' |
                        ${AWK} -v sec=$sym_sec '$1 == "[" sec "]" { print "0x" $6; exit }')
 
@@ -126,6 +136,17 @@ __faddr2line() {
                        return
                fi
 
+               # Get the section name:
+               sec_name=$(${READELF} --section-headers --wide $objfile |
+                       sed 's/\[ /\[/' |
+                       ${AWK} -v sec=$sym_sec '$1 == "[" sec "]" { print $2; exit }')
+
+               if [[ -z $sec_name ]]; then
+                       warn "bad section name: section: $sym_sec"
+                       DONE=1
+                       return
+               fi
+
                # Calculate the symbol size.
                #
                # Unfortunately we can't use the ELF size, because kallsyms
@@ -174,10 +195,10 @@ __faddr2line() {
 
                sym_size=0x$(printf %x $sym_size)
 
-               # Calculate the section address from user-supplied offset:
-               local addr=$(($sym_addr + $offset))
+               # Calculate the address from user-supplied offset:
+               local addr=$(($sym_addr + $func_offset))
                if [[ -z $addr ]] || [[ $addr = 0 ]]; then
-                       warn "bad address: $sym_addr + $offset"
+                       warn "bad address: $sym_addr + $func_offset"
                        DONE=1
                        return
                fi
@@ -191,9 +212,9 @@ __faddr2line() {
                fi
 
                # Make sure the provided offset is within the symbol's range:
-               if [[ $offset -gt $sym_size ]]; then
+               if [[ $func_offset -gt $sym_size ]]; then
                        [[ $print_warnings = 1 ]] &&
-                               echo "skipping $sym_name address at $addr due to size mismatch ($offset > $sym_size)"
+                               echo "skipping $sym_name address at $addr due to size mismatch ($func_offset > $sym_size)"
                        continue
                fi
 
@@ -202,11 +223,13 @@ __faddr2line() {
                [[ $FIRST = 0 ]] && echo
                FIRST=0
 
-               echo "$sym_name+$offset/$sym_size:"
+               echo "$sym_name+$func_offset/$sym_size:"
 
                # Pass section address to addr2line and strip absolute paths
                # from the output:
-               local output=$(${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;")
+               local args="--functions --pretty-print --inlines --exe=$objfile"
+               [[ $is_vmlinux = 0 ]] && args="$args --section=$sec_name"
+               local output=$(${ADDR2LINE} $args $addr | sed "s; $dir_prefix\(\./\)*; ;")
                [[ -z $output ]] && continue
 
                # Default output (non --list):
index 46f7542..dc07b6d 100644 (file)
@@ -180,7 +180,7 @@ lx-symbols command."""
                 self.breakpoint.delete()
                 self.breakpoint = None
             self.breakpoint = LoadModuleBreakpoint(
-                "kernel/module.c:do_init_module", self)
+                "kernel/module/main.c:do_init_module", self)
         else:
             gdb.write("Note: symbol update on module loading not supported "
                       "with this gdb version\n")
index faacf70..653fadb 100755 (executable)
@@ -56,4 +56,7 @@ EOT
 # point addresses.
 sed -e 's/^\.//' |
 sort -u |
+# Ignore __this_module. It's not an exported symbol, and will be resolved
+# when the final .ko's are linked.
+grep -v '^__this_module$' |
 sed -e 's/\(.*\)/#define __KSYM_\1 1/' >> "$output_file"
index 29d5a84..620dc8c 100644 (file)
@@ -980,7 +980,7 @@ static const struct sectioncheck sectioncheck[] = {
 },
 /* Do not export init/exit functions or data */
 {
-       .fromsec = { "__ksymtab*", NULL },
+       .fromsec = { "___ksymtab*", NULL },
        .bad_tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
        .mismatch = EXPORT_TO_INIT_EXIT,
        .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
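
The extra leading underscore is deliberate: before final link, each EXPORT_SYMBOL() entry sits in its own per-symbol ELF section whose name starts with three underscores, and only the vmlinux linker script merges those into __ksymtab. modpost scans the pre-link objects, so its fromsec pattern has to match the three-underscore form. A much-simplified sketch of the naming (the real macro in include/linux/export.h is assembler-based):

/* Simplified sketch only: one section per exported symbol, e.g.
 * "___ksymtab+my_sym"; "___ksymtab*" input sections are later
 * collected into the single "__ksymtab" output section. */
#define SKETCH_EXPORT(sym) \
	static void *__ksymtab_ref_##sym \
	__attribute__((__section__("___ksymtab+" #sym), __used__)) = (void *)&sym
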
index 7adab46..379e86c 100755 (executable)
@@ -41,3 +41,5 @@ if [ -n "${building_out_of_srctree}" ]; then
 fi
 
 rm -f scripts/extract-cert
+
+rm -f arch/x86/purgatory/kexec-purgatory.c
index f29e4c6..e6db09a 100644 (file)
@@ -54,17 +54,6 @@ config SECURITY_NETWORK
          implement socket and networking access controls.
          If you are unsure how to answer this question, answer N.
 
-config PAGE_TABLE_ISOLATION
-       bool "Remove the kernel mapping in user mode"
-       default y
-       depends on (X86_64 || X86_PAE) && !UML
-       help
-         This feature reduces the number of hardware side channels by
-         ensuring that the majority of kernel addresses are not mapped
-         into userspace.
-
-         See Documentation/x86/pti.rst for more details.
-
 config SECURITY_INFINIBAND
        bool "Infiniband Security Hooks"
        depends on SECURITY && INFINIBAND
index a733aff..708de96 100644 (file)
@@ -75,7 +75,7 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo)
 {
        long rc;
        const char *algo;
-       struct crypto_shash **tfm, *tmp_tfm = NULL;
+       struct crypto_shash **tfm, *tmp_tfm;
        struct shash_desc *desc;
 
        if (type == EVM_XATTR_HMAC) {
@@ -120,16 +120,13 @@ unlock:
 alloc:
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
                        GFP_KERNEL);
-       if (!desc) {
-               crypto_free_shash(tmp_tfm);
+       if (!desc)
                return ERR_PTR(-ENOMEM);
-       }
 
        desc->tfm = *tfm;
 
        rc = crypto_shash_init(desc);
        if (rc) {
-               crypto_free_shash(tmp_tfm);
                kfree(desc);
                return ERR_PTR(rc);
        }
index cc88f02..93e8bc0 100644 (file)
@@ -755,13 +755,14 @@ void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
        evm_update_evmxattr(dentry, xattr_name, NULL, 0);
 }
 
-static int evm_attr_change(struct dentry *dentry, struct iattr *attr)
+static int evm_attr_change(struct user_namespace *mnt_userns,
+                          struct dentry *dentry, struct iattr *attr)
 {
        struct inode *inode = d_backing_inode(dentry);
        unsigned int ia_valid = attr->ia_valid;
 
-       if ((!(ia_valid & ATTR_UID) || uid_eq(attr->ia_uid, inode->i_uid)) &&
-           (!(ia_valid & ATTR_GID) || gid_eq(attr->ia_gid, inode->i_gid)) &&
+       if (!i_uid_needs_update(mnt_userns, attr, inode) &&
+           !i_gid_needs_update(mnt_userns, attr, inode) &&
            (!(ia_valid & ATTR_MODE) || attr->ia_mode == inode->i_mode))
                return 0;
 
@@ -775,7 +776,8 @@ static int evm_attr_change(struct dentry *dentry, struct iattr *attr)
  * Permit update of file attributes when files have a valid EVM signature,
  * except in the case of them having an immutable portable signature.
  */
-int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+int evm_inode_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+                     struct iattr *attr)
 {
        unsigned int ia_valid = attr->ia_valid;
        enum integrity_status evm_status;
@@ -801,7 +803,7 @@ int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
                return 0;
 
        if (evm_status == INTEGRITY_PASS_IMMUTABLE &&
-           !evm_attr_change(dentry, attr))
+           !evm_attr_change(mnt_userns, dentry, attr))
                return 0;
 
        integrity_audit_msg(AUDIT_INTEGRITY_METADATA, d_backing_inode(dentry),
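
The new helpers make EVM idmapped-mount aware: i_uid_needs_update() compares the requested owner against the inode through the mount's user namespace, so writing back an unchanged (mapped) uid no longer counts as an attribute change. A rough sketch of the helper's semantics, assuming the fs.h interfaces of this series rather than quoting them:

#include <linux/fs.h>

/* Rough sketch, not the kernel implementation: ATTR_UID only "needs
 * an update" if the mapped uid differs from the inode's owner. */
static bool sketch_uid_needs_update(struct user_namespace *mnt_userns,
				    const struct iattr *attr,
				    const struct inode *inode)
{
	return (attr->ia_valid & ATTR_UID) &&
	       !uid_eq(attr->ia_uid, i_uid_into_mnt(mnt_userns, inode));
}
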
index cdb84dc..bde74fc 100644 (file)
@@ -514,7 +514,8 @@ int ima_appraise_measurement(enum ima_hooks func,
                goto out;
        }
 
-       status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint);
+       status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value,
+                                rc < 0 ? 0 : rc, iint);
        switch (status) {
        case INTEGRITY_PASS:
        case INTEGRITY_PASS_IMMUTABLE:
index a7206cc..6449905 100644 (file)
@@ -205,6 +205,7 @@ out_array:
 
                crypto_free_shash(ima_algo_array[i].tfm);
        }
+       kfree(ima_algo_array);
 out:
        crypto_free_shash(ima_shash_tfm);
        return rc;
index 71786d0..9db66fe 100644 (file)
@@ -67,6 +67,8 @@ const char * const *arch_get_ima_policy(void)
        if (IS_ENABLED(CONFIG_IMA_ARCH_POLICY) && arch_ima_get_secureboot()) {
                if (IS_ENABLED(CONFIG_MODULE_SIG))
                        set_module_sig_enforced();
+               if (IS_ENABLED(CONFIG_KEXEC_SIG))
+                       set_kexec_sig_enforced();
                return sb_arch_rules;
        }
        return NULL;
index 1375313..419dc40 100644 (file)
@@ -137,7 +137,7 @@ void ima_add_kexec_buffer(struct kimage *image)
 /*
  * Restore the measurement list from the previous kernel.
  */
-void ima_load_kexec_buffer(void)
+void __init ima_load_kexec_buffer(void)
 {
        void *kexec_buffer = NULL;
        size_t kexec_buffer_size = 0;
index 7391741..a8802b8 100644 (file)
@@ -2247,6 +2247,10 @@ bool ima_appraise_signature(enum kernel_read_file_id id)
        if (id >= READING_MAX_ID)
                return false;
 
+       if (id == READING_KEXEC_IMAGE && !(ima_appraise & IMA_APPRAISE_ENFORCE)
+           && security_locked_down(LOCKDOWN_KEXEC))
+               return false;
+
        func = read_idmap[id] ?: FILE_CHECK;
 
        rcu_read_lock();
index c877f01..7bf9b15 100644 (file)
@@ -323,10 +323,10 @@ static int ima_eventdigest_init_common(const u8 *digest, u32 digestsize,
        else
                /*
                 * If digest is NULL, the event being recorded is a violation.
-                * Make room for the digest by increasing the offset of
-                * IMA_DIGEST_SIZE.
+                * Make room for the digest by increasing the offset by the
+                * hash algorithm digest size.
                 */
-               offset += IMA_DIGEST_SIZE;
+               offset += hash_digest_size[hash_algo];
 
        return ima_write_template_field_data(buffer, offset + digestsize,
                                             fmt, field_data);
index 188b8f7..f85afb0 100644 (file)
@@ -1324,7 +1324,8 @@ int security_inode_permission(struct inode *inode, int mask)
        return call_int_hook(inode_permission, 0, inode, mask);
 }
 
-int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
+int security_inode_setattr(struct user_namespace *mnt_userns,
+                          struct dentry *dentry, struct iattr *attr)
 {
        int ret;
 
@@ -1333,7 +1334,7 @@ int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
        ret = call_int_hook(inode_setattr, 0, dentry, attr);
        if (ret)
                return ret;
-       return evm_inode_setattr(dentry, attr);
+       return evm_inode_setattr(mnt_userns, dentry, attr);
 }
 EXPORT_SYMBOL_GPL(security_inode_setattr);
 
index beceb89..1bbd533 100644 (file)
@@ -2600,8 +2600,9 @@ static int selinux_sb_eat_lsm_opts(char *options, void **mnt_opts)
                                }
                        }
                        rc = selinux_add_opt(token, arg, mnt_opts);
+                       kfree(arg);
+                       arg = NULL;
                        if (unlikely(rc)) {
-                               kfree(arg);
                                goto free_opt;
                        }
                } else {
@@ -2792,17 +2793,13 @@ static int selinux_fs_context_parse_param(struct fs_context *fc,
                                          struct fs_parameter *param)
 {
        struct fs_parse_result result;
-       int opt, rc;
+       int opt;
 
        opt = fs_parse(fc, selinux_fs_parameters, param, &result);
        if (opt < 0)
                return opt;
 
-       rc = selinux_add_opt(opt, param->string, &fc->security);
-       if (!rc)
-               param->string = NULL;
-
-       return rc;
+       return selinux_add_opt(opt, param->string, &fc->security);
 }
 
 /* inode security operations */
index 15dc716..8cfdaee 100644 (file)
@@ -431,33 +431,17 @@ static const struct snd_malloc_ops snd_dma_iram_ops = {
  */
 static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
-       void *p;
-
-       p = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
-#ifdef CONFIG_X86
-       if (p && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
-               set_memory_wc((unsigned long)p, PAGE_ALIGN(size) >> PAGE_SHIFT);
-#endif
-       return p;
+       return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
 }
 
 static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
 {
-#ifdef CONFIG_X86
-       if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
-               set_memory_wb((unsigned long)dmab->area,
-                             PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
-#endif
        dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
 }
 
 static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
                            struct vm_area_struct *area)
 {
-#ifdef CONFIG_X86
-       if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
-               area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
-#endif
        return dma_mmap_coherent(dmab->dev.dev, area,
                                 dmab->area, dmab->addr, dmab->bytes);
 }
@@ -471,10 +455,6 @@ static const struct snd_malloc_ops snd_dma_dev_ops = {
 /*
  * Write-combined pages
  */
-#ifdef CONFIG_X86
-/* On x86, share the same ops as the standard dev ops */
-#define snd_dma_wc_ops snd_dma_dev_ops
-#else /* CONFIG_X86 */
 static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
        return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
@@ -497,7 +477,6 @@ static const struct snd_malloc_ops snd_dma_wc_ops = {
        .free = snd_dma_wc_free,
        .mmap = snd_dma_wc_mmap,
 };
-#endif /* CONFIG_X86 */
 
 #ifdef CONFIG_SND_DMA_SGBUF
 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
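
With the x86 special-casing gone, write-combined buffers come from the generic DMA API on every architecture instead of retyping coherent memory with set_memory_wc(). Minimal sketch of the portable calls, with hypothetical wrapper names:

#include <linux/dma-mapping.h>

static void *wc_alloc(struct device *dev, size_t size, dma_addr_t *addr)
{
	/* Generic write-combined allocation; works on all arches. */
	return dma_alloc_wc(dev, size, addr, GFP_KERNEL);
}

static void wc_free(struct device *dev, size_t size, void *area,
		    dma_addr_t addr)
{
	dma_free_wc(dev, size, area, addr);
}
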
index 3f35972..161a971 100644 (file)
@@ -119,21 +119,18 @@ static int i915_component_master_match(struct device *dev, int subcomponent,
 /* check whether Intel graphics is present and reachable */
 static int i915_gfx_present(struct pci_dev *hdac_pci)
 {
-       unsigned int class = PCI_BASE_CLASS_DISPLAY << 16;
        struct pci_dev *display_dev = NULL;
-       bool match = false;
 
-       do {
-               display_dev = pci_get_class(class, display_dev);
-
-               if (display_dev && display_dev->vendor == PCI_VENDOR_ID_INTEL &&
+       for_each_pci_dev(display_dev) {
+               if (display_dev->vendor == PCI_VENDOR_ID_INTEL &&
+                   (display_dev->class >> 16) == PCI_BASE_CLASS_DISPLAY &&
                    connectivity_check(display_dev, hdac_pci)) {
                        pci_dev_put(display_dev);
-                       match = true;
+                       return true;
                }
-       } while (!match && display_dev);
+       }
 
-       return match;
+       return false;
 }
 
 /**
index a8fe017..ec9cbb2 100644 (file)
@@ -196,6 +196,12 @@ static const struct config_entry config_table[] = {
                                        DMI_MATCH(DMI_SYS_VENDOR, "Google"),
                                }
                        },
+                       {
+                               .ident = "UP-WHL",
+                               .matches = {
+                                       DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
+                               }
+                       },
                        {}
                }
        },
@@ -358,6 +364,12 @@ static const struct config_entry config_table[] = {
                                        DMI_MATCH(DMI_SYS_VENDOR, "Google"),
                                }
                        },
+                       {
+                               .ident = "UPX-TGL",
+                               .matches = {
+                                       DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
+                               }
+                       },
                        {}
                }
        },
index 4063da3..9db5ccd 100644 (file)
@@ -55,8 +55,8 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
 
                /* find max number of channels based on format_configuration */
                if (fmt_configs->fmt_count) {
-                       dev_dbg(dev, "%s: found %d format definitions\n",
-                               __func__, fmt_configs->fmt_count);
+                       dev_dbg(dev, "found %d format definitions\n",
+                               fmt_configs->fmt_count);
 
                        for (i = 0; i < fmt_configs->fmt_count; i++) {
                                struct wav_fmt_ext *fmt_ext;
@@ -66,9 +66,9 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
                                if (fmt_ext->fmt.channels > max_ch)
                                        max_ch = fmt_ext->fmt.channels;
                        }
-                       dev_dbg(dev, "%s: max channels found %d\n", __func__, max_ch);
+                       dev_dbg(dev, "max channels found %d\n", max_ch);
                } else {
-                       dev_dbg(dev, "%s: No format information found\n", __func__);
+                       dev_dbg(dev, "No format information found\n");
                }
 
                if (cfg->device_config.config_type != NHLT_CONFIG_TYPE_MIC_ARRAY) {
@@ -95,17 +95,16 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
                        }
 
                        if (dmic_geo > 0) {
-                               dev_dbg(dev, "%s: Array with %d dmics\n", __func__, dmic_geo);
+                               dev_dbg(dev, "Array with %d dmics\n", dmic_geo);
                        }
                        if (max_ch > dmic_geo) {
-                               dev_dbg(dev, "%s: max channels %d exceed dmic number %d\n",
-                                       __func__, max_ch, dmic_geo);
+                               dev_dbg(dev, "max channels %d exceed dmic number %d\n",
+                                       max_ch, dmic_geo);
                        }
                }
        }
 
-       dev_dbg(dev, "%s: dmic number %d max_ch %d\n",
-               __func__, dmic_geo, max_ch);
+       dev_dbg(dev, "dmic number %d max_ch %d\n", dmic_geo, max_ch);
 
        return dmic_geo;
 }
index bd60308..8634004 100644 (file)
@@ -74,36 +74,36 @@ static int snd_card_cs46xx_probe(struct pci_dev *pci,
        err = snd_cs46xx_create(card, pci,
                                external_amp[dev], thinkpad[dev]);
        if (err < 0)
-               return err;
+               goto error;
        card->private_data = chip;
        chip->accept_valid = mmap_valid[dev];
        err = snd_cs46xx_pcm(chip, 0);
        if (err < 0)
-               return err;
+               goto error;
 #ifdef CONFIG_SND_CS46XX_NEW_DSP
        err = snd_cs46xx_pcm_rear(chip, 1);
        if (err < 0)
-               return err;
+               goto error;
        err = snd_cs46xx_pcm_iec958(chip, 2);
        if (err < 0)
-               return err;
+               goto error;
 #endif
        err = snd_cs46xx_mixer(chip, 2);
        if (err < 0)
-               return err;
+               goto error;
 #ifdef CONFIG_SND_CS46XX_NEW_DSP
        if (chip->nr_ac97_codecs ==2) {
                err = snd_cs46xx_pcm_center_lfe(chip, 3);
                if (err < 0)
-                       return err;
+                       goto error;
        }
 #endif
        err = snd_cs46xx_midi(chip, 0);
        if (err < 0)
-               return err;
+               goto error;
        err = snd_cs46xx_start_dsp(chip);
        if (err < 0)
-               return err;
+               goto error;
 
        snd_cs46xx_gameport(chip);
 
@@ -117,11 +117,15 @@ static int snd_card_cs46xx_probe(struct pci_dev *pci,
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver cs46xx_driver = {
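
The probe now funnels every failure through one label: with a devres-managed card object, letting devres tear things down on probe failure runs the releases in the wrong order, so snd_card_free() must be called explicitly before returning. The generic shape, sketched under that assumption rather than as cs46xx code:

#include <linux/module.h>
#include <linux/pci.h>
#include <sound/core.h>

static int example_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct snd_card *card;
	int err;

	err = snd_devm_card_new(&pci->dev, -1, NULL, THIS_MODULE, 0, &card);
	if (err < 0)
		return err;

	err = snd_card_register(card);	/* any failure after creation... */
	if (err < 0)
		goto error;

	pci_set_drvdata(pci, card);
	return 0;

 error:
	snd_card_free(card);	/* ...must free the card by hand */
	return err;
}
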
index cd1db94..7c6b1fe 100644 (file)
@@ -819,7 +819,7 @@ static void set_pin_targets(struct hda_codec *codec,
                snd_hda_set_pin_ctl_cache(codec, cfg->nid, cfg->val);
 }
 
-static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
+void __snd_hda_apply_fixup(struct hda_codec *codec, int id, int action, int depth)
 {
        const char *modelname = codec->fixup_name;
 
@@ -829,7 +829,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
                if (++depth > 10)
                        break;
                if (fix->chained_before)
-                       apply_fixup(codec, fix->chain_id, action, depth + 1);
+                       __snd_hda_apply_fixup(codec, fix->chain_id, action, depth + 1);
 
                switch (fix->type) {
                case HDA_FIXUP_PINS:
@@ -870,6 +870,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
                id = fix->chain_id;
        }
 }
+EXPORT_SYMBOL_GPL(__snd_hda_apply_fixup);
 
 /**
  * snd_hda_apply_fixup - Apply the fixup chain with the given action
@@ -879,7 +880,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
 void snd_hda_apply_fixup(struct hda_codec *codec, int action)
 {
        if (codec->fixup_list)
-               apply_fixup(codec, codec->fixup_id, action, 0);
+               __snd_hda_apply_fixup(codec, codec->fixup_id, action, 0);
 }
 EXPORT_SYMBOL_GPL(snd_hda_apply_fixup);
 
index aca5926..682dca2 100644 (file)
@@ -348,6 +348,7 @@ void snd_hda_apply_verbs(struct hda_codec *codec);
 void snd_hda_apply_pincfgs(struct hda_codec *codec,
                           const struct hda_pintbl *cfg);
 void snd_hda_apply_fixup(struct hda_codec *codec, int action);
+void __snd_hda_apply_fixup(struct hda_codec *codec, int id, int action, int depth);
 void snd_hda_pick_fixup(struct hda_codec *codec,
                        const struct hda_model_fixup *models,
                        const struct snd_pci_quirk *quirk,
index 1248d1a..83ae21a 100644 (file)
@@ -944,6 +944,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
        SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x103c, 0x82b4, "HP ProDesk 600 G3", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
@@ -1079,11 +1080,11 @@ static int patch_conexant_auto(struct hda_codec *codec)
        if (err < 0)
                goto error;
 
-       err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
+       err = cx_auto_parse_beep(codec);
        if (err < 0)
                goto error;
 
-       err = cx_auto_parse_beep(codec);
+       err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
        if (err < 0)
                goto error;
 
index b0f9541..2f55bc4 100644 (file)
@@ -2634,6 +2634,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67f1, "Clevo PC70H[PRS]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x67f5, "Clevo PD70PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170SM", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x7715, "Clevo X170KM-G", ALC1220_FIXUP_CLEVO_PB51ED),
@@ -6900,6 +6901,7 @@ enum {
        ALC298_FIXUP_LENOVO_SPK_VOLUME,
        ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
        ALC269_FIXUP_ATIV_BOOK_8,
+       ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE,
        ALC221_FIXUP_HP_MIC_NO_PRESENCE,
        ALC256_FIXUP_ASUS_HEADSET_MODE,
        ALC256_FIXUP_ASUS_MIC,
@@ -7004,6 +7006,7 @@ enum {
        ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS,
        ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
        ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
+       ALC298_FIXUP_LENOVO_C940_DUET7,
        ALC287_FIXUP_13S_GEN2_SPEAKERS,
        ALC256_FIXUP_SET_COEF_DEFAULTS,
        ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
@@ -7022,6 +7025,23 @@ enum {
        ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE,
 };
 
+/* A special fixup for Lenovo C940 and Yoga Duet 7;
+ * both have the very same PCI SSID, and we need to apply different fixups
+ * depending on the codec ID
+ */
+static void alc298_fixup_lenovo_c940_duet7(struct hda_codec *codec,
+                                          const struct hda_fixup *fix,
+                                          int action)
+{
+       int id;
+
+       if (codec->core.vendor_id == 0x10ec0298)
+               id = ALC298_FIXUP_LENOVO_SPK_VOLUME; /* C940 */
+       else
+               id = ALC287_FIXUP_YOGA7_14ITL_SPEAKERS; /* Duet 7 */
+       __snd_hda_apply_fixup(codec, id, action, 0);
+}
+
 static const struct hda_fixup alc269_fixups[] = {
        [ALC269_FIXUP_GPIO2] = {
                .type = HDA_FIXUP_FUNC,
@@ -7818,6 +7838,16 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_NO_SHUTUP
        },
+       [ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+                       { 0x1a, 0x01813030 }, /* use as headphone mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE
+       },
        [ALC221_FIXUP_HP_MIC_NO_PRESENCE] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -8721,6 +8751,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE,
        },
+       [ALC298_FIXUP_LENOVO_C940_DUET7] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc298_fixup_lenovo_c940_duet7,
+       },
        [ALC287_FIXUP_13S_GEN2_SPEAKERS] = {
                .type = HDA_FIXUP_VERBS,
                .v.verbs = (const struct hda_verb[]) {
@@ -8863,6 +8897,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x129c, "Acer SWIFT SF314-55", ALC256_FIXUP_ACER_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x129d, "Acer SWIFT SF313-51", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1300, "Acer SWIFT SF314-56", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
@@ -8872,6 +8907,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+       SND_PCI_QUIRK(0x1028, 0x053c, "Dell Latitude E5430", ALC292_FIXUP_DELL_E7X),
        SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
        SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
        SND_PCI_QUIRK(0x1028, 0x05be, "Dell Latitude E6540", ALC292_FIXUP_DELL_E7X),
@@ -8987,6 +9023,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+       SND_PCI_QUIRK(0x103c, 0x2b5e, "HP 288 Pro G2 MT", ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
@@ -9022,6 +9059,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
                      ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation",
                      ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x8787, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
@@ -9072,6 +9110,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8aa8, "HP EliteBook 640 G9 (MB 8AA6)", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8aab, "HP EliteBook 650 G9 (MB 8AA9)", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -9187,6 +9229,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x7716, "Clevo NS50PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x7718, "Clevo L140PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -9273,7 +9317,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
        SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
-       SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
+       SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7),
        SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
@@ -9329,6 +9373,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
        SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
        SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
        SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
@@ -10737,6 +10782,7 @@ enum {
        ALC668_FIXUP_MIC_DET_COEF,
        ALC897_FIXUP_LENOVO_HEADSET_MIC,
        ALC897_FIXUP_HEADSET_MIC_PIN,
+       ALC897_FIXUP_HP_HSMIC_VERB,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -11156,6 +11202,13 @@ static const struct hda_fixup alc662_fixups[] = {
                .chained = true,
                .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC
        },
+       [ALC897_FIXUP_HP_HSMIC_VERB] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -11181,7 +11234,9 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+       SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
        SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+       SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2),
        SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2),
        SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
        SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
index a05304f..aea7fae 100644 (file)
@@ -518,11 +518,11 @@ static int via_parse_auto_config(struct hda_codec *codec)
        if (err < 0)
                return err;
 
-       err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
+       err = auto_parse_beep(codec);
        if (err < 0)
                return err;
 
-       err = auto_parse_beep(codec);
+       err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
        if (err < 0)
                return err;
 
index 55e773f..93606e5 100644 (file)
@@ -868,10 +868,12 @@ static void ak4613_parse_of(struct ak4613_priv *priv,
 
        /*
         * connected STDI
+        * TDM support is assuming it is probed via Audio-Graph-Card style here.
+        * Default is SDTIx1 if it was probed via Simple-Audio-Card for now.
         */
        sdti_num = of_graph_get_endpoint_count(np);
-       if (WARN_ON((sdti_num > 3) || (sdti_num < 1)))
-               return;
+       if ((sdti_num >= SDTx_MAX) || (sdti_num < 1))
+               sdti_num = 1;
 
        AK4613_CONFIG_SDTI_set(priv, sdti_num);
 }
index e32871b..7434aee 100644 (file)
@@ -1760,8 +1760,8 @@ static bool arizona_aif_cfg_changed(struct snd_soc_component *component,
        if (bclk != (val & ARIZONA_AIF1_BCLK_FREQ_MASK))
                return true;
 
-       val = snd_soc_component_read(component, base + ARIZONA_AIF_TX_BCLK_RATE);
-       if (lrclk != (val & ARIZONA_AIF1TX_BCPF_MASK))
+       val = snd_soc_component_read(component, base + ARIZONA_AIF_RX_BCLK_RATE);
+       if (lrclk != (val & ARIZONA_AIF1RX_BCPF_MASK))
                return true;
 
        val = snd_soc_component_read(component, base + ARIZONA_AIF_FRAME_CTRL_1);
index 6d3070e..198cfe5 100644 (file)
@@ -37,8 +37,8 @@ static const struct reg_default cs35l41_reg[] = {
        { CS35L41_DAC_PCM1_SRC,                 0x00000008 },
        { CS35L41_ASP_TX1_SRC,                  0x00000018 },
        { CS35L41_ASP_TX2_SRC,                  0x00000019 },
-       { CS35L41_ASP_TX3_SRC,                  0x00000020 },
-       { CS35L41_ASP_TX4_SRC,                  0x00000021 },
+       { CS35L41_ASP_TX3_SRC,                  0x00000000 },
+       { CS35L41_ASP_TX4_SRC,                  0x00000000 },
        { CS35L41_DSP1_RX1_SRC,                 0x00000008 },
        { CS35L41_DSP1_RX2_SRC,                 0x00000009 },
        { CS35L41_DSP1_RX3_SRC,                 0x00000018 },
@@ -644,6 +644,8 @@ static const struct reg_sequence cs35l41_reva0_errata_patch[] = {
        { CS35L41_DSP1_XM_ACCEL_PL0_PRI, 0x00000000 },
        { CS35L41_PWR_CTRL2,             0x00000000 },
        { CS35L41_AMP_GAIN_CTRL,         0x00000000 },
+       { CS35L41_ASP_TX3_SRC,           0x00000000 },
+       { CS35L41_ASP_TX4_SRC,           0x00000000 },
 };
 
 static const struct reg_sequence cs35l41_revb0_errata_patch[] = {
@@ -655,6 +657,8 @@ static const struct reg_sequence cs35l41_revb0_errata_patch[] = {
        { CS35L41_DSP1_XM_ACCEL_PL0_PRI, 0x00000000 },
        { CS35L41_PWR_CTRL2,             0x00000000 },
        { CS35L41_AMP_GAIN_CTRL,         0x00000000 },
+       { CS35L41_ASP_TX3_SRC,           0x00000000 },
+       { CS35L41_ASP_TX4_SRC,           0x00000000 },
 };
 
 static const struct reg_sequence cs35l41_revb2_errata_patch[] = {
@@ -666,6 +670,8 @@ static const struct reg_sequence cs35l41_revb2_errata_patch[] = {
        { CS35L41_DSP1_XM_ACCEL_PL0_PRI, 0x00000000 },
        { CS35L41_PWR_CTRL2,             0x00000000 },
        { CS35L41_AMP_GAIN_CTRL,         0x00000000 },
+       { CS35L41_ASP_TX3_SRC,           0x00000000 },
+       { CS35L41_ASP_TX4_SRC,           0x00000000 },
 };
 
 static const struct reg_sequence cs35l41_fs_errata_patch[] = {
index 3e68a07..71ab2a5 100644 (file)
@@ -333,7 +333,7 @@ static const struct snd_kcontrol_new cs35l41_aud_controls[] = {
        SOC_SINGLE("HW Noise Gate Enable", CS35L41_NG_CFG, 8, 63, 0),
        SOC_SINGLE("HW Noise Gate Delay", CS35L41_NG_CFG, 4, 7, 0),
        SOC_SINGLE("HW Noise Gate Threshold", CS35L41_NG_CFG, 0, 7, 0),
-       SOC_SINGLE("Aux Noise Gate CH1 Enable",
+       SOC_SINGLE("Aux Noise Gate CH1 Switch",
                   CS35L41_MIXER_NGATE_CH1_CFG, 16, 1, 0),
        SOC_SINGLE("Aux Noise Gate CH1 Entry Delay",
                   CS35L41_MIXER_NGATE_CH1_CFG, 8, 15, 0),
@@ -341,15 +341,15 @@ static const struct snd_kcontrol_new cs35l41_aud_controls[] = {
                   CS35L41_MIXER_NGATE_CH1_CFG, 0, 7, 0),
        SOC_SINGLE("Aux Noise Gate CH2 Entry Delay",
                   CS35L41_MIXER_NGATE_CH2_CFG, 8, 15, 0),
-       SOC_SINGLE("Aux Noise Gate CH2 Enable",
+       SOC_SINGLE("Aux Noise Gate CH2 Switch",
                   CS35L41_MIXER_NGATE_CH2_CFG, 16, 1, 0),
        SOC_SINGLE("Aux Noise Gate CH2 Threshold",
                   CS35L41_MIXER_NGATE_CH2_CFG, 0, 7, 0),
-       SOC_SINGLE("SCLK Force", CS35L41_SP_FORMAT, CS35L41_SCLK_FRC_SHIFT, 1, 0),
-       SOC_SINGLE("LRCLK Force", CS35L41_SP_FORMAT, CS35L41_LRCLK_FRC_SHIFT, 1, 0),
-       SOC_SINGLE("Invert Class D", CS35L41_AMP_DIG_VOL_CTRL,
+       SOC_SINGLE("SCLK Force Switch", CS35L41_SP_FORMAT, CS35L41_SCLK_FRC_SHIFT, 1, 0),
+       SOC_SINGLE("LRCLK Force Switch", CS35L41_SP_FORMAT, CS35L41_LRCLK_FRC_SHIFT, 1, 0),
+       SOC_SINGLE("Invert Class D Switch", CS35L41_AMP_DIG_VOL_CTRL,
                   CS35L41_AMP_INV_PCM_SHIFT, 1, 0),
-       SOC_SINGLE("Amp Gain ZC", CS35L41_AMP_GAIN_CTRL,
+       SOC_SINGLE("Amp Gain ZC Switch", CS35L41_AMP_GAIN_CTRL,
                   CS35L41_AMP_GAIN_ZC_SHIFT, 1, 0),
        WM_ADSP2_PRELOAD_SWITCH("DSP1", 1),
        WM_ADSP_FW_CONTROL("DSP1", 0),
index 391fd7d..1c7d52b 100644 (file)
@@ -122,6 +122,9 @@ static int cs47l15_in1_adc_put(struct snd_kcontrol *kcontrol,
                snd_soc_kcontrol_component(kcontrol);
        struct cs47l15 *cs47l15 = snd_soc_component_get_drvdata(component);
 
+       if (!!ucontrol->value.integer.value[0] == cs47l15->in1_lp_mode)
+               return 0;
+
        switch (ucontrol->value.integer.value[0]) {
        case 0:
                /* Set IN1 to normal mode */
@@ -150,7 +153,7 @@ static int cs47l15_in1_adc_put(struct snd_kcontrol *kcontrol,
                break;
        }
 
-       return 0;
+       return 1;
 }
 
 static const struct snd_kcontrol_new cs47l15_snd_controls[] = {
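
The put() return value is part of the ALSA control ABI: 1 means the value actually changed (and a notification event goes to userspace), 0 means no-op, negative is an error. Returning 0 on a real change, as the old code did, suppresses the event; the same correction recurs in the madera/cs47l92 hunks below. Sketched minimally, with a hypothetical private struct:

#include <sound/control.h>

struct example_priv { bool cached; };	/* hypothetical */

static int example_put(struct snd_kcontrol *kcontrol,
		       struct snd_ctl_elem_value *ucontrol)
{
	struct example_priv *priv = snd_kcontrol_chip(kcontrol);
	bool val = !!ucontrol->value.integer.value[0];

	if (val == priv->cached)
		return 0;		/* unchanged: no notification */

	priv->cached = val;
	/* ...apply the new value to hardware here... */
	return 1;			/* changed: notify listeners */
}
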
index a1b8dcd..444026b 100644 (file)
@@ -119,7 +119,13 @@ static int cs47l92_put_demux(struct snd_kcontrol *kcontrol,
 end:
        snd_soc_dapm_mutex_unlock(dapm);
 
-       return snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
+       ret = snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
+       if (ret < 0) {
+               dev_err(madera->dev, "Failed to update demux power state: %d\n", ret);
+               return ret;
+       }
+
+       return change;
 }
 
 static SOC_ENUM_SINGLE_DECL(cs47l92_outdemux_enum,
index 272041c..b9f19fb 100644 (file)
@@ -618,7 +618,13 @@ int madera_out1_demux_put(struct snd_kcontrol *kcontrol,
 end:
        snd_soc_dapm_mutex_unlock(dapm);
 
-       return snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
+       ret = snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
+       if (ret < 0) {
+               dev_err(madera->dev, "Failed to update demux power state: %d\n", ret);
+               return ret;
+       }
+
+       return change;
 }
 EXPORT_SYMBOL_GPL(madera_out1_demux_put);
 
@@ -893,7 +899,7 @@ static int madera_adsp_rate_put(struct snd_kcontrol *kcontrol,
        struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
        const int adsp_num = e->shift_l;
        const unsigned int item = ucontrol->value.enumerated.item[0];
-       int ret;
+       int ret = 0;
 
        if (item >= e->items)
                return -EINVAL;
@@ -910,10 +916,10 @@ static int madera_adsp_rate_put(struct snd_kcontrol *kcontrol,
                         "Cannot change '%s' while in use by active audio paths\n",
                         kcontrol->id.name);
                ret = -EBUSY;
-       } else {
+       } else if (priv->adsp_rate_cache[adsp_num] != e->values[item]) {
                /* Volatile register so defer until the codec is powered up */
                priv->adsp_rate_cache[adsp_num] = e->values[item];
-               ret = 0;
+               ret = 1;
        }
 
        mutex_unlock(&priv->rate_lock);
index f47e956..97b6447 100644 (file)
@@ -862,6 +862,16 @@ static int max98373_sdw_probe(struct sdw_slave *slave,
        return max98373_init(slave, regmap);
 }
 
+static int max98373_sdw_remove(struct sdw_slave *slave)
+{
+       struct max98373_priv *max98373 = dev_get_drvdata(&slave->dev);
+
+       if (max98373->first_hw_init)
+               pm_runtime_disable(&slave->dev);
+
+       return 0;
+}
+
 #if defined(CONFIG_OF)
 static const struct of_device_id max98373_of_match[] = {
        { .compatible = "maxim,max98373", },
@@ -893,7 +903,7 @@ static struct sdw_driver max98373_sdw_driver = {
                .pm = &max98373_pm,
        },
        .probe = max98373_sdw_probe,
-       .remove = NULL,
+       .remove = max98373_sdw_remove,
        .ops = &max98373_slave_ops,
        .id_table = max98373_id,
 };
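
This .remove fix repeats across the SoundWire codecs below (rt1308, rt1316, rt5682, rt700, rt711): runtime PM is enabled only once the first hardware init has completed, so remove must disable it under the same condition or the enable count leaks across unbind/rebind. The shared shape, with a hypothetical private struct:

#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw.h>

struct example_priv { bool first_hw_init; };	/* hypothetical */

static int example_sdw_remove(struct sdw_slave *slave)
{
	struct example_priv *priv = dev_get_drvdata(&slave->dev);

	if (priv->first_hw_init)
		pm_runtime_disable(&slave->dev);

	return 0;
}
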
index 56eb62b..34db388 100644 (file)
@@ -342,12 +342,15 @@ static int max98396_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 {
        struct snd_soc_component *component = codec_dai->component;
        struct max98396_priv *max98396 = snd_soc_component_get_drvdata(component);
-       unsigned int format = 0;
+       unsigned int format_mask, format = 0;
        unsigned int bclk_pol = 0;
        int ret, status;
        int reg;
        bool update = false;
 
+       format_mask = MAX98396_PCM_MODE_CFG_FORMAT_MASK |
+                     MAX98396_PCM_MODE_CFG_LRCLKEDGE;
+
        dev_dbg(component->dev, "%s: fmt 0x%08X\n", __func__, fmt);
 
        switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
@@ -395,7 +398,7 @@ static int max98396_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
                ret = regmap_read(max98396->regmap, MAX98396_R2041_PCM_MODE_CFG, &reg);
                if (ret < 0)
                        return -EINVAL;
-               if (format != (reg & MAX98396_PCM_BCLKEDGE_BSEL_MASK)) {
+               if (format != (reg & format_mask)) {
                        update = true;
                } else {
                        ret = regmap_read(max98396->regmap,
@@ -412,8 +415,7 @@ static int max98396_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 
        regmap_update_bits(max98396->regmap,
                           MAX98396_R2041_PCM_MODE_CFG,
-                          MAX98396_PCM_BCLKEDGE_BSEL_MASK,
-                          format);
+                          format_mask, format);
 
        regmap_update_bits(max98396->regmap,
                           MAX98396_R2042_PCM_CLK_SETUP,
index 1c11b42..72f673f 100644 (file)
@@ -691,6 +691,16 @@ static int rt1308_sdw_probe(struct sdw_slave *slave,
        return 0;
 }
 
+static int rt1308_sdw_remove(struct sdw_slave *slave)
+{
+       struct rt1308_sdw_priv *rt1308 = dev_get_drvdata(&slave->dev);
+
+       if (rt1308->first_hw_init)
+               pm_runtime_disable(&slave->dev);
+
+       return 0;
+}
+
 static const struct sdw_device_id rt1308_id[] = {
        SDW_SLAVE_ENTRY_EXT(0x025d, 0x1308, 0x2, 0, 0),
        {},
@@ -750,6 +760,7 @@ static struct sdw_driver rt1308_sdw_driver = {
                .pm = &rt1308_pm,
        },
        .probe = rt1308_sdw_probe,
+       .remove = rt1308_sdw_remove,
        .ops = &rt1308_slave_ops,
        .id_table = rt1308_id,
 };
index 60baa9f..2d6b5f9 100644 (file)
@@ -676,6 +676,16 @@ static int rt1316_sdw_probe(struct sdw_slave *slave,
        return rt1316_sdw_init(&slave->dev, regmap, slave);
 }
 
+static int rt1316_sdw_remove(struct sdw_slave *slave)
+{
+       struct rt1316_sdw_priv *rt1316 = dev_get_drvdata(&slave->dev);
+
+       if (rt1316->first_hw_init)
+               pm_runtime_disable(&slave->dev);
+
+       return 0;
+}
+
 static const struct sdw_device_id rt1316_id[] = {
        SDW_SLAVE_ENTRY_EXT(0x025d, 0x1316, 0x3, 0x1, 0),
        {},
@@ -735,6 +745,7 @@ static struct sdw_driver rt1316_sdw_driver = {
                .pm = &rt1316_pm,
        },
        .probe = rt1316_sdw_probe,
+       .remove = rt1316_sdw_remove,
        .ops = &rt1316_slave_ops,
        .id_table = rt1316_id,
 };
index 69c80d8..18b3da9 100644 (file)
@@ -1984,7 +1984,12 @@ static int rt5640_set_bias_level(struct snd_soc_component *component,
                snd_soc_component_write(component, RT5640_PWR_DIG2, 0x0000);
                snd_soc_component_write(component, RT5640_PWR_VOL, 0x0000);
                snd_soc_component_write(component, RT5640_PWR_MIXER, 0x0000);
-               snd_soc_component_write(component, RT5640_PWR_ANLG1, 0x0000);
+               if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER)
+                       snd_soc_component_write(component, RT5640_PWR_ANLG1,
+                               0x0018);
+               else
+                       snd_soc_component_write(component, RT5640_PWR_ANLG1,
+                               0x0000);
                snd_soc_component_write(component, RT5640_PWR_ANLG2, 0x0000);
                break;
 
@@ -2393,9 +2398,15 @@ static void rt5640_jack_work(struct work_struct *work)
 static irqreturn_t rt5640_irq(int irq, void *data)
 {
        struct rt5640_priv *rt5640 = data;
+       int delay = 0;
+
+       if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER) {
+               cancel_delayed_work_sync(&rt5640->jack_work);
+               delay = 100;
+       }
 
        if (rt5640->jack)
-               queue_delayed_work(system_long_wq, &rt5640->jack_work, 0);
+               queue_delayed_work(system_long_wq, &rt5640->jack_work, delay);
 
        return IRQ_HANDLED;
 }
@@ -2580,6 +2591,12 @@ static void rt5640_enable_hda_jack_detect(
 
        snd_soc_component_update_bits(component, RT5640_DUMMY1, 0x400, 0x0);
 
+       snd_soc_component_update_bits(component, RT5640_PWR_ANLG1,
+               RT5640_PWR_VREF2, RT5640_PWR_VREF2);
+       usleep_range(10000, 15000);
+       snd_soc_component_update_bits(component, RT5640_PWR_ANLG1,
+               RT5640_PWR_FV2, RT5640_PWR_FV2);
+
        rt5640->jack = jack;
 
        ret = request_irq(rt5640->irq, rt5640_irq,
@@ -2696,16 +2713,13 @@ static int rt5640_probe(struct snd_soc_component *component)
 
        if (device_property_read_u32(component->dev,
                                     "realtek,jack-detect-source", &val) == 0) {
-               if (val <= RT5640_JD_SRC_GPIO4) {
+               if (val <= RT5640_JD_SRC_GPIO4)
                        rt5640->jd_src = val << RT5640_JD_SFT;
-               } else if (val == RT5640_JD_SRC_HDA_HEADER) {
+               else if (val == RT5640_JD_SRC_HDA_HEADER)
                        rt5640->jd_src = RT5640_JD_SRC_HDA_HEADER;
-                       snd_soc_component_update_bits(component, RT5640_DUMMY1,
-                               0x0300, 0x0);
-               } else {
+               else
                        dev_warn(component->dev, "Warning: Invalid jack-detect-source value: %d, leaving jack-detect disabled\n",
                                 val);
-               }
        }
 
        if (!device_property_read_bool(component->dev, "realtek,jack-detect-not-inverted"))
index 248257a..f04e18c 100644 (file)
@@ -719,9 +719,12 @@ static int rt5682_sdw_remove(struct sdw_slave *slave)
 {
        struct rt5682_priv *rt5682 = dev_get_drvdata(&slave->dev);
 
-       if (rt5682 && rt5682->hw_init)
+       if (rt5682->hw_init)
                cancel_delayed_work_sync(&rt5682->jack_detect_work);
 
+       if (rt5682->first_hw_init)
+               pm_runtime_disable(&slave->dev);
+
        return 0;
 }
 
index bda5948..f7439e4 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/soundwire/sdw_type.h>
 #include <linux/soundwire/sdw_registers.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <sound/soc.h>
 #include "rt700.h"
@@ -463,11 +464,14 @@ static int rt700_sdw_remove(struct sdw_slave *slave)
 {
        struct rt700_priv *rt700 = dev_get_drvdata(&slave->dev);
 
-       if (rt700 && rt700->hw_init) {
+       if (rt700->hw_init) {
                cancel_delayed_work_sync(&rt700->jack_detect_work);
                cancel_delayed_work_sync(&rt700->jack_btn_check_work);
        }
 
+       if (rt700->first_hw_init)
+               pm_runtime_disable(&slave->dev);
+
        return 0;
 }
 
index af32295..9bceeeb 100644 (file)
@@ -162,7 +162,7 @@ static void rt700_jack_detect_handler(struct work_struct *work)
        if (!rt700->hs_jack)
                return;
 
-       if (!rt700->component->card->instantiated)
+       if (!rt700->component->card || !rt700->component->card->instantiated)
                return;
 
        reg = RT700_VERB_GET_PIN_SENSE | RT700_HP_OUT;
@@ -315,17 +315,27 @@ static int rt700_set_jack_detect(struct snd_soc_component *component,
        struct snd_soc_jack *hs_jack, void *data)
 {
        struct rt700_priv *rt700 = snd_soc_component_get_drvdata(component);
+       int ret;
 
        rt700->hs_jack = hs_jack;
 
-       if (!rt700->hw_init) {
-               dev_dbg(&rt700->slave->dev,
-                       "%s hw_init not ready yet\n", __func__);
+       ret = pm_runtime_resume_and_get(component->dev);
+       if (ret < 0) {
+               if (ret != -EACCES) {
+                       dev_err(component->dev, "%s: failed to resume %d\n", __func__, ret);
+                       return ret;
+               }
+
+               /* pm_runtime not enabled yet */
+               dev_dbg(component->dev, "%s: skipping jack init for now\n", __func__);
                return 0;
        }
 
        rt700_jack_init(rt700);
 
+       pm_runtime_mark_last_busy(component->dev);
+       pm_runtime_put_autosuspend(component->dev);
+
        return 0;
 }
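
pm_runtime_resume_and_get() returns -EACCES while runtime PM is still disabled; during early probe that simply means "not ready yet", so the jack setup is deferred rather than treated as a failure. The error-handling shape on its own, as a generic sketch:

#include <linux/pm_runtime.h>

/* Sketch: -EACCES ("runtime PM disabled") is benign here; anything
 * else is a real resume failure. */
static int example_powered_call(struct device *dev)
{
	int ret = pm_runtime_resume_and_get(dev);

	if (ret < 0) {
		if (ret != -EACCES)
			return ret;
		return 0;	/* too early; retried later from io_init */
	}

	/* ...device is powered: do the work... */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}
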
 
@@ -1115,6 +1125,11 @@ int rt700_init(struct device *dev, struct regmap *sdw_regmap,
 
        mutex_init(&rt700->disable_irq_lock);
 
+       INIT_DELAYED_WORK(&rt700->jack_detect_work,
+                         rt700_jack_detect_handler);
+       INIT_DELAYED_WORK(&rt700->jack_btn_check_work,
+                         rt700_btn_check_handler);
+
        /*
         * Mark hw_init to false
         * HW init will be performed when device reports present
@@ -1209,13 +1224,6 @@ int rt700_io_init(struct device *dev, struct sdw_slave *slave)
        /* Finish Initial Settings, set power to D3 */
        regmap_write(rt700->regmap, RT700_SET_AUDIO_POWER_STATE, AC_PWRST_D3);
 
-       if (!rt700->first_hw_init) {
-               INIT_DELAYED_WORK(&rt700->jack_detect_work,
-                       rt700_jack_detect_handler);
-               INIT_DELAYED_WORK(&rt700->jack_btn_check_work,
-                       rt700_btn_check_handler);
-       }
-
        /*
         * if the set_jack callback occurred earlier than io_init,
         * we set up the jack detection function now
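Moving the INIT_DELAYED_WORK() calls out of the !first_hw_init branch of io_init and into rt700_init() means the work structs are valid from probe time onward; a device that probed but never enumerated could previously reach remove() with uninitialized work, and cancel_delayed_work_sync() would operate on garbage. A sketch of the safe ordering (hypothetical driver):

	static int foo_init(struct device *dev, struct foo_priv *foo)
	{
		/* unconditional: the work item is always valid after this */
		INIT_DELAYED_WORK(&foo->jack_detect_work, foo_jack_detect_handler);
		return 0;
	}

	static int foo_sdw_remove(struct sdw_slave *slave)
	{
		struct foo_priv *foo = dev_get_drvdata(&slave->dev);

		cancel_delayed_work_sync(&foo->jack_detect_work);
		return 0;
	}
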
index aaf5af1..a085b2f 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/soundwire/sdw_registers.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 
 #include "rt711-sdca.h"
 #include "rt711-sdca-sdw.h"
@@ -364,11 +365,17 @@ static int rt711_sdca_sdw_remove(struct sdw_slave *slave)
 {
        struct rt711_sdca_priv *rt711 = dev_get_drvdata(&slave->dev);
 
-       if (rt711 && rt711->hw_init) {
+       if (rt711->hw_init) {
                cancel_delayed_work_sync(&rt711->jack_detect_work);
                cancel_delayed_work_sync(&rt711->jack_btn_check_work);
        }
 
+       if (rt711->first_hw_init)
+               pm_runtime_disable(&slave->dev);
+
+       mutex_destroy(&rt711->calibrate_mutex);
+       mutex_destroy(&rt711->disable_irq_lock);
+
        return 0;
 }
 
index 57629c1..5ad53bb 100644 (file)
@@ -34,7 +34,7 @@ static int rt711_sdca_index_write(struct rt711_sdca_priv *rt711,
 
        ret = regmap_write(regmap, addr, value);
        if (ret < 0)
-               dev_err(rt711->component->dev,
+               dev_err(&rt711->slave->dev,
                        "Failed to set private value: %06x <= %04x ret=%d\n",
                        addr, value, ret);
 
@@ -50,7 +50,7 @@ static int rt711_sdca_index_read(struct rt711_sdca_priv *rt711,
 
        ret = regmap_read(regmap, addr, value);
        if (ret < 0)
-               dev_err(rt711->component->dev,
+               dev_err(&rt711->slave->dev,
                        "Failed to get private value: %06x => %04x ret=%d\n",
                        addr, *value, ret);
 
@@ -294,7 +294,7 @@ static void rt711_sdca_jack_detect_handler(struct work_struct *work)
        if (!rt711->hs_jack)
                return;
 
-       if (!rt711->component->card->instantiated)
+       if (!rt711->component->card || !rt711->component->card->instantiated)
                return;
 
        /* SDW_SCP_SDCA_INT_SDCA_0 is used for jack detection */
@@ -487,16 +487,27 @@ static int rt711_sdca_set_jack_detect(struct snd_soc_component *component,
        struct snd_soc_jack *hs_jack, void *data)
 {
        struct rt711_sdca_priv *rt711 = snd_soc_component_get_drvdata(component);
+       int ret;
 
        rt711->hs_jack = hs_jack;
 
-       if (!rt711->hw_init) {
-               dev_dbg(&rt711->slave->dev,
-                       "%s hw_init not ready yet\n", __func__);
+       ret = pm_runtime_resume_and_get(component->dev);
+       if (ret < 0) {
+               if (ret != -EACCES) {
+                       dev_err(component->dev, "%s: failed to resume %d\n", __func__, ret);
+                       return ret;
+               }
+
+               /* pm_runtime not enabled yet */
+               dev_dbg(component->dev, "%s: skipping jack init for now\n", __func__);
                return 0;
        }
 
        rt711_sdca_jack_init(rt711);
+
+       pm_runtime_mark_last_busy(component->dev);
+       pm_runtime_put_autosuspend(component->dev);
+
        return 0;
 }
 
@@ -1190,14 +1201,6 @@ static int rt711_sdca_probe(struct snd_soc_component *component)
        return 0;
 }
 
-static void rt711_sdca_remove(struct snd_soc_component *component)
-{
-       struct rt711_sdca_priv *rt711 = snd_soc_component_get_drvdata(component);
-
-       regcache_cache_only(rt711->regmap, true);
-       regcache_cache_only(rt711->mbq_regmap, true);
-}
-
 static const struct snd_soc_component_driver soc_sdca_dev_rt711 = {
        .probe = rt711_sdca_probe,
        .controls = rt711_sdca_snd_controls,
@@ -1207,7 +1210,6 @@ static const struct snd_soc_component_driver soc_sdca_dev_rt711 = {
        .dapm_routes = rt711_sdca_audio_map,
        .num_dapm_routes = ARRAY_SIZE(rt711_sdca_audio_map),
        .set_jack = rt711_sdca_set_jack_detect,
-       .remove = rt711_sdca_remove,
        .endianness = 1,
 };
 
@@ -1412,8 +1414,12 @@ int rt711_sdca_init(struct device *dev, struct regmap *regmap,
        rt711->regmap = regmap;
        rt711->mbq_regmap = mbq_regmap;
 
+       mutex_init(&rt711->calibrate_mutex);
        mutex_init(&rt711->disable_irq_lock);
 
+       INIT_DELAYED_WORK(&rt711->jack_detect_work, rt711_sdca_jack_detect_handler);
+       INIT_DELAYED_WORK(&rt711->jack_btn_check_work, rt711_sdca_btn_check_handler);
+
        /*
         * Mark hw_init to false
         * HW init will be performed when device reports present
@@ -1545,14 +1551,6 @@ int rt711_sdca_io_init(struct device *dev, struct sdw_slave *slave)
        rt711_sdca_index_update_bits(rt711, RT711_VENDOR_HDA_CTL,
                RT711_PUSH_BTN_INT_CTL0, 0x20, 0x00);
 
-       if (!rt711->first_hw_init) {
-               INIT_DELAYED_WORK(&rt711->jack_detect_work,
-                       rt711_sdca_jack_detect_handler);
-               INIT_DELAYED_WORK(&rt711->jack_btn_check_work,
-                       rt711_sdca_btn_check_handler);
-               mutex_init(&rt711->calibrate_mutex);
-       }
-
        /* calibration */
        ret = rt711_sdca_calibration(rt711);
        if (ret < 0)
index bda2cc9..4fe68bc 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/soundwire/sdw_type.h>
 #include <linux/soundwire/sdw_registers.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <sound/soc.h>
 #include "rt711.h"
@@ -464,12 +465,18 @@ static int rt711_sdw_remove(struct sdw_slave *slave)
 {
        struct rt711_priv *rt711 = dev_get_drvdata(&slave->dev);
 
-       if (rt711 && rt711->hw_init) {
+       if (rt711->hw_init) {
                cancel_delayed_work_sync(&rt711->jack_detect_work);
                cancel_delayed_work_sync(&rt711->jack_btn_check_work);
                cancel_work_sync(&rt711->calibration_work);
        }
 
+       if (rt711->first_hw_init)
+               pm_runtime_disable(&slave->dev);
+
+       mutex_destroy(&rt711->calibrate_mutex);
+       mutex_destroy(&rt711->disable_irq_lock);
+
        return 0;
 }
 
index 9838fb4..9df800a 100644 (file)
@@ -242,7 +242,7 @@ static void rt711_jack_detect_handler(struct work_struct *work)
        if (!rt711->hs_jack)
                return;
 
-       if (!rt711->component->card->instantiated)
+       if (!rt711->component->card || !rt711->component->card->instantiated)
                return;
 
        if (pm_runtime_status_suspended(rt711->slave->dev.parent)) {
@@ -457,17 +457,27 @@ static int rt711_set_jack_detect(struct snd_soc_component *component,
        struct snd_soc_jack *hs_jack, void *data)
 {
        struct rt711_priv *rt711 = snd_soc_component_get_drvdata(component);
+       int ret;
 
        rt711->hs_jack = hs_jack;
 
-       if (!rt711->hw_init) {
-               dev_dbg(&rt711->slave->dev,
-                       "%s hw_init not ready yet\n", __func__);
+       ret = pm_runtime_resume_and_get(component->dev);
+       if (ret < 0) {
+               if (ret != -EACCES) {
+                       dev_err(component->dev, "%s: failed to resume %d\n", __func__, ret);
+                       return ret;
+               }
+
+               /* pm_runtime not enabled yet */
+               dev_dbg(component->dev, "%s: skipping jack init for now\n", __func__);
                return 0;
        }
 
        rt711_jack_init(rt711);
 
+       pm_runtime_mark_last_busy(component->dev);
+       pm_runtime_put_autosuspend(component->dev);
+
        return 0;
 }
 
@@ -932,13 +942,6 @@ static int rt711_probe(struct snd_soc_component *component)
        return 0;
 }
 
-static void rt711_remove(struct snd_soc_component *component)
-{
-       struct rt711_priv *rt711 = snd_soc_component_get_drvdata(component);
-
-       regcache_cache_only(rt711->regmap, true);
-}
-
 static const struct snd_soc_component_driver soc_codec_dev_rt711 = {
        .probe = rt711_probe,
        .set_bias_level = rt711_set_bias_level,
@@ -949,7 +952,6 @@ static const struct snd_soc_component_driver soc_codec_dev_rt711 = {
        .dapm_routes = rt711_audio_map,
        .num_dapm_routes = ARRAY_SIZE(rt711_audio_map),
        .set_jack = rt711_set_jack_detect,
-       .remove = rt711_remove,
        .endianness = 1,
 };
 
@@ -1204,8 +1206,13 @@ int rt711_init(struct device *dev, struct regmap *sdw_regmap,
        rt711->sdw_regmap = sdw_regmap;
        rt711->regmap = regmap;
 
+       mutex_init(&rt711->calibrate_mutex);
        mutex_init(&rt711->disable_irq_lock);
 
+       INIT_DELAYED_WORK(&rt711->jack_detect_work, rt711_jack_detect_handler);
+       INIT_DELAYED_WORK(&rt711->jack_btn_check_work, rt711_btn_check_handler);
+       INIT_WORK(&rt711->calibration_work, rt711_calibration_work);
+
        /*
         * Mark hw_init to false
         * HW init will be performed when device reports present
@@ -1313,15 +1320,8 @@ int rt711_io_init(struct device *dev, struct sdw_slave *slave)
 
        if (rt711->first_hw_init)
                rt711_calibration(rt711);
-       else {
-               INIT_DELAYED_WORK(&rt711->jack_detect_work,
-                       rt711_jack_detect_handler);
-               INIT_DELAYED_WORK(&rt711->jack_btn_check_work,
-                       rt711_btn_check_handler);
-               mutex_init(&rt711->calibrate_mutex);
-               INIT_WORK(&rt711->calibration_work, rt711_calibration_work);
+       else
                schedule_work(&rt711->calibration_work);
-       }
 
        /*
         * if the set_jack callback occurred earlier than io_init,
index 0ecd294..13e731d 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/soundwire/sdw_type.h>
 #include <linux/soundwire/sdw_registers.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <sound/soc.h>
 #include "rt715-sdca.h"
@@ -193,6 +194,16 @@ static int rt715_sdca_sdw_probe(struct sdw_slave *slave,
        return rt715_sdca_init(&slave->dev, mbq_regmap, regmap, slave);
 }
 
+static int rt715_sdca_sdw_remove(struct sdw_slave *slave)
+{
+       struct rt715_sdca_priv *rt715 = dev_get_drvdata(&slave->dev);
+
+       if (rt715->first_hw_init)
+               pm_runtime_disable(&slave->dev);
+
+       return 0;
+}
+
 static const struct sdw_device_id rt715_sdca_id[] = {
        SDW_SLAVE_ENTRY_EXT(0x025d, 0x715, 0x3, 0x1, 0),
        SDW_SLAVE_ENTRY_EXT(0x025d, 0x714, 0x3, 0x1, 0),
@@ -267,6 +278,7 @@ static struct sdw_driver rt715_sdw_driver = {
                .pm = &rt715_pm,
        },
        .probe = rt715_sdca_sdw_probe,
+       .remove = rt715_sdca_sdw_remove,
        .ops = &rt715_sdca_slave_ops,
        .id_table = rt715_sdca_id,
 };
index a7b21b0..b047bf8 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/soundwire/sdw_type.h>
 #include <linux/soundwire/sdw_registers.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <linux/of.h>
 #include <linux/regmap.h>
 #include <sound/soc.h>
@@ -514,6 +515,16 @@ static int rt715_sdw_probe(struct sdw_slave *slave,
        return 0;
 }
 
+static int rt715_sdw_remove(struct sdw_slave *slave)
+{
+       struct rt715_priv *rt715 = dev_get_drvdata(&slave->dev);
+
+       if (rt715->first_hw_init)
+               pm_runtime_disable(&slave->dev);
+
+       return 0;
+}
+
 static const struct sdw_device_id rt715_id[] = {
        SDW_SLAVE_ENTRY_EXT(0x025d, 0x714, 0x2, 0, 0),
        SDW_SLAVE_ENTRY_EXT(0x025d, 0x715, 0x2, 0, 0),
@@ -575,6 +586,7 @@ static struct sdw_driver rt715_sdw_driver = {
                   .pm = &rt715_pm,
                   },
        .probe = rt715_sdw_probe,
+       .remove = rt715_sdw_remove,
        .ops = &rt715_slave_ops,
        .id_table = rt715_id,
 };
index 2aa48ae..3363d16 100644 (file)
@@ -1795,6 +1795,9 @@ static int sgtl5000_i2c_remove(struct i2c_client *client)
 {
        struct sgtl5000_priv *sgtl5000 = i2c_get_clientdata(client);
 
+       regmap_write(sgtl5000->regmap, SGTL5000_CHIP_DIG_POWER, SGTL5000_DIG_POWER_DEFAULT);
+       regmap_write(sgtl5000->regmap, SGTL5000_CHIP_ANA_POWER, SGTL5000_ANA_POWER_DEFAULT);
+
        clk_disable_unprepare(sgtl5000->mclk);
        regulator_bulk_disable(sgtl5000->num_supplies, sgtl5000->supplies);
        regulator_bulk_free(sgtl5000->num_supplies, sgtl5000->supplies);
@@ -1802,6 +1805,11 @@ static int sgtl5000_i2c_remove(struct i2c_client *client)
        return 0;
 }
 
+static void sgtl5000_i2c_shutdown(struct i2c_client *client)
+{
+       sgtl5000_i2c_remove(client);
+}
+
 static const struct i2c_device_id sgtl5000_id[] = {
        {"sgtl5000", 0},
        {},
@@ -1822,6 +1830,7 @@ static struct i2c_driver sgtl5000_i2c_driver = {
        },
        .probe_new = sgtl5000_i2c_probe,
        .remove = sgtl5000_i2c_remove,
+       .shutdown = sgtl5000_i2c_shutdown,
        .id_table = sgtl5000_id,
 };
 
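The sgtl5000 change registers a shutdown handler that reuses the remove path, so the codec's digital and analog power registers are written back to their defaults while MCLK and the supplies are still up; this avoids leaving the chip half-powered across a reboot or poweroff. A sketch of the ordering (hypothetical foo_* names; the register defaults correspond to the sgtl5000.h hunk that follows):

	static int foo_i2c_remove(struct i2c_client *client)
	{
		struct foo_priv *foo = i2c_get_clientdata(client);

		/* quiesce the chip while MCLK and supplies are still alive */
		regmap_write(foo->regmap, FOO_CHIP_DIG_POWER, FOO_DIG_POWER_DEFAULT);
		regmap_write(foo->regmap, FOO_CHIP_ANA_POWER, FOO_ANA_POWER_DEFAULT);

		clk_disable_unprepare(foo->mclk);
		regulator_bulk_disable(foo->num_supplies, foo->supplies);
		return 0;
	}

	static void foo_i2c_shutdown(struct i2c_client *client)
	{
		foo_i2c_remove(client);	/* same quiesce sequence on reboot/poweroff */
	}
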
index 56ec586..3a808c7 100644 (file)
@@ -80,6 +80,7 @@
 /*
  * SGTL5000_CHIP_DIG_POWER
  */
+#define SGTL5000_DIG_POWER_DEFAULT             0x0000
 #define SGTL5000_ADC_EN                                0x0040
 #define SGTL5000_DAC_EN                                0x0020
 #define SGTL5000_DAP_POWERUP                   0x0010
index d395fef..4cb788f 100644 (file)
@@ -42,10 +42,12 @@ static void tas2764_reset(struct tas2764_priv *tas2764)
                gpiod_set_value_cansleep(tas2764->reset_gpio, 0);
                msleep(20);
                gpiod_set_value_cansleep(tas2764->reset_gpio, 1);
+               usleep_range(1000, 2000);
        }
 
        snd_soc_component_write(tas2764->component, TAS2764_SW_RST,
                                TAS2764_RST);
+       usleep_range(1000, 2000);
 }
 
 static int tas2764_set_bias_level(struct snd_soc_component *component,
@@ -107,8 +109,10 @@ static int tas2764_codec_resume(struct snd_soc_component *component)
        struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);
        int ret;
 
-       if (tas2764->sdz_gpio)
+       if (tas2764->sdz_gpio) {
                gpiod_set_value_cansleep(tas2764->sdz_gpio, 1);
+               usleep_range(1000, 2000);
+       }
 
        ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL,
                                            TAS2764_PWR_CTRL_MASK,
@@ -131,7 +135,8 @@ static const char * const tas2764_ASI1_src[] = {
 };
 
 static SOC_ENUM_SINGLE_DECL(
-       tas2764_ASI1_src_enum, TAS2764_TDM_CFG2, 4, tas2764_ASI1_src);
+       tas2764_ASI1_src_enum, TAS2764_TDM_CFG2, TAS2764_TDM_CFG2_SCFG_SHIFT,
+       tas2764_ASI1_src);
 
 static const struct snd_kcontrol_new tas2764_asi1_mux =
        SOC_DAPM_ENUM("ASI1 Source", tas2764_ASI1_src_enum);
@@ -329,20 +334,22 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 {
        struct snd_soc_component *component = dai->component;
        struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);
-       u8 tdm_rx_start_slot = 0, asi_cfg_1 = 0;
-       int iface;
+       u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0;
        int ret;
 
        switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+       case SND_SOC_DAIFMT_NB_IF:
+               asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START;
+               fallthrough;
        case SND_SOC_DAIFMT_NB_NF:
                asi_cfg_1 = TAS2764_TDM_CFG1_RX_RISING;
                break;
+       case SND_SOC_DAIFMT_IB_IF:
+               asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START;
+               fallthrough;
        case SND_SOC_DAIFMT_IB_NF:
                asi_cfg_1 = TAS2764_TDM_CFG1_RX_FALLING;
                break;
-       default:
-               dev_err(tas2764->dev, "ASI format Inverse is not found\n");
-               return -EINVAL;
        }
 
        ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG1,
@@ -353,13 +360,13 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
+               asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START;
+               fallthrough;
        case SND_SOC_DAIFMT_DSP_A:
-               iface = TAS2764_TDM_CFG2_SCFG_I2S;
                tdm_rx_start_slot = 1;
                break;
        case SND_SOC_DAIFMT_DSP_B:
        case SND_SOC_DAIFMT_LEFT_J:
-               iface = TAS2764_TDM_CFG2_SCFG_LEFT_J;
                tdm_rx_start_slot = 0;
                break;
        default:
@@ -368,14 +375,15 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                return -EINVAL;
        }
 
-       ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG1,
-                                           TAS2764_TDM_CFG1_MASK,
-                                           (tdm_rx_start_slot << TAS2764_TDM_CFG1_51_SHIFT));
+       ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG0,
+                                           TAS2764_TDM_CFG0_FRAME_START,
+                                           asi_cfg_0);
        if (ret < 0)
                return ret;
 
-       ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG2,
-                                           TAS2764_TDM_CFG2_SCFG_MASK, iface);
+       ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG1,
+                                           TAS2764_TDM_CFG1_MASK,
+                                           (tdm_rx_start_slot << TAS2764_TDM_CFG1_51_SHIFT));
        if (ret < 0)
                return ret;
 
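The rewritten tas2764_set_fmt() accumulates frame-start polarity with XOR across its two switch statements: the inverted-frame cases (NB_IF, IB_IF) toggle TAS2764_TDM_CFG0_FRAME_START and fall through to their non-inverted counterparts, and the I2S case toggles it once more before falling through to DSP_A, since I2S and DSP_A differ in where the frame starts relative to the sync edge. Because both conditions flip the same bit, XOR composes them, so an inverted frame combined with I2S cancels back out. The pattern in isolation (a sketch, constants abbreviated):

	u8 asi_cfg_0 = 0;

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_IF:
		asi_cfg_0 ^= FRAME_START;	/* inverted frame sync */
		fallthrough;
	case SND_SOC_DAIFMT_NB_NF:
		break;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		asi_cfg_0 ^= FRAME_START;	/* I2S vs DSP_A frame start */
		fallthrough;
	case SND_SOC_DAIFMT_DSP_A:
		break;
	}
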
@@ -501,8 +509,10 @@ static int tas2764_codec_probe(struct snd_soc_component *component)
 
        tas2764->component = component;
 
-       if (tas2764->sdz_gpio)
+       if (tas2764->sdz_gpio) {
                gpiod_set_value_cansleep(tas2764->sdz_gpio, 1);
+               usleep_range(1000, 2000);
+       }
 
        tas2764_reset(tas2764);
 
@@ -526,12 +536,12 @@ static int tas2764_codec_probe(struct snd_soc_component *component)
 }
 
 static DECLARE_TLV_DB_SCALE(tas2764_digital_tlv, 1100, 50, 0);
-static DECLARE_TLV_DB_SCALE(tas2764_playback_volume, -10000, 50, 0);
+static DECLARE_TLV_DB_SCALE(tas2764_playback_volume, -10050, 50, 1);
 
 static const struct snd_kcontrol_new tas2764_snd_controls[] = {
        SOC_SINGLE_TLV("Speaker Volume", TAS2764_DVC, 0,
                       TAS2764_DVC_MAX, 1, tas2764_playback_volume),
-       SOC_SINGLE_TLV("Amp Gain Volume", TAS2764_CHNL_0, 0, 0x14, 0,
+       SOC_SINGLE_TLV("Amp Gain Volume", TAS2764_CHNL_0, 1, 0x14, 0,
                       tas2764_digital_tlv),
 };
 
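On the volume TLV fix: DECLARE_TLV_DB_SCALE takes (minimum in 0.01 dB units, step in 0.01 dB units, mute flag), so changing (-10000, 50, 0) to (-10050, 50, 1) tells userspace that the lowest register value means mute and shifts every reported dB value down one step to match the hardware. Worked through (a sketch; the 0..201 raw range is illustrative):

	/* min = -100.50 dB, step = 0.50 dB, lowest value is mute */
	static DECLARE_TLV_DB_SCALE(foo_playback_volume, -10050, 50, 1);

	/* raw 0   -> mute
	 * raw 1   -> -100.00 dB
	 * raw 201 -> -100.50 dB + 201 * 0.50 dB = 0 dB
	 */
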
@@ -556,7 +566,7 @@ static const struct reg_default tas2764_reg_defaults[] = {
        { TAS2764_SW_RST, 0x00 },
        { TAS2764_PWR_CTRL, 0x1a },
        { TAS2764_DVC, 0x00 },
-       { TAS2764_CHNL_0, 0x00 },
+       { TAS2764_CHNL_0, 0x28 },
        { TAS2764_TDM_CFG0, 0x09 },
        { TAS2764_TDM_CFG1, 0x02 },
        { TAS2764_TDM_CFG2, 0x0a },
index 67d6fd9..f015f22 100644 (file)
@@ -47,6 +47,7 @@
 #define TAS2764_TDM_CFG0_MASK          GENMASK(3, 1)
 #define TAS2764_TDM_CFG0_44_1_48KHZ    BIT(3)
 #define TAS2764_TDM_CFG0_88_2_96KHZ    (BIT(3) | BIT(1))
+#define TAS2764_TDM_CFG0_FRAME_START   BIT(0)
 
 /* TDM Configuration Reg1 */
 #define TAS2764_TDM_CFG1               TAS2764_REG(0X0, 0x09)
 #define TAS2764_TDM_CFG2_RXS_16BITS    0x0
 #define TAS2764_TDM_CFG2_RXS_24BITS    BIT(0)
 #define TAS2764_TDM_CFG2_RXS_32BITS    BIT(1)
-#define TAS2764_TDM_CFG2_SCFG_MASK     GENMASK(5, 4)
-#define TAS2764_TDM_CFG2_SCFG_I2S      0x0
-#define TAS2764_TDM_CFG2_SCFG_LEFT_J   BIT(4)
-#define TAS2764_TDM_CFG2_SCFG_RIGHT_J  BIT(5)
+#define TAS2764_TDM_CFG2_SCFG_SHIFT    4
 
 /* TDM Configuration Reg3 */
 #define TAS2764_TDM_CFG3               TAS2764_REG(0X0, 0x0c)
index b55f0b8..0b72965 100644 (file)
@@ -33,7 +33,6 @@ struct adcx140_priv {
        bool micbias_vg;
 
        unsigned int dai_fmt;
-       unsigned int tdm_delay;
        unsigned int slot_width;
 };
 
@@ -792,12 +791,13 @@ static int adcx140_set_dai_tdm_slot(struct snd_soc_dai *codec_dai,
 {
        struct snd_soc_component *component = codec_dai->component;
        struct adcx140_priv *adcx140 = snd_soc_component_get_drvdata(component);
-       unsigned int lsb;
 
-       /* TDM based on DSP mode requires slots to be adjacent */
-       lsb = __ffs(tx_mask);
-       if ((lsb + 1) != __fls(tx_mask)) {
-               dev_err(component->dev, "Invalid mask, slots must be adjacent\n");
+       /*
+        * The chip itself supports arbitrary masks, but the driver currently
+        * only supports adjacent slots beginning at the first slot.
+        */
+       if (tx_mask != GENMASK(__fls(tx_mask), 0)) {
+               dev_err(component->dev, "Only lower adjacent slots are supported\n");
                return -EINVAL;
        }
 
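The new adcx140 mask test folds "slots must be adjacent and start at slot 0" into one comparison: GENMASK(__fls(tx_mask), 0) sets every bit from 0 up to the highest bit of tx_mask, so only hole-free masks anchored at bit 0 compare equal. The old __ffs/__fls check accepted exactly two adjacent slots at any offset and nothing else. A self-contained sketch:

	#include <linux/bits.h>
	#include <linux/bitops.h>

	static bool foo_slots_valid(unsigned int tx_mask)
	{
		/* true only for masks of the form 0b1, 0b11, 0b111, ... */
		return tx_mask && tx_mask == GENMASK(__fls(tx_mask), 0);
	}

	/* foo_slots_valid(0x03) -> true,  slots 0-1
	 * foo_slots_valid(0x0f) -> true,  slots 0-3
	 * foo_slots_valid(0x06) -> false, starts at slot 1
	 * foo_slots_valid(0x05) -> false, hole at slot 1
	 */
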
@@ -812,7 +812,6 @@ static int adcx140_set_dai_tdm_slot(struct snd_soc_dai *codec_dai,
                return -EINVAL;
        }
 
-       adcx140->tdm_delay = lsb;
        adcx140->slot_width = slot_width;
 
        return 0;
index 617a36a..3cb7a3e 100644 (file)
@@ -342,7 +342,7 @@ struct wcd9335_codec {
        struct regulator_bulk_data supplies[WCD9335_MAX_SUPPLY];
 
        unsigned int rx_port_value[WCD9335_RX_MAX];
-       unsigned int tx_port_value;
+       unsigned int tx_port_value[WCD9335_TX_MAX];
        int hph_l_gain;
        int hph_r_gain;
        u32 rx_bias_count;
@@ -1287,11 +1287,17 @@ static int slim_rx_mux_put(struct snd_kcontrol *kc,
        struct snd_soc_dapm_update *update = NULL;
        u32 port_id = w->shift;
 
+       if (wcd->rx_port_value[port_id] == ucontrol->value.enumerated.item[0])
+               return 0;
+
        wcd->rx_port_value[port_id] = ucontrol->value.enumerated.item[0];
 
+       /* Remove channel from any list it's in before adding it to a new one */
+       list_del_init(&wcd->rx_chs[port_id].list);
+
        switch (wcd->rx_port_value[port_id]) {
        case 0:
-               list_del_init(&wcd->rx_chs[port_id].list);
+               /* Channel already removed from lists. Nothing to do here */
                break;
        case 1:
                list_add_tail(&wcd->rx_chs[port_id].list,
@@ -1328,8 +1334,13 @@ static int slim_tx_mixer_get(struct snd_kcontrol *kc,
 
        struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kc);
        struct wcd9335_codec *wcd = dev_get_drvdata(dapm->dev);
+       struct snd_soc_dapm_widget *widget = snd_soc_dapm_kcontrol_widget(kc);
+       struct soc_mixer_control *mixer =
+                       (struct soc_mixer_control *)kc->private_value;
+       int dai_id = widget->shift;
+       int port_id = mixer->shift;
 
-       ucontrol->value.integer.value[0] = wcd->tx_port_value;
+       ucontrol->value.integer.value[0] = wcd->tx_port_value[port_id] == dai_id;
 
        return 0;
 }
@@ -1352,12 +1363,12 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc,
        case AIF2_CAP:
        case AIF3_CAP:
                /* only add to the list if value not set */
-               if (enable && !(wcd->tx_port_value & BIT(port_id))) {
-                       wcd->tx_port_value |= BIT(port_id);
+               if (enable && wcd->tx_port_value[port_id] != dai_id) {
+                       wcd->tx_port_value[port_id] = dai_id;
                        list_add_tail(&wcd->tx_chs[port_id].list,
                                        &wcd->dai[dai_id].slim_ch_list);
-               } else if (!enable && (wcd->tx_port_value & BIT(port_id))) {
-                       wcd->tx_port_value &= ~BIT(port_id);
+               } else if (!enable && wcd->tx_port_value[port_id] == dai_id) {
+                       wcd->tx_port_value[port_id] = -1;
                        list_del_init(&wcd->tx_chs[port_id].list);
                }
                break;
index c1b61b9..781ae56 100644 (file)
@@ -2519,6 +2519,9 @@ static int wcd938x_tx_mode_put(struct snd_kcontrol *kcontrol,
        struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
        int path = e->shift_l;
 
+       if (wcd938x->tx_mode[path] == ucontrol->value.enumerated.item[0])
+               return 0;
+
        wcd938x->tx_mode[path] = ucontrol->value.enumerated.item[0];
 
        return 1;
@@ -2541,6 +2544,9 @@ static int wcd938x_rx_hph_mode_put(struct snd_kcontrol *kcontrol,
        struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
        struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
 
+       if (wcd938x->hph_mode == ucontrol->value.enumerated.item[0])
+               return 0;
+
        wcd938x->hph_mode = ucontrol->value.enumerated.item[0];
 
        return 1;
@@ -2632,6 +2638,9 @@ static int wcd938x_ldoh_put(struct snd_kcontrol *kcontrol,
        struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
        struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
 
+       if (wcd938x->ldoh == ucontrol->value.integer.value[0])
+               return 0;
+
        wcd938x->ldoh = ucontrol->value.integer.value[0];
 
        return 1;
@@ -2654,6 +2663,9 @@ static int wcd938x_bcs_put(struct snd_kcontrol *kcontrol,
        struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
        struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
 
+       if (wcd938x->bcs_dis == ucontrol->value.integer.value[0])
+               return 0;
+
        wcd938x->bcs_dis = ucontrol->value.integer.value[0];
 
        return 1;
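These wcd938x _put handlers, and the wcd9335, wm5102, wm5110, wm8998 and wm_adsp changes around them, all converge on the same contract: a kcontrol put callback returns 1 if the value changed and 0 if it did not, because ALSA uses that return to decide whether to send a value-change notification to userspace. Always returning 1 floods listeners with spurious events; always returning 0 starves them. The canonical shape, sketched with hypothetical names:

	static int foo_mode_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
	{
		struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
		struct foo_priv *foo = snd_soc_component_get_drvdata(c);

		if (foo->mode == ucontrol->value.enumerated.item[0])
			return 0;	/* no change: suppress the event */

		foo->mode = ucontrol->value.enumerated.item[0];
		return 1;		/* changed: notify userspace */
	}
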
index da2f899..b034df4 100644 (file)
@@ -680,12 +680,17 @@ static int wm5102_out_comp_coeff_put(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
        struct arizona *arizona = dev_get_drvdata(component->dev->parent);
+       uint16_t dac_comp_coeff = get_unaligned_be16(ucontrol->value.bytes.data);
+       int ret = 0;
 
        mutex_lock(&arizona->dac_comp_lock);
-       arizona->dac_comp_coeff = get_unaligned_be16(ucontrol->value.bytes.data);
+       if (arizona->dac_comp_coeff != dac_comp_coeff) {
+               arizona->dac_comp_coeff = dac_comp_coeff;
+               ret = 1;
+       }
        mutex_unlock(&arizona->dac_comp_lock);
 
-       return 0;
+       return ret;
 }
 
 static int wm5102_out_comp_switch_get(struct snd_kcontrol *kcontrol,
@@ -706,12 +711,20 @@ static int wm5102_out_comp_switch_put(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
        struct arizona *arizona = dev_get_drvdata(component->dev->parent);
+       struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value;
+       int ret = 0;
+
+       if (ucontrol->value.integer.value[0] > mc->max)
+               return -EINVAL;
 
        mutex_lock(&arizona->dac_comp_lock);
-       arizona->dac_comp_enabled = ucontrol->value.integer.value[0];
+       if (arizona->dac_comp_enabled != ucontrol->value.integer.value[0]) {
+               arizona->dac_comp_enabled = ucontrol->value.integer.value[0];
+               ret = 1;
+       }
        mutex_unlock(&arizona->dac_comp_lock);
 
-       return 0;
+       return ret;
 }
 
 static const char * const wm5102_osr_text[] = {
index 4973ba1..4ab7a67 100644 (file)
@@ -413,6 +413,7 @@ static int wm5110_put_dre(struct snd_kcontrol *kcontrol,
        unsigned int rnew = (!!ucontrol->value.integer.value[1]) << mc->rshift;
        unsigned int lold, rold;
        unsigned int lena, rena;
+       bool change = false;
        int ret;
 
        snd_soc_dapm_mutex_lock(dapm);
@@ -440,8 +441,8 @@ static int wm5110_put_dre(struct snd_kcontrol *kcontrol,
                goto err;
        }
 
-       ret = regmap_update_bits(arizona->regmap, ARIZONA_DRE_ENABLE,
-                                mask, lnew | rnew);
+       ret = regmap_update_bits_check(arizona->regmap, ARIZONA_DRE_ENABLE,
+                                      mask, lnew | rnew, &change);
        if (ret) {
                dev_err(arizona->dev, "Failed to set DRE: %d\n", ret);
                goto err;
@@ -454,6 +455,9 @@ static int wm5110_put_dre(struct snd_kcontrol *kcontrol,
        if (!rnew && rold)
                wm5110_clear_pga_volume(arizona, mc->rshift);
 
+       if (change)
+               ret = 1;
+
 err:
        snd_soc_dapm_mutex_unlock(dapm);
 
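Where the written value actually reaches a register, regmap_update_bits_check() does the change detection for free: it behaves like regmap_update_bits() but additionally reports whether the register contents changed, which wm5110_put_dre() then folds into its return value. The shape of it (a sketch):

	bool change = false;
	int ret;

	ret = regmap_update_bits_check(regmap, reg, mask, val, &change);
	if (ret)
		return ret;		/* I/O error */

	return change ? 1 : 0;		/* 1 iff the hardware value changed */
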
index 00b59fc..ab54811 100644 (file)
@@ -108,6 +108,7 @@ static int wm8998_inmux_put(struct snd_kcontrol *kcontrol,
        struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
        unsigned int mode_reg, mode_index;
        unsigned int mux, inmode, src_val, mode_val;
+       int change, ret;
 
        mux = ucontrol->value.enumerated.item[0];
        if (mux > 1)
@@ -137,14 +138,20 @@ static int wm8998_inmux_put(struct snd_kcontrol *kcontrol,
        snd_soc_component_update_bits(component, mode_reg,
                                      ARIZONA_IN1_MODE_MASK, mode_val);
 
-       snd_soc_component_update_bits(component, e->reg,
-                                     ARIZONA_IN1L_SRC_MASK |
-                                     ARIZONA_IN1L_SRC_SE_MASK,
-                                     src_val);
+       change = snd_soc_component_update_bits(component, e->reg,
+                                              ARIZONA_IN1L_SRC_MASK |
+                                              ARIZONA_IN1L_SRC_SE_MASK,
+                                              src_val);
 
-       return snd_soc_dapm_mux_update_power(dapm, kcontrol,
-                                            ucontrol->value.enumerated.item[0],
-                                            e, NULL);
+       ret = snd_soc_dapm_mux_update_power(dapm, kcontrol,
+                                           ucontrol->value.enumerated.item[0],
+                                           e, NULL);
+       if (ret < 0) {
+               dev_err(arizona->dev, "Failed to update demux power state: %d\n", ret);
+               return ret;
+       }
+
+       return change;
 }
 
 static const char * const wm8998_inmux_texts[] = {
index 6d7fd88..a7784ac 100644 (file)
@@ -997,7 +997,7 @@ int wm_adsp2_preloader_put(struct snd_kcontrol *kcontrol,
                snd_soc_dapm_sync(dapm);
        }
 
-       return 0;
+       return 1;
 }
 EXPORT_SYMBOL_GPL(wm_adsp2_preloader_put);
 
index 77ac405..d34b29a 100644 (file)
@@ -90,12 +90,12 @@ links indicates connection part of CPU side (= A).
                        ports@0 {
 (X) (A)                        mcpu:   port@0 { mcpu0_ep: endpoint { remote-endpoint = <&mcodec0_ep>; }; };
 (y)                            port@1 { mcpu1_ep: endpoint { remote-endpoint = <&cpu1_ep>; }; };
-(y)                            port@1 { mcpu2_ep: endpoint { remote-endpoint = <&cpu2_ep>; }; };
+(y)                            port@2 { mcpu2_ep: endpoint { remote-endpoint = <&cpu2_ep>; }; };
                        };
                        ports@1 {
 (X)                            port@0 { mcodec0_ep: endpoint { remote-endpoint = <&mcpu0_ep>; }; };
-(y)                            port@0 { mcodec1_ep: endpoint { remote-endpoint = <&codec1_ep>; }; };
-(y)                            port@1 { mcodec2_ep: endpoint { remote-endpoint = <&codec2_ep>; }; };
+(y)                            port@1 { mcodec1_ep: endpoint { remote-endpoint = <&codec1_ep>; }; };
+(y)                            port@2 { mcodec2_ep: endpoint { remote-endpoint = <&codec2_ep>; }; };
                        };
                };
        };
index 0d11cc8..6a06fe3 100644 (file)
@@ -128,10 +128,10 @@ struct avs_tplg_token_parser {
 static int
 avs_parse_uuid_token(struct snd_soc_component *comp, void *elem, void *object, u32 offset)
 {
-       struct snd_soc_tplg_vendor_value_elem *tuple = elem;
+       struct snd_soc_tplg_vendor_uuid_elem *tuple = elem;
        guid_t *val = (guid_t *)((u8 *)object + offset);
 
-       guid_copy((guid_t *)val, (const guid_t *)&tuple->value);
+       guid_copy((guid_t *)val, (const guid_t *)&tuple->uuid);
 
        return 0;
 }
index 00384c6..330c0ac 100644 (file)
@@ -421,8 +421,17 @@ static int snd_byt_wm5102_mc_probe(struct platform_device *pdev)
        priv->spkvdd_en_gpio = gpiod_get(codec_dev, "wlf,spkvdd-ena", GPIOD_OUT_LOW);
        put_device(codec_dev);
 
-       if (IS_ERR(priv->spkvdd_en_gpio))
-               return dev_err_probe(dev, PTR_ERR(priv->spkvdd_en_gpio), "getting spkvdd-GPIO\n");
+       if (IS_ERR(priv->spkvdd_en_gpio)) {
+               ret = PTR_ERR(priv->spkvdd_en_gpio);
+               /*
+                * The spkvdd gpio-lookup is registered by: drivers/mfd/arizona-spi.c,
+                * so -ENOENT means that arizona-spi hasn't probed yet.
+                */
+               if (ret == -ENOENT)
+                       ret = -EPROBE_DEFER;
+
+               return dev_err_probe(dev, ret, "getting spkvdd-GPIO\n");
+       }
 
        /* override platform name, if required */
        byt_wm5102_card.dev = dev;
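dev_err_probe() logs the message and returns the error it was given, but for -EPROBE_DEFER it only records a quiet deferral reason instead of spamming the log. The byt_wm5102 fix therefore translates -ENOENT (the GPIO lookup table is absent because arizona-spi has not probed yet) into -EPROBE_DEFER before the call, keeping the call site a one-liner. In isolation:

	gpio = gpiod_get(codec_dev, "wlf,spkvdd-ena", GPIOD_OUT_LOW);
	if (IS_ERR(gpio)) {
		ret = PTR_ERR(gpio);
		if (ret == -ENOENT)	/* GPIO provider not probed yet */
			ret = -EPROBE_DEFER;
		return dev_err_probe(dev, ret, "getting spkvdd-GPIO\n");
	}
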
index 5d67a2c..4a90a0a 100644 (file)
@@ -69,11 +69,10 @@ static unsigned long sof_rt5682_quirk = SOF_RT5682_MCLK_EN |
 
 static int is_legacy_cpu;
 
-static struct snd_soc_jack sof_hdmi[3];
-
 struct sof_hdmi_pcm {
        struct list_head head;
        struct snd_soc_dai *codec_dai;
+       struct snd_soc_jack hdmi_jack;
        int device;
 };
 
@@ -434,7 +433,6 @@ static int sof_card_late_probe(struct snd_soc_card *card)
        char jack_name[NAME_SIZE];
        struct sof_hdmi_pcm *pcm;
        int err;
-       int i = 0;
 
        /* HDMI is not supported by SOF on Baytrail/CherryTrail */
        if (is_legacy_cpu || !ctx->idisp_codec)
@@ -455,17 +453,15 @@ static int sof_card_late_probe(struct snd_soc_card *card)
                snprintf(jack_name, sizeof(jack_name),
                         "HDMI/DP, pcm=%d Jack", pcm->device);
                err = snd_soc_card_jack_new(card, jack_name,
-                                           SND_JACK_AVOUT, &sof_hdmi[i]);
+                                           SND_JACK_AVOUT, &pcm->hdmi_jack);
 
                if (err)
                        return err;
 
                err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device,
-                                         &sof_hdmi[i]);
+                                         &pcm->hdmi_jack);
                if (err < 0)
                        return err;
-
-               i++;
        }
 
        if (sof_rt5682_quirk & SOF_MAX98373_SPEAKER_AMP_PRESENT) {
index 1f00679..ad826ad 100644 (file)
@@ -1398,6 +1398,33 @@ static struct snd_soc_card card_sof_sdw = {
        .late_probe = sof_sdw_card_late_probe,
 };
 
+static void mc_dailink_exit_loop(struct snd_soc_card *card)
+{
+       struct snd_soc_dai_link *link;
+       int ret;
+       int i, j;
+
+       for (i = 0; i < ARRAY_SIZE(codec_info_list); i++) {
+               if (!codec_info_list[i].exit)
+                       continue;
+               /*
+                * We don't need to call .exit function if there is no matched
+                * We don't need to call the .exit function if no matching
+                * dai link is found.
+               for_each_card_prelinks(card, j, link) {
+                       if (!strcmp(link->codecs[0].dai_name,
+                                   codec_info_list[i].dai_name)) {
+                               ret = codec_info_list[i].exit(card, link);
+                               if (ret)
+                                       dev_warn(card->dev,
+                                                "codec exit failed %d\n",
+                                                ret);
+                               break;
+                       }
+               }
+       }
+}
+
 static int mc_probe(struct platform_device *pdev)
 {
        struct snd_soc_card *card = &card_sof_sdw;
@@ -1462,6 +1489,7 @@ static int mc_probe(struct platform_device *pdev)
        ret = devm_snd_soc_register_card(&pdev->dev, card);
        if (ret) {
                dev_err(card->dev, "snd_soc_register_card failed %d\n", ret);
+               mc_dailink_exit_loop(card);
                return ret;
        }
 
@@ -1473,29 +1501,8 @@ static int mc_probe(struct platform_device *pdev)
 static int mc_remove(struct platform_device *pdev)
 {
        struct snd_soc_card *card = platform_get_drvdata(pdev);
-       struct snd_soc_dai_link *link;
-       int ret;
-       int i, j;
 
-       for (i = 0; i < ARRAY_SIZE(codec_info_list); i++) {
-               if (!codec_info_list[i].exit)
-                       continue;
-               /*
-                * We don't need to call .exit function if there is no matched
-                * dai link found.
-                */
-               for_each_card_prelinks(card, j, link) {
-                       if (!strcmp(link->codecs[0].dai_name,
-                                   codec_info_list[i].dai_name)) {
-                               ret = codec_info_list[i].exit(card, link);
-                               if (ret)
-                                       dev_warn(&pdev->dev,
-                                                "codec exit failed %d\n",
-                                                ret);
-                               break;
-                       }
-               }
-       }
+       mc_dailink_exit_loop(card);
 
        return 0;
 }
index 2439a57..deb7b82 100644 (file)
@@ -99,7 +99,6 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks,
        struct nhlt_fmt_cfg *fmt_cfg;
        struct wav_fmt_ext *wav_fmt;
        unsigned long rate;
-       bool present = false;
        int rate_index = 0;
        u16 channels, bps;
        u8 clk_src;
@@ -112,9 +111,12 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks,
        if (fmt->fmt_count == 0)
                return;
 
+       fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config;
        for (i = 0; i < fmt->fmt_count; i++) {
-               fmt_cfg = &fmt->fmt_config[i];
-               wav_fmt = &fmt_cfg->fmt_ext;
+               struct nhlt_fmt_cfg *saved_fmt_cfg = fmt_cfg;
+               bool present = false;
+
+               wav_fmt = &saved_fmt_cfg->fmt_ext;
 
                channels = wav_fmt->fmt.channels;
                bps = wav_fmt->fmt.bits_per_sample;
@@ -132,12 +134,18 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks,
                 * derive the rate.
                 */
                for (j = i; j < fmt->fmt_count; j++) {
-                       fmt_cfg = &fmt->fmt_config[j];
-                       wav_fmt = &fmt_cfg->fmt_ext;
+                       struct nhlt_fmt_cfg *tmp_fmt_cfg = fmt_cfg;
+
+                       wav_fmt = &tmp_fmt_cfg->fmt_ext;
                        if ((fs == wav_fmt->fmt.samples_per_sec) &&
-                          (bps == wav_fmt->fmt.bits_per_sample))
+                          (bps == wav_fmt->fmt.bits_per_sample)) {
                                channels = max_t(u16, channels,
                                                wav_fmt->fmt.channels);
+                               saved_fmt_cfg = tmp_fmt_cfg;
+                       }
+                       /* Move to the next nhlt_fmt_cfg */
+                       tmp_fmt_cfg = (struct nhlt_fmt_cfg *)(tmp_fmt_cfg->config.caps +
+                                                             tmp_fmt_cfg->config.size);
                }
 
                rate = channels * bps * fs;
@@ -153,8 +161,11 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks,
 
                /* Fill rate and parent for sclk/sclkfs */
                if (!present) {
+                       struct nhlt_fmt_cfg *first_fmt_cfg;
+
+                       first_fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config;
                        i2s_config_ext = (struct skl_i2s_config_blob_ext *)
-                                               fmt->fmt_config[0].config.caps;
+                                               first_fmt_cfg->config.caps;
 
                        /* MCLK Divider Source Select */
                        if (is_legacy_blob(i2s_config_ext->hdr.sig)) {
@@ -168,6 +179,9 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks,
 
                        parent = skl_get_parent_clk(clk_src);
 
+                       /* Move to the next nhlt_fmt_cfg */
+                       fmt_cfg = (struct nhlt_fmt_cfg *)(fmt_cfg->config.caps +
+                                                         fmt_cfg->config.size);
                        /*
                         * Do not copy the config data if there is no parent
                         * clock available for this clock source select
@@ -176,9 +190,9 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks,
                                continue;
 
                        sclk[id].rate_cfg[rate_index].rate = rate;
-                       sclk[id].rate_cfg[rate_index].config = fmt_cfg;
+                       sclk[id].rate_cfg[rate_index].config = saved_fmt_cfg;
                        sclkfs[id].rate_cfg[rate_index].rate = rate;
-                       sclkfs[id].rate_cfg[rate_index].config = fmt_cfg;
+                       sclkfs[id].rate_cfg[rate_index].config = saved_fmt_cfg;
                        sclk[id].parent_name = parent->name;
                        sclkfs[id].parent_name = parent->name;
 
@@ -192,13 +206,13 @@ static void skl_get_mclk(struct skl_dev *skl, struct skl_ssp_clk *mclk,
 {
        struct skl_i2s_config_blob_ext *i2s_config_ext;
        struct skl_i2s_config_blob_legacy *i2s_config;
-       struct nhlt_specific_cfg *fmt_cfg;
+       struct nhlt_fmt_cfg *fmt_cfg;
        struct skl_clk_parent_src *parent;
        u32 clkdiv, div_ratio;
        u8 clk_src;
 
-       fmt_cfg = &fmt->fmt_config[0].config;
-       i2s_config_ext = (struct skl_i2s_config_blob_ext *)fmt_cfg->caps;
+       fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config;
+       i2s_config_ext = (struct skl_i2s_config_blob_ext *)fmt_cfg->config.caps;
 
        /* MCLK Divider Source Select and divider */
        if (is_legacy_blob(i2s_config_ext->hdr.sig)) {
@@ -227,7 +241,7 @@ static void skl_get_mclk(struct skl_dev *skl, struct skl_ssp_clk *mclk,
                return;
 
        mclk[id].rate_cfg[0].rate = parent->rate/div_ratio;
-       mclk[id].rate_cfg[0].config = &fmt->fmt_config[0];
+       mclk[id].rate_cfg[0].config = fmt_cfg;
        mclk[id].parent_name = parent->name;
 }
 
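The crux of the skl-ssp-clk changes: struct nhlt_fmt_cfg ends in a variable-length capabilities blob (config.caps, config.size bytes long), so fmt->fmt_config must not be indexed like a C array; each record begins immediately after the previous record's payload. Walking the table correctly looks like this (a sketch, field names as used in the diff):

	struct nhlt_fmt_cfg *cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config;
	int i;

	for (i = 0; i < fmt->fmt_count; i++) {
		/* ... inspect cfg->fmt_ext, cfg->config.caps ... */

		/* the next record starts right after this one's payload */
		cfg = (struct nhlt_fmt_cfg *)(cfg->config.caps +
					      cfg->config.size);
	}
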
index 19c4a90..ee59ef3 100644 (file)
@@ -147,6 +147,12 @@ static int q6apm_dai_prepare(struct snd_soc_component *component,
        cfg.num_channels = runtime->channels;
        cfg.bit_width = prtd->bits_per_sample;
 
+       if (prtd->state) {
+               /* clear the previous setup, if any */
+               q6apm_graph_stop(prtd->graph);
+               q6apm_unmap_memory_regions(prtd->graph, substream->stream);
+       }
+
        prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
        prtd->pos = 0;
        /* rate and channels are sent to audio driver */
index f424d7a..7940192 100644 (file)
@@ -75,6 +75,7 @@ static struct audioreach_graph *q6apm_get_audioreach_graph(struct q6apm *apm, ui
        id = idr_alloc(&apm->graph_idr, graph, graph_id, graph_id + 1, GFP_KERNEL);
        if (id < 0) {
                dev_err(apm->dev, "Unable to allocate graph id (%d)\n", graph_id);
+               kfree(graph->graph);
                kfree(graph);
                mutex_unlock(&apm->lock);
                return ERR_PTR(id);
index 869c765..a8e842e 100644 (file)
@@ -62,6 +62,8 @@ struct snd_soc_dapm_widget *
 snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
                         const struct snd_soc_dapm_widget *widget);
 
+static unsigned int soc_dapm_read(struct snd_soc_dapm_context *dapm, int reg);
+
 /* dapm power sequences - make this per codec in the future */
 static int dapm_up_seq[] = {
        [snd_soc_dapm_pre] = 1,
@@ -442,6 +444,9 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
 
                        snd_soc_dapm_add_path(widget->dapm, data->widget,
                                              widget, NULL, NULL);
+               } else if (e->reg != SND_SOC_NOPM) {
+                       data->value = soc_dapm_read(widget->dapm, e->reg) &
+                                     (e->mask << e->shift_l);
                }
                break;
        default:
index e693070..d867f44 100644 (file)
@@ -526,7 +526,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
                return -EINVAL;
        if (mc->platform_max && tmp > mc->platform_max)
                return -EINVAL;
-       if (tmp > mc->max - mc->min + 1)
+       if (tmp > mc->max - mc->min)
                return -EINVAL;
 
        if (invert)
@@ -547,7 +547,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
                        return -EINVAL;
                if (mc->platform_max && tmp > mc->platform_max)
                        return -EINVAL;
-               if (tmp > mc->max - mc->min + 1)
+               if (tmp > mc->max - mc->min)
                        return -EINVAL;
 
                if (invert)
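This is an off-by-one fix in range validation: after the mc->min offset is removed, a user-supplied value is valid only in [0, max - min], so the old "tmp > mc->max - mc->min + 1" let one out-of-range step through. For example, min = -50 and max = 50 give 101 valid raw values (0 through 100); tmp = 101 must be rejected, which "tmp > 100" does and "tmp > 101" did not:

	/* valid raw range after offset removal: 0 .. (max - min) */
	if (tmp > mc->max - mc->min)
		return -EINVAL;
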
index 000ea90..e24eea7 100644 (file)
@@ -181,12 +181,20 @@ int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
  * Power Management.
  */
 
-static int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
+int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
 {
+       struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+       const struct sof_intel_dsp_desc *chip = hda->desc;
        unsigned int cpa;
        u32 adspcs;
        int ret;
 
+       /* restrict core_mask to host managed cores mask */
+       core_mask &= chip->host_managed_cores_mask;
+       /* return if core_mask is not valid */
+       if (!core_mask)
+               return 0;
+
        /* update bits */
        snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
                                HDA_DSP_ADSPCS_SPA_MASK(core_mask),
index 6429012..145d483 100644 (file)
@@ -95,9 +95,9 @@ out_put:
 }
 
 /*
- * first boot sequence has some extra steps. core 0 waits for power
- * status on core 1, so power up core 1 also momentarily, keep it in
- * reset/stall and then turn it off
+ * first boot sequence has some extra steps.
+ * power on all host-managed cores, unstall/run only the boot core to boot the
+ * DSP, then turn off any non-boot cores that were powered on.
  */
 static int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
 {
@@ -110,7 +110,7 @@ static int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
        int ret;
 
        /* step 1: power up corex */
-       ret = hda_dsp_enable_core(sdev, chip->host_managed_cores_mask);
+       ret = hda_dsp_core_power_up(sdev, chip->host_managed_cores_mask);
        if (ret < 0) {
                if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
                        dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
@@ -127,7 +127,7 @@ static int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
        snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);
 
        /* step 3: unset core 0 reset state & unstall/run core 0 */
-       ret = hda_dsp_core_run(sdev, BIT(0));
+       ret = hda_dsp_core_run(sdev, chip->init_core_mask);
        if (ret < 0) {
                if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
                        dev_err(sdev->dev,
@@ -389,7 +389,8 @@ int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
        struct snd_dma_buffer dmab;
        int ret, ret1, i;
 
-       if (hda->imrboot_supported && !sdev->first_boot) {
+       if (sdev->system_suspend_target < SOF_SUSPEND_S4 &&
+           hda->imrboot_supported && !sdev->first_boot) {
                dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n");
                hda->boot_iteration = 0;
                ret = hda_dsp_boot_imr(sdev);
index dc1f743..6888e0a 100644 (file)
@@ -192,79 +192,7 @@ snd_pcm_uframes_t hda_dsp_pcm_pointer(struct snd_sof_dev *sdev,
                goto found;
        }
 
-       switch (sof_hda_position_quirk) {
-       case SOF_HDA_POSITION_QUIRK_USE_SKYLAKE_LEGACY:
-               /*
-                * This legacy code, inherited from the Skylake driver,
-                * mixes DPIB registers and DPIB DDR updates and
-                * does not seem to follow any known hardware recommendations.
-                * It's not clear e.g. why there is a different flow
-                * for capture and playback, the only information that matters is
-                * what traffic class is used, and on all SOF-enabled platforms
-                * only VC0 is supported so the work-around was likely not necessary
-                * and quite possibly wrong.
-                */
-
-               /* DPIB/posbuf position mode:
-                * For Playback, Use DPIB register from HDA space which
-                * reflects the actual data transferred.
-                * For Capture, Use the position buffer for pointer, as DPIB
-                * is not accurate enough, its update may be completed
-                * earlier than the data written to DDR.
-                */
-               if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-                       pos = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
-                                              AZX_REG_VS_SDXDPIB_XBASE +
-                                              (AZX_REG_VS_SDXDPIB_XINTERVAL *
-                                               hstream->index));
-               } else {
-                       /*
-                        * For capture stream, we need more workaround to fix the
-                        * position incorrect issue:
-                        *
-                        * 1. Wait at least 20us before reading position buffer after
-                        * the interrupt generated(IOC), to make sure position update
-                        * happens on frame boundary i.e. 20.833uSec for 48KHz.
-                        * 2. Perform a dummy Read to DPIB register to flush DMA
-                        * position value.
-                        * 3. Read the DMA Position from posbuf. Now the readback
-                        * value should be >= period boundary.
-                        */
-                       usleep_range(20, 21);
-                       snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
-                                        AZX_REG_VS_SDXDPIB_XBASE +
-                                        (AZX_REG_VS_SDXDPIB_XINTERVAL *
-                                         hstream->index));
-                       pos = snd_hdac_stream_get_pos_posbuf(hstream);
-               }
-               break;
-       case SOF_HDA_POSITION_QUIRK_USE_DPIB_REGISTERS:
-               /*
-                * In case VC1 traffic is disabled this is the recommended option
-                */
-               pos = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
-                                      AZX_REG_VS_SDXDPIB_XBASE +
-                                      (AZX_REG_VS_SDXDPIB_XINTERVAL *
-                                       hstream->index));
-               break;
-       case SOF_HDA_POSITION_QUIRK_USE_DPIB_DDR_UPDATE:
-               /*
-                * This is the recommended option when VC1 is enabled.
-                * While this isn't needed for SOF platforms it's added for
-                * consistency and debug.
-                */
-               pos = snd_hdac_stream_get_pos_posbuf(hstream);
-               break;
-       default:
-               dev_err_once(sdev->dev, "hda_position_quirk value %d not supported\n",
-                            sof_hda_position_quirk);
-               pos = 0;
-               break;
-       }
-
-       if (pos >= hstream->bufsize)
-               pos = 0;
-
+       pos = hda_dsp_stream_get_position(hstream, substream->stream, true);
 found:
        pos = bytes_to_frames(substream->runtime, pos);
 
index daeb64c..d95ae17 100644 (file)
@@ -707,12 +707,13 @@ bool hda_dsp_check_stream_irq(struct snd_sof_dev *sdev)
 }
 
 static void
-hda_dsp_set_bytes_transferred(struct hdac_stream *hstream, u64 buffer_size)
+hda_dsp_compr_bytes_transferred(struct hdac_stream *hstream, int direction)
 {
+       u64 buffer_size = hstream->bufsize;
        u64 prev_pos, pos, num_bytes;
 
        div64_u64_rem(hstream->curr_pos, buffer_size, &prev_pos);
-       pos = snd_hdac_stream_get_pos_posbuf(hstream);
+       pos = hda_dsp_stream_get_position(hstream, direction, false);
 
        if (pos < prev_pos)
                num_bytes = (buffer_size - prev_pos) +  pos;
@@ -748,8 +749,7 @@ static bool hda_dsp_stream_check(struct hdac_bus *bus, u32 status)
                        if (s->substream && sof_hda->no_ipc_position) {
                                snd_sof_pcm_period_elapsed(s->substream);
                        } else if (s->cstream) {
-                               hda_dsp_set_bytes_transferred(s,
-                                       s->cstream->runtime->buffer_size);
+                               hda_dsp_compr_bytes_transferred(s, s->cstream->direction);
                                snd_compr_fragment_elapsed(s->cstream);
                        }
                }
@@ -1009,3 +1009,89 @@ void hda_dsp_stream_free(struct snd_sof_dev *sdev)
                devm_kfree(sdev->dev, hda_stream);
        }
 }
+
+snd_pcm_uframes_t hda_dsp_stream_get_position(struct hdac_stream *hstream,
+                                             int direction, bool can_sleep)
+{
+       struct hdac_ext_stream *hext_stream = stream_to_hdac_ext_stream(hstream);
+       struct sof_intel_hda_stream *hda_stream = hstream_to_sof_hda_stream(hext_stream);
+       struct snd_sof_dev *sdev = hda_stream->sdev;
+       snd_pcm_uframes_t pos;
+
+       switch (sof_hda_position_quirk) {
+       case SOF_HDA_POSITION_QUIRK_USE_SKYLAKE_LEGACY:
+               /*
+                * This legacy code, inherited from the Skylake driver,
+                * mixes DPIB registers and DPIB DDR updates and
+                * does not seem to follow any known hardware recommendations.
+                * It's not clear e.g. why there is a different flow
+                * for capture and playback; the only information that matters is
+                * what traffic class is used, and on all SOF-enabled platforms
+                * only VC0 is supported so the work-around was likely not necessary
+                * and quite possibly wrong.
+                */
+
+               /* DPIB/posbuf position mode:
+                * For Playback, Use DPIB register from HDA space which
+                * reflects the actual data transferred.
+                * For Capture, Use the position buffer for pointer, as DPIB
+                * is not accurate enough, its update may be completed
+                * earlier than the data written to DDR.
+                */
+               if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
+                       pos = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+                                              AZX_REG_VS_SDXDPIB_XBASE +
+                                              (AZX_REG_VS_SDXDPIB_XINTERVAL *
+                                               hstream->index));
+               } else {
+                       /*
+                        * For capture streams, an extra workaround is needed to fix
+                        * the incorrect-position issue:
+                        *
+                        * 1. Wait at least 20us before reading the position buffer
+                        * after the interrupt is generated (IOC), to make sure the
+                        * position update happens on a frame boundary, i.e. 20.833us
+                        * for 48kHz.
+                        * 2. Perform a dummy Read to DPIB register to flush DMA
+                        * position value.
+                        * 3. Read the DMA Position from posbuf. Now the readback
+                        * value should be >= period boundary.
+                        */
+                       if (can_sleep)
+                               usleep_range(20, 21);
+
+                       snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+                                        AZX_REG_VS_SDXDPIB_XBASE +
+                                        (AZX_REG_VS_SDXDPIB_XINTERVAL *
+                                         hstream->index));
+                       pos = snd_hdac_stream_get_pos_posbuf(hstream);
+               }
+               break;
+       case SOF_HDA_POSITION_QUIRK_USE_DPIB_REGISTERS:
+               /*
+                * This is the recommended option when VC1 traffic is disabled.
+                */
+               pos = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+                                      AZX_REG_VS_SDXDPIB_XBASE +
+                                      (AZX_REG_VS_SDXDPIB_XINTERVAL *
+                                       hstream->index));
+               break;
+       case SOF_HDA_POSITION_QUIRK_USE_DPIB_DDR_UPDATE:
+               /*
+                * This is the recommended option when VC1 is enabled.
+                * While this isn't needed on SOF platforms, it's added for
+                * consistency and debugging.
+                */
+               pos = snd_hdac_stream_get_pos_posbuf(hstream);
+               break;
+       default:
+               dev_err_once(sdev->dev, "hda_position_quirk value %d not supported\n",
+                            sof_hda_position_quirk);
+               pos = 0;
+               break;
+       }
+
+       if (pos >= hstream->bufsize)
+               pos = 0;
+
+       return pos;
+}
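
All three quirk branches compute the per-stream DPIB register address the same way: a fixed vendor-specific base plus a fixed per-stream stride. A minimal standalone sketch of that addressing (the XBASE/XINTERVAL values mirror the usual hda_register.h definitions and should be treated as assumptions here):

    #include <stdint.h>

    #define AZX_REG_VS_SDXDPIB_XBASE      0x1084  /* assumed vendor-specific base */
    #define AZX_REG_VS_SDXDPIB_XINTERVAL  0x20    /* assumed per-stream stride */

    /* Byte offset of stream N's DPIB register within the HDA BAR. */
    static inline uint32_t sdxdpib_offset(unsigned int stream_index)
    {
            return AZX_REG_VS_SDXDPIB_XBASE +
                   AZX_REG_VS_SDXDPIB_XINTERVAL * stream_index;
    }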
index 3e0f7b0..06476ff 100644 (file)
@@ -497,6 +497,7 @@ struct sof_intel_hda_stream {
  */
 int hda_dsp_probe(struct snd_sof_dev *sdev);
 int hda_dsp_remove(struct snd_sof_dev *sdev);
+int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask);
 int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask);
 int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask);
 int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
@@ -564,6 +565,9 @@ int hda_dsp_stream_setup_bdl(struct snd_sof_dev *sdev,
 bool hda_dsp_check_ipc_irq(struct snd_sof_dev *sdev);
 bool hda_dsp_check_stream_irq(struct snd_sof_dev *sdev);
 
+snd_pcm_uframes_t hda_dsp_stream_get_position(struct hdac_stream *hstream,
+                                             int direction, bool can_sleep);
+
 struct hdac_ext_stream *
        hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction, u32 flags);
 int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag);
index 043554d..10740c5 100644 (file)
@@ -1577,24 +1577,23 @@ static int sof_ipc3_control_load_bytes(struct snd_sof_dev *sdev, struct snd_sof_
        struct sof_ipc_ctrl_data *cdata;
        int ret;
 
-       scontrol->ipc_control_data = kzalloc(scontrol->max_size, GFP_KERNEL);
-       if (!scontrol->ipc_control_data)
-               return -ENOMEM;
-
-       if (scontrol->max_size < sizeof(*cdata) ||
-           scontrol->max_size < sizeof(struct sof_abi_hdr)) {
-               ret = -EINVAL;
-               goto err;
+       if (scontrol->max_size < (sizeof(*cdata) + sizeof(struct sof_abi_hdr))) {
+               dev_err(sdev->dev, "%s: insufficient size for a bytes control: %zu.\n",
+                       __func__, scontrol->max_size);
+               return -EINVAL;
        }
 
-       /* init the get/put bytes data */
        if (scontrol->priv_size > scontrol->max_size - sizeof(*cdata)) {
-               dev_err(sdev->dev, "err: bytes data size %zu exceeds max %zu.\n",
+               dev_err(sdev->dev,
+                       "%s: bytes data size %zu exceeds max %zu.\n", __func__,
                        scontrol->priv_size, scontrol->max_size - sizeof(*cdata));
-               ret = -EINVAL;
-               goto err;
+               return -EINVAL;
        }
 
+       scontrol->ipc_control_data = kzalloc(scontrol->max_size, GFP_KERNEL);
+       if (!scontrol->ipc_control_data)
+               return -ENOMEM;
+
        scontrol->size = sizeof(struct sof_ipc_ctrl_data) + scontrol->priv_size;
 
        cdata = scontrol->ipc_control_data;
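
The reshuffle above is the classic validate-before-allocate pattern: once all size checks pass before kzalloc(), the error paths reduce to plain returns and the err/goto cleanup label disappears. A hedged standalone sketch of the same ordering (names illustrative, not from the patch):

    #include <stdlib.h>

    /* Validate first, allocate last: error paths need no cleanup. */
    void *alloc_bytes_control(size_t max_size, size_t hdr_size,
                              size_t abi_size, size_t priv_size)
    {
            if (max_size < hdr_size + abi_size)
                    return NULL;            /* too small for the headers */
            if (priv_size > max_size - hdr_size)
                    return NULL;            /* private data would not fit */
            return calloc(1, max_size);     /* zeroed, like kzalloc() */
    }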
index 3333a06..e006532 100644 (file)
@@ -392,7 +392,7 @@ static int mt8186_dsp_probe(struct snd_sof_dev *sdev)
                                                      PLATFORM_DEVID_NONE,
                                                      pdev, sizeof(*pdev));
        if (IS_ERR(priv->ipc_dev)) {
-               ret = IS_ERR(priv->ipc_dev);
+               ret = PTR_ERR(priv->ipc_dev);
                dev_err(sdev->dev, "failed to create mtk-adsp-ipc device\n");
                goto err_adsp_off;
        }
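
The one-character fix above corrects a common ERR_PTR slip: IS_ERR() only reports whether the pointer encodes an error (effectively 0 or 1), while PTR_ERR() recovers the actual negative errno it carries. A minimal kernel-style illustration (the helper is hypothetical):

    #include <linux/err.h>

    static int create_ipc_child(void **out)
    {
            void *child = ERR_PTR(-ENOMEM);  /* pretend creation failed */

            if (IS_ERR(child))
                    return PTR_ERR(child);   /* -ENOMEM; IS_ERR() alone is just 1 */
            *out = child;
            return 0;
    }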
index 18eb327..df740be 100644 (file)
@@ -23,6 +23,9 @@ static u32 snd_sof_dsp_power_target(struct snd_sof_dev *sdev)
        u32 target_dsp_state;
 
        switch (sdev->system_suspend_target) {
+       case SOF_SUSPEND_S5:
+       case SOF_SUSPEND_S4:
+               /* DSP should be in D3 if the system is suspending to S3+ */
        case SOF_SUSPEND_S3:
                /* DSP should be in D3 if the system is suspending to S3 */
                target_dsp_state = SOF_DSP_PM_D3;
@@ -335,8 +338,24 @@ int snd_sof_prepare(struct device *dev)
                return 0;
 
 #if defined(CONFIG_ACPI)
-       if (acpi_target_system_state() == ACPI_STATE_S0)
+       switch (acpi_target_system_state()) {
+       case ACPI_STATE_S0:
                sdev->system_suspend_target = SOF_SUSPEND_S0IX;
+               break;
+       case ACPI_STATE_S1:
+       case ACPI_STATE_S2:
+       case ACPI_STATE_S3:
+               sdev->system_suspend_target = SOF_SUSPEND_S3;
+               break;
+       case ACPI_STATE_S4:
+               sdev->system_suspend_target = SOF_SUSPEND_S4;
+               break;
+       case ACPI_STATE_S5:
+               sdev->system_suspend_target = SOF_SUSPEND_S5;
+               break;
+       default:
+               break;
+       }
 #endif
 
        return 0;
index 9d7f53f..f0f3d72 100644 (file)
@@ -85,6 +85,8 @@ enum sof_system_suspend_state {
        SOF_SUSPEND_NONE = 0,
        SOF_SUSPEND_S0IX,
        SOF_SUSPEND_S3,
+       SOF_SUSPEND_S4,
+       SOF_SUSPEND_S5,
 };
 
 enum sof_dfsentry_type {
index 7865cda..da519ea 100644 (file)
@@ -316,8 +316,6 @@ static inline int omap_mcbsp_read(struct omap_mcbsp *mcbsp, u16 reg,
 
 /* Sidetone specific API */
 int omap_mcbsp_st_init(struct platform_device *pdev);
-void omap_mcbsp_st_cleanup(struct platform_device *pdev);
-
 int omap_mcbsp_st_start(struct omap_mcbsp *mcbsp);
 int omap_mcbsp_st_stop(struct omap_mcbsp *mcbsp);
 
index 0bc7d26..7e8179c 100644 (file)
@@ -347,7 +347,7 @@ int omap_mcbsp_st_init(struct platform_device *pdev)
        if (!st_data)
                return -ENOMEM;
 
-       st_data->mcbsp_iclk = clk_get(mcbsp->dev, "ick");
+       st_data->mcbsp_iclk = devm_clk_get(mcbsp->dev, "ick");
        if (IS_ERR(st_data->mcbsp_iclk)) {
                dev_warn(mcbsp->dev,
                         "Failed to get ick, sidetone might be broken\n");
@@ -359,7 +359,7 @@ int omap_mcbsp_st_init(struct platform_device *pdev)
        if (!st_data->io_base_st)
                return -ENOMEM;
 
-       ret = sysfs_create_group(&mcbsp->dev->kobj, &sidetone_attr_group);
+       ret = devm_device_add_group(mcbsp->dev, &sidetone_attr_group);
        if (ret)
                return ret;
 
@@ -368,16 +368,6 @@ int omap_mcbsp_st_init(struct platform_device *pdev)
        return 0;
 }
 
-void omap_mcbsp_st_cleanup(struct platform_device *pdev)
-{
-       struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev);
-
-       if (mcbsp->st_data) {
-               sysfs_remove_group(&mcbsp->dev->kobj, &sidetone_attr_group);
-               clk_put(mcbsp->st_data->mcbsp_iclk);
-       }
-}
-
 static int omap_mcbsp_st_info_volsw(struct snd_kcontrol *kcontrol,
                                    struct snd_ctl_elem_info *uinfo)
 {
index 4479d74..9933b33 100644 (file)
@@ -702,8 +702,7 @@ static int omap_mcbsp_init(struct platform_device *pdev)
                mcbsp->max_tx_thres = max_thres(mcbsp) - 0x10;
                mcbsp->max_rx_thres = max_thres(mcbsp) - 0x10;
 
-               ret = sysfs_create_group(&mcbsp->dev->kobj,
-                                        &additional_attr_group);
+               ret = devm_device_add_group(mcbsp->dev, &additional_attr_group);
                if (ret) {
                        dev_err(mcbsp->dev,
                                "Unable to create additional controls\n");
@@ -711,16 +710,7 @@ static int omap_mcbsp_init(struct platform_device *pdev)
                }
        }
 
-       ret = omap_mcbsp_st_init(pdev);
-       if (ret)
-               goto err_st;
-
-       return 0;
-
-err_st:
-       if (mcbsp->pdata->buffer_size)
-               sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group);
-       return ret;
+       return omap_mcbsp_st_init(pdev);
 }
 
 /*
@@ -1431,11 +1421,6 @@ static int asoc_mcbsp_remove(struct platform_device *pdev)
        if (cpu_latency_qos_request_active(&mcbsp->pm_qos_req))
                cpu_latency_qos_remove_request(&mcbsp->pm_qos_req);
 
-       if (mcbsp->pdata->buffer_size)
-               sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group);
-
-       omap_mcbsp_st_cleanup(pdev);
-
        return 0;
 }
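
All of the McBSP hunks follow one conversion: acquiring resources through devm_* variants lets devres release them automatically at unbind, which is why omap_mcbsp_st_cleanup() and the sysfs_remove_group() calls in the error and remove paths can simply be deleted. A hedged sketch of the pattern (the attribute group is illustrative):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static const struct attribute_group example_attr_group; /* hypothetical */

    static int example_probe(struct platform_device *pdev)
    {
            struct clk *iclk;
            int ret;

            /* devres calls clk_put() for us when the device unbinds */
            iclk = devm_clk_get(&pdev->dev, "ick");
            if (IS_ERR(iclk))
                    return PTR_ERR(iclk);

            /* likewise, the sysfs group is removed automatically */
            ret = devm_device_add_group(&pdev->dev, &example_attr_group);
            if (ret)
                    return ret;  /* earlier devres entries unwind on their own */

            return 0;
    }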
 
index b7b6f38..6eb7d93 100644 (file)
@@ -637,10 +637,10 @@ static int snd_get_meter_comp_index(struct snd_us16x08_meter_store *store)
                }
        } else {
                /* skip channels with no compressor active */
-               while (!store->comp_store->val[
+               while (store->comp_index <= SND_US16X08_MAX_CHANNELS
+                       && !store->comp_store->val[
                        COMP_STORE_IDX(SND_US16X08_ID_COMP_SWITCH)]
-                       [store->comp_index - 1]
-                       && store->comp_index <= SND_US16X08_MAX_CHANNELS) {
+                       [store->comp_index - 1]) {
                        store->comp_index++;
                }
                ret = store->comp_index++;
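
The reordering matters because && evaluates left to right and short-circuits: the index bound has to be checked before the array element is read, otherwise the loop can read one element past the valid range before noticing it should stop. A generic illustration of the corrected shape (hypothetical helper):

    /*
     * Return the first index in [start, n] whose channel is active, or
     * n + 1 if none is; the bound check guards the array access.
     */
    static int first_active(const int *active, int n, int start)
    {
            int i = start;

            while (i <= n && !active[i - 1])  /* test i before reading active[i - 1] */
                    i++;
            return i;
    }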
index 4f56e17..f93201a 100644 (file)
@@ -3803,6 +3803,54 @@ YAMAHA_DEVICE(0x7010, "UB99"),
 },
 
 /*
+ * MacroSilicon MS2100/MS2106 based AV capture cards
+ *
+ * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch.
+ * They also need QUIRK_FLAG_ALIGN_TRANSFER, which makes one wonder if
+ * they pretend to be 96kHz mono as a workaround for stereo being broken
+ * by that...
+ *
+ * They also have an issue with initial stream alignment that causes the
+ * channels to be swapped and out of phase, which is dealt with in quirks.c.
+ */
+{
+       USB_AUDIO_DEVICE(0x534d, 0x0021),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "MacroSilicon",
+               .product_name = "MS210x",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = &(const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_MIXER,
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+                                       .channels = 2,
+                                       .iface = 3,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .attributes = 0,
+                                       .endpoint = 0x82,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC |
+                                               USB_ENDPOINT_SYNC_ASYNC,
+                                       .rates = SNDRV_PCM_RATE_CONTINUOUS,
+                                       .rate_min = 48000,
+                                       .rate_max = 48000,
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
+
+/*
  * MacroSilicon MS2109 based HDMI capture cards
  *
  * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch.
@@ -4119,6 +4167,206 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                }
        }
 },
+{
+       /*
+        * Fiero SC-01 (firmware v1.0.0 @ 48 kHz)
+        */
+       USB_DEVICE(0x2b53, 0x0023),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "Fiero",
+               .product_name = "SC-01",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = &(const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       /* Playback */
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S32_LE,
+                                       .channels = 2,
+                                       .fmt_bits = 24,
+                                       .iface = 1,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .endpoint = 0x01,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC |
+                                                  USB_ENDPOINT_SYNC_ASYNC,
+                                       .rates = SNDRV_PCM_RATE_48000,
+                                       .rate_min = 48000,
+                                       .rate_max = 48000,
+                                       .nr_rates = 1,
+                                       .rate_table = (unsigned int[]) { 48000 },
+                                       .clock = 0x29
+                               }
+                       },
+                       /* Capture */
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S32_LE,
+                                       .channels = 2,
+                                       .fmt_bits = 24,
+                                       .iface = 2,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .endpoint = 0x82,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC |
+                                                  USB_ENDPOINT_SYNC_ASYNC |
+                                                  USB_ENDPOINT_USAGE_IMPLICIT_FB,
+                                       .rates = SNDRV_PCM_RATE_48000,
+                                       .rate_min = 48000,
+                                       .rate_max = 48000,
+                                       .nr_rates = 1,
+                                       .rate_table = (unsigned int[]) { 48000 },
+                                       .clock = 0x29
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
+{
+       /*
+        * Fiero SC-01 (firmware v1.0.0 @ 96 kHz)
+        */
+       USB_DEVICE(0x2b53, 0x0024),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "Fiero",
+               .product_name = "SC-01",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = &(const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       /* Playback */
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S32_LE,
+                                       .channels = 2,
+                                       .fmt_bits = 24,
+                                       .iface = 1,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .endpoint = 0x01,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC |
+                                                  USB_ENDPOINT_SYNC_ASYNC,
+                                       .rates = SNDRV_PCM_RATE_96000,
+                                       .rate_min = 96000,
+                                       .rate_max = 96000,
+                                       .nr_rates = 1,
+                                       .rate_table = (unsigned int[]) { 96000 },
+                                       .clock = 0x29
+                               }
+                       },
+                       /* Capture */
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S32_LE,
+                                       .channels = 2,
+                                       .fmt_bits = 24,
+                                       .iface = 2,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .endpoint = 0x82,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC |
+                                                  USB_ENDPOINT_SYNC_ASYNC |
+                                                  USB_ENDPOINT_USAGE_IMPLICIT_FB,
+                                       .rates = SNDRV_PCM_RATE_96000,
+                                       .rate_min = 96000,
+                                       .rate_max = 96000,
+                                       .nr_rates = 1,
+                                       .rate_table = (unsigned int[]) { 96000 },
+                                       .clock = 0x29
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
+{
+       /*
+        * Fiero SC-01 (firmware v1.1.0)
+        */
+       USB_DEVICE(0x2b53, 0x0031),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "Fiero",
+               .product_name = "SC-01",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = &(const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       /* Playback */
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S32_LE,
+                                       .channels = 2,
+                                       .fmt_bits = 24,
+                                       .iface = 1,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .endpoint = 0x01,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC |
+                                                  USB_ENDPOINT_SYNC_ASYNC,
+                                       .rates = SNDRV_PCM_RATE_48000 |
+                                                SNDRV_PCM_RATE_96000,
+                                       .rate_min = 48000,
+                                       .rate_max = 96000,
+                                       .nr_rates = 2,
+                                       .rate_table = (unsigned int[]) { 48000, 96000 },
+                                       .clock = 0x29
+                               }
+                       },
+                       /* Capture */
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S32_LE,
+                                       .channels = 2,
+                                       .fmt_bits = 24,
+                                       .iface = 2,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .endpoint = 0x82,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC |
+                                                  USB_ENDPOINT_SYNC_ASYNC |
+                                                  USB_ENDPOINT_USAGE_IMPLICIT_FB,
+                                       .rates = SNDRV_PCM_RATE_48000 |
+                                                SNDRV_PCM_RATE_96000,
+                                       .rate_min = 48000,
+                                       .rate_max = 96000,
+                                       .nr_rates = 2,
+                                       .rate_table = (unsigned int[]) { 48000, 96000 },
+                                       .clock = 0x29
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
 
 #undef USB_DEVICE_VENDOR_SPEC
 #undef USB_AUDIO_DEVICE
index e8468f9..968d90c 100644 (file)
@@ -1478,6 +1478,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
        case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
                set_format_emu_quirk(subs, fmt);
                break;
+       case USB_ID(0x534d, 0x0021): /* MacroSilicon MS2100/MS2106 */
        case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
                subs->stream_offset_adj = 2;
                break;
@@ -1842,6 +1843,10 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
                   QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
        DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */
                   QUIRK_FLAG_GET_SAMPLE_RATE),
+       DEVICE_FLG(0x1397, 0x0508, /* Behringer UMC204HD */
+                  QUIRK_FLAG_PLAYBACK_FIRST | QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+       DEVICE_FLG(0x1397, 0x0509, /* Behringer UMC404HD */
+                  QUIRK_FLAG_PLAYBACK_FIRST | QUIRK_FLAG_GENERIC_IMPLICIT_FB),
        DEVICE_FLG(0x13e5, 0x0001, /* Serato Phono */
                   QUIRK_FLAG_IGNORE_CTL_ERROR),
        DEVICE_FLG(0x154e, 0x1002, /* Denon DCD-1500RE */
@@ -1904,10 +1909,18 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
                   QUIRK_FLAG_IGNORE_CTL_ERROR),
        DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
                   QUIRK_FLAG_GET_SAMPLE_RATE),
+       DEVICE_FLG(0x534d, 0x0021, /* MacroSilicon MS2100/MS2106 */
+                  QUIRK_FLAG_ALIGN_TRANSFER),
        DEVICE_FLG(0x534d, 0x2109, /* MacroSilicon MS2109 */
                   QUIRK_FLAG_ALIGN_TRANSFER),
        DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
                   QUIRK_FLAG_GET_SAMPLE_RATE),
+       DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */
+                  QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+       DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */
+                  QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+       DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */
+                  QUIRK_FLAG_GENERIC_IMPLICIT_FB),
 
        /* Vendor matches */
        VENDOR_FLG(0x045e, /* MS Lifecam */
index 0d828e3..ab95fb3 100644 (file)
@@ -33,6 +33,8 @@
 #include <drm/intel_lpe_audio.h>
 #include "intel_hdmi_audio.h"
 
+#define INTEL_HDMI_AUDIO_SUSPEND_DELAY_MS  5000
+
 #define for_each_pipe(card_ctx, pipe) \
        for ((pipe) = 0; (pipe) < (card_ctx)->num_pipes; (pipe)++)
 #define for_each_port(card_ctx, port) \
@@ -1066,7 +1068,9 @@ static int had_pcm_open(struct snd_pcm_substream *substream)
        intelhaddata = snd_pcm_substream_chip(substream);
        runtime = substream->runtime;
 
-       pm_runtime_get_sync(intelhaddata->dev);
+       retval = pm_runtime_resume_and_get(intelhaddata->dev);
+       if (retval < 0)
+               return retval;
 
        /* set the runtime hw parameter with local snd_pcm_hardware struct */
        runtime->hw = had_pcm_hardware;
@@ -1534,8 +1538,12 @@ static void had_audio_wq(struct work_struct *work)
                container_of(work, struct snd_intelhad, hdmi_audio_wq);
        struct intel_hdmi_lpe_audio_pdata *pdata = ctx->dev->platform_data;
        struct intel_hdmi_lpe_audio_port_pdata *ppdata = &pdata->port[ctx->port];
+       int ret;
+
+       ret = pm_runtime_resume_and_get(ctx->dev);
+       if (ret < 0)
+               return;
 
-       pm_runtime_get_sync(ctx->dev);
        mutex_lock(&ctx->mutex);
        if (ppdata->pipe < 0) {
                dev_dbg(ctx->dev, "%s: Event: HAD_NOTIFY_HOT_UNPLUG : port = %d\n",
@@ -1802,8 +1810,11 @@ static int __hdmi_lpe_audio_probe(struct platform_device *pdev)
        pdata->notify_audio_lpe = notify_audio_lpe;
        spin_unlock_irq(&pdata->lpe_audio_slock);
 
+       pm_runtime_set_autosuspend_delay(&pdev->dev, INTEL_HDMI_AUDIO_SUSPEND_DELAY_MS);
        pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
        pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_idle(&pdev->dev);
 
        dev_dbg(&pdev->dev, "%s: handle pending notification\n", __func__);
        for_each_port(card_ctx, port) {
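
Both conversions in this file replace pm_runtime_get_sync(), which increments the usage counter even when the resume fails and so obliges the caller to drop the reference on error, with pm_runtime_resume_and_get(), which balances the count itself on failure. A hedged kernel-style sketch of the resulting call shape:

    #include <linux/pm_runtime.h>

    static int powered_section(struct device *dev)
    {
            int ret = pm_runtime_resume_and_get(dev);

            if (ret < 0)
                    return ret;     /* usage count already balanced for us */

            /* ... device is guaranteed active here ... */

            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);
            return 0;
    }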
index e09d690..8aa0d27 100644 (file)
@@ -36,7 +36,7 @@
 #define MIDR_VARIANT(midr)     \
        (((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT)
 #define MIDR_IMPLEMENTOR_SHIFT 24
-#define MIDR_IMPLEMENTOR_MASK  (0xff << MIDR_IMPLEMENTOR_SHIFT)
+#define MIDR_IMPLEMENTOR_MASK  (0xffU << MIDR_IMPLEMENTOR_SHIFT)
 #define MIDR_IMPLEMENTOR(midr) \
        (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
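
The added 'U' suffix is not cosmetic: 0xff << 24 shifts a signed int into its sign bit, which is undefined behaviour in C, and the sign-extended mask can set high bits when it meets a wider type. A standalone sketch of the well-defined form:

    #include <stdint.h>

    #define IMPL_SHIFT 24
    #define IMPL_MASK  (0xffU << IMPL_SHIFT)  /* unsigned: no sign-bit shift */

    static inline uint32_t midr_implementor(uint32_t midr)
    {
            return (midr & IMPL_MASK) >> IMPL_SHIFT;  /* bits [31:24] */
    }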
 
 
 #define APPLE_CPU_PART_M1_ICESTORM     0x022
 #define APPLE_CPU_PART_M1_FIRESTORM    0x023
+#define APPLE_CPU_PART_M1_ICESTORM_PRO 0x024
+#define APPLE_CPU_PART_M1_FIRESTORM_PRO        0x025
+#define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028
+#define APPLE_CPU_PART_M1_FIRESTORM_MAX        0x029
 
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
 #define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
 #define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
+#define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO)
+#define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO)
+#define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
+#define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
 #define MIDR_FUJITSU_ERRATUM_010001            MIDR_FUJITSU_A64FX
 
 #ifndef __ASSEMBLY__
 
-#include "sysreg.h"
+#include <asm/sysreg.h>
 
 #define read_cpuid(reg)                        read_sysreg_s(SYS_ ## reg)
 
index c1b6ddc..3bb1343 100644 (file)
@@ -139,8 +139,10 @@ struct kvm_guest_debug_arch {
        __u64 dbg_wvr[KVM_ARM_MAX_DBG_REGS];
 };
 
+#define KVM_DEBUG_ARCH_HSR_HIGH_VALID  (1 << 0)
 struct kvm_debug_exit_arch {
        __u32 hsr;
+       __u32 hsr_high; /* ESR_EL2[61:32] */
        __u64 far;      /* used for watchpoints */
 };
 
@@ -332,6 +334,40 @@ struct kvm_arm_copy_mte_tags {
 #define KVM_ARM64_SVE_VLS_WORDS        \
        ((KVM_ARM64_SVE_VQ_MAX - KVM_ARM64_SVE_VQ_MIN) / 64 + 1)
 
+/* Bitmap feature firmware registers */
+#define KVM_REG_ARM_FW_FEAT_BMAP               (0x0016 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_FEAT_BMAP_REG(r)                (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+                                               KVM_REG_ARM_FW_FEAT_BMAP |      \
+                                               ((r) & 0xffff))
+
+#define KVM_REG_ARM_STD_BMAP                   KVM_REG_ARM_FW_FEAT_BMAP_REG(0)
+
+enum {
+       KVM_REG_ARM_STD_BIT_TRNG_V1_0   = 0,
+#ifdef __KERNEL__
+       KVM_REG_ARM_STD_BMAP_BIT_COUNT,
+#endif
+};
+
+#define KVM_REG_ARM_STD_HYP_BMAP               KVM_REG_ARM_FW_FEAT_BMAP_REG(1)
+
+enum {
+       KVM_REG_ARM_STD_HYP_BIT_PV_TIME = 0,
+#ifdef __KERNEL__
+       KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT,
+#endif
+};
+
+#define KVM_REG_ARM_VENDOR_HYP_BMAP            KVM_REG_ARM_FW_FEAT_BMAP_REG(2)
+
+enum {
+       KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT    = 0,
+       KVM_REG_ARM_VENDOR_HYP_BIT_PTP          = 1,
+#ifdef __KERNEL__
+       KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT,
+#endif
+};
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR      0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
index 73e643a..a77b915 100644 (file)
 #define X86_FEATURE_INVPCID_SINGLE     ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
 #define X86_FEATURE_HW_PSTATE          ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK      ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-/* FREE!                                ( 7*32+10) */
+#define X86_FEATURE_XCOMPACTED         ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
 #define X86_FEATURE_PTI                        ( 7*32+11) /* Kernel Page Table Isolation enabled */
-#define X86_FEATURE_RETPOLINE          ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_LFENCE   ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_KERNEL_IBRS                ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
+#define X86_FEATURE_RSB_VMEXIT         ( 7*32+13) /* "" Fill RSB on VM-Exit */
 #define X86_FEATURE_INTEL_PPIN         ( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2             ( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL      ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
 #define X86_FEATURE_SSBD               ( 7*32+17) /* Speculative Store Bypass Disable */
 #define X86_FEATURE_MBA                        ( 7*32+18) /* Memory Bandwidth Allocation */
 #define X86_FEATURE_RSB_CTXSW          ( 7*32+19) /* "" Fill RSB on context switches */
-/* FREE!                                ( 7*32+20) */
+#define X86_FEATURE_PERFMON_V2         ( 7*32+20) /* AMD Performance Monitoring Version 2 */
 #define X86_FEATURE_USE_IBPB           ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
 #define X86_FEATURE_USE_IBRS_FW                ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE  ( 7*32+23) /* "" Disable Speculative Store Bypass. */
 #define X86_FEATURE_VMW_VMMCALL                ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
 #define X86_FEATURE_PVUNLOCK           ( 8*32+20) /* "" PV unlock function */
 #define X86_FEATURE_VCPUPREEMPT                ( 8*32+21) /* "" PV vcpu_is_preempted function */
+#define X86_FEATURE_TDX_GUEST          ( 8*32+22) /* Intel Trust Domain Extensions Guest */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
 #define X86_FEATURE_FSGSBASE           ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
 #define X86_FEATURE_PER_THREAD_MBA     (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
 #define X86_FEATURE_SGX1               (11*32+ 8) /* "" Basic SGX */
 #define X86_FEATURE_SGX2               (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
+#define X86_FEATURE_ENTRY_IBPB         (11*32+10) /* "" Issue an IBPB on kernel entry */
+#define X86_FEATURE_RRSBA_CTRL         (11*32+11) /* "" RET prediction control */
+#define X86_FEATURE_RETPOLINE          (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE   (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_RETHUNK            (11*32+14) /* "" Use REturn THUNK */
+#define X86_FEATURE_UNRET              (11*32+15) /* "" AMD BTB untrain return */
+#define X86_FEATURE_USE_IBPB_FW                (11*32+16) /* "" Use IBPB during runtime firmware calls */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI           (12*32+ 4) /* AVX VNNI instructions */
 #define X86_FEATURE_VIRT_SSBD          (13*32+25) /* Virtualized Speculative Store Bypass Disable */
 #define X86_FEATURE_AMD_SSB_NO         (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 #define X86_FEATURE_CPPC               (13*32+27) /* Collaborative Processor Performance Control */
+#define X86_FEATURE_BTC_NO             (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
+#define X86_FEATURE_BRS                        (13*32+31) /* Branch Sampling available */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM             (14*32+ 0) /* Digital Thermal Sensor */
 #define X86_FEATURE_SEV                        (19*32+ 1) /* AMD Secure Encrypted Virtualization */
 #define X86_FEATURE_VM_PAGE_FLUSH      (19*32+ 2) /* "" VM Page Flush MSR is supported */
 #define X86_FEATURE_SEV_ES             (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
+#define X86_FEATURE_V_TSC_AUX          (19*32+ 9) /* "" Virtual TSC_AUX */
 #define X86_FEATURE_SME_COHERENT       (19*32+10) /* "" AMD hardware-enforced cache coherency */
 
 /*
 #define X86_BUG_TAA                    X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
 #define X86_BUG_ITLB_MULTIHIT          X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 #define X86_BUG_SRBDS                  X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
+#define X86_BUG_MMIO_STALE_DATA                X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
+#define X86_BUG_RETBLEED               X86_BUG(26) /* CPU is affected by RETBleed */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
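
Each X86_FEATURE_* value above packs (word * 32 + bit) into one number, and the per-CPU capability words are indexed by unpacking it again. A simplified sketch of the lookup (this approximates what the cpu_has()-style tests do; it is not the kernel's exact implementation):

    /* Feature f lives at bit (f % 32) of word (f / 32) in the caps array. */
    static inline int feature_set(const unsigned int *caps, unsigned int f)
    {
            return (caps[f / 32] >> (f % 32)) & 1;
    }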
index 1ae0fab..33d2cd0 100644 (file)
 # define DISABLE_PTI           (1 << (X86_FEATURE_PTI & 31))
 #endif
 
+#ifdef CONFIG_RETPOLINE
+# define DISABLE_RETPOLINE     0
+#else
+# define DISABLE_RETPOLINE     ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
+                                (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
+#endif
+
+#ifdef CONFIG_RETHUNK
+# define DISABLE_RETHUNK       0
+#else
+# define DISABLE_RETHUNK       (1 << (X86_FEATURE_RETHUNK & 31))
+#endif
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
+# define DISABLE_UNRET         0
+#else
+# define DISABLE_UNRET         (1 << (X86_FEATURE_UNRET & 31))
+#endif
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 # define DISABLE_ENQCMD                0
 #else
 # define DISABLE_SGX   (1 << (X86_FEATURE_SGX & 31))
 #endif
 
+#ifdef CONFIG_INTEL_TDX_GUEST
+# define DISABLE_TDX_GUEST     0
+#else
+# define DISABLE_TDX_GUEST     (1 << (X86_FEATURE_TDX_GUEST & 31))
+#endif
+
 /*
  * Make sure to add features to the correct mask
  */
 #define DISABLED_MASK5 0
 #define DISABLED_MASK6 0
 #define DISABLED_MASK7 (DISABLE_PTI)
-#define DISABLED_MASK8 0
+#define DISABLED_MASK8 (DISABLE_TDX_GUEST)
 #define DISABLED_MASK9 (DISABLE_SGX)
 #define DISABLED_MASK10        0
-#define DISABLED_MASK11        0
+#define DISABLED_MASK11        (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)
 #define DISABLED_MASK12        0
 #define DISABLED_MASK13        0
 #define DISABLED_MASK14        0
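
The DISABLE_* blocks turn Kconfig choices into compile-time constants: a feature configured out lands in its DISABLED_MASK word, so tests against it can constant-fold to false at build time. A simplified sketch of that combined test (an approximation, not the kernel's exact macros):

    /* A feature counts only if set in CPUID *and* not disabled at build time. */
    static inline int feature_usable(const unsigned int *caps,
                                     const unsigned int *disabled, unsigned int f)
    {
            if (disabled[f / 32] & (1u << (f % 32)))
                    return 0;       /* configured out: folds away when constant */
            return (caps[f / 32] >> (f % 32)) & 1;
    }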
index 403e83b..cc615be 100644 (file)
@@ -51,6 +51,8 @@
 #define SPEC_CTRL_STIBP                        BIT(SPEC_CTRL_STIBP_SHIFT)      /* STIBP mask */
 #define SPEC_CTRL_SSBD_SHIFT           2          /* Speculative Store Bypass Disable bit */
 #define SPEC_CTRL_SSBD                 BIT(SPEC_CTRL_SSBD_SHIFT)       /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_RRSBA_DIS_S_SHIFT    6          /* Disable RRSBA behavior */
+#define SPEC_CTRL_RRSBA_DIS_S          BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
 
 #define MSR_IA32_PRED_CMD              0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB                  BIT(0)     /* Indirect Branch Prediction Barrier */
@@ -93,6 +95,7 @@
 #define MSR_IA32_ARCH_CAPABILITIES     0x0000010a
 #define ARCH_CAP_RDCL_NO               BIT(0)  /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL              BIT(1)  /* Enhanced IBRS support */
+#define ARCH_CAP_RSBA                  BIT(2)  /* RET may use alternative branch predictors */
 #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3)  /* Skip L1D flush on vmentry */
 #define ARCH_CAP_SSB_NO                        BIT(4)  /*
                                                 * Not susceptible to Speculative Store Bypass
                                                 * Not susceptible to
                                                 * TSX Async Abort (TAA) vulnerabilities.
                                                 */
+#define ARCH_CAP_SBDR_SSDP_NO          BIT(13) /*
+                                                * Not susceptible to SBDR and SSDP
+                                                * variants of Processor MMIO stale data
+                                                * vulnerabilities.
+                                                */
+#define ARCH_CAP_FBSDP_NO              BIT(14) /*
+                                                * Not susceptible to FBSDP variant of
+                                                * Processor MMIO stale data
+                                                * vulnerabilities.
+                                                */
+#define ARCH_CAP_PSDP_NO               BIT(15) /*
+                                                * Not susceptible to PSDP variant of
+                                                * Processor MMIO stale data
+                                                * vulnerabilities.
+                                                */
+#define ARCH_CAP_FB_CLEAR              BIT(17) /*
+                                                * VERW clears CPU fill buffer
+                                                * even on MDS_NO CPUs.
+                                                */
+#define ARCH_CAP_FB_CLEAR_CTRL         BIT(18) /*
+                                                * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]
+                                                * bit available to control VERW
+                                                * behavior.
+                                                */
+#define ARCH_CAP_RRSBA                 BIT(19) /*
+                                                * Indicates RET may use predictors
+                                                * other than the RSB. With eIBRS
+                                                * enabled predictions in kernel mode
+                                                * are restricted to targets in
+                                                * kernel.
+                                                */
 
 #define MSR_IA32_FLUSH_CMD             0x0000010b
 #define L1D_FLUSH                      BIT(0)  /*
 #define MSR_IA32_MCU_OPT_CTRL          0x00000123
 #define RNGDS_MITG_DIS                 BIT(0)  /* SRBDS support */
 #define RTM_ALLOW                      BIT(1)  /* TSX development mode */
+#define FB_CLEAR_DIS                   BIT(3)  /* CPU Fill buffer clear disable */
 
 #define MSR_IA32_SYSENTER_CS           0x00000174
 #define MSR_IA32_SYSENTER_ESP          0x00000175
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF                        0xc00000e9
 
+#define MSR_ZEN2_SPECTRAL_CHICKEN      0xc00110e3
+#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT  BIT_ULL(1)
+
 /* Fam 16h MSRs */
 #define MSR_F16H_L2I_PERF_CTL          0xc0010230
 #define MSR_F16H_L2I_PERF_CTR          0xc0010231
index bf6e960..2161480 100644 (file)
@@ -428,11 +428,12 @@ struct kvm_sync_regs {
        struct kvm_vcpu_events events;
 };
 
-#define KVM_X86_QUIRK_LINT0_REENABLED     (1 << 0)
-#define KVM_X86_QUIRK_CD_NW_CLEARED       (1 << 1)
-#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE     (1 << 2)
-#define KVM_X86_QUIRK_OUT_7E_INC_RIP      (1 << 3)
-#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
+#define KVM_X86_QUIRK_LINT0_REENABLED          (1 << 0)
+#define KVM_X86_QUIRK_CD_NW_CLEARED            (1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE          (1 << 2)
+#define KVM_X86_QUIRK_OUT_7E_INC_RIP           (1 << 3)
+#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT     (1 << 4)
+#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN       (1 << 5)
 
 #define KVM_STATE_NESTED_FORMAT_VMX    0
 #define KVM_STATE_NESTED_FORMAT_SVM    1
index efa9693..f69c168 100644 (file)
 #define SVM_VMGEXIT_AP_JUMP_TABLE              0x80000005
 #define SVM_VMGEXIT_SET_AP_JUMP_TABLE          0
 #define SVM_VMGEXIT_GET_AP_JUMP_TABLE          1
+#define SVM_VMGEXIT_PSC                                0x80000010
+#define SVM_VMGEXIT_GUEST_REQUEST              0x80000011
+#define SVM_VMGEXIT_EXT_GUEST_REQUEST          0x80000012
+#define SVM_VMGEXIT_AP_CREATION                        0x80000013
+#define SVM_VMGEXIT_AP_CREATE_ON_INIT          0
+#define SVM_VMGEXIT_AP_CREATE                  1
+#define SVM_VMGEXIT_AP_DESTROY                 2
+#define SVM_VMGEXIT_HV_FEATURES                        0x8000fffd
 #define SVM_VMGEXIT_UNSUPPORTED_EVENT          0x8000ffff
 
 /* Exit code reserved for hypervisor/software use */
        { SVM_VMGEXIT_NMI_COMPLETE,     "vmgexit_nmi_complete" }, \
        { SVM_VMGEXIT_AP_HLT_LOOP,      "vmgexit_ap_hlt_loop" }, \
        { SVM_VMGEXIT_AP_JUMP_TABLE,    "vmgexit_ap_jump_table" }, \
+       { SVM_VMGEXIT_PSC,              "vmgexit_page_state_change" }, \
+       { SVM_VMGEXIT_GUEST_REQUEST,    "vmgexit_guest_request" }, \
+       { SVM_VMGEXIT_EXT_GUEST_REQUEST, "vmgexit_ext_guest_request" }, \
+       { SVM_VMGEXIT_AP_CREATION,      "vmgexit_ap_creation" }, \
+       { SVM_VMGEXIT_HV_FEATURES,      "vmgexit_hypervisor_feature" }, \
        { SVM_EXIT_ERR,         "invalid_guest_state" }
 
 
index 6491fa8..10bc88c 100644 (file)
@@ -32,11 +32,16 @@ struct unwind_hint {
  *
  * UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function.
  * Useful for code which doesn't have an ELF function annotation.
+ *
+ * UNWIND_HINT_ENTRY: machine entry without stack, SYSCALL/SYSENTER etc.
  */
 #define UNWIND_HINT_TYPE_CALL          0
 #define UNWIND_HINT_TYPE_REGS          1
 #define UNWIND_HINT_TYPE_REGS_PARTIAL  2
 #define UNWIND_HINT_TYPE_FUNC          3
+#define UNWIND_HINT_TYPE_ENTRY         4
+#define UNWIND_HINT_TYPE_SAVE          5
+#define UNWIND_HINT_TYPE_RESTORE       6
 
 #ifdef CONFIG_OBJTOOL
 
@@ -124,7 +129,7 @@ struct unwind_hint {
  * the debuginfo as necessary.  It will also warn if it sees any
  * inconsistencies.
  */
-.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
 .Lunwind_hint_ip_\@:
        .pushsection .discard.unwind_hints
                /* struct unwind_hint */
@@ -143,6 +148,12 @@ struct unwind_hint {
        .popsection
 .endm
 
+.macro STACK_FRAME_NON_STANDARD_FP func:req
+#ifdef CONFIG_FRAME_POINTER
+       STACK_FRAME_NON_STANDARD \func
+#endif
+.endm
+
 .macro ANNOTATE_NOENDBR
 .Lhere_\@:
        .pushsection .discard.noendbr
@@ -171,7 +182,7 @@ struct unwind_hint {
 #define ASM_REACHABLE
 #else
 #define ANNOTATE_INTRA_FUNCTION_CALL
-.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
 .endm
 .macro STACK_FRAME_NON_STANDARD func:req
 .endm
index 0197042..1ecdb91 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _ASM_GENERIC_FCNTL_H
 #define _ASM_GENERIC_FCNTL_H
 
@@ -90,7 +91,7 @@
 
 /* a horrid kludge trying to make sure that this will fail on old kernels */
 #define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
-#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)
+#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)
 
 #ifndef O_NDELAY
 #define O_NDELAY       O_NONBLOCK
 #define F_GETSIG       11      /* for sockets. */
 #endif
 
+#if __BITS_PER_LONG == 32 || defined(__KERNEL__)
 #ifndef F_GETLK64
 #define F_GETLK64      12      /*  using 'struct flock64' */
 #define F_SETLK64      13
 #define F_SETLKW64     14
 #endif
+#endif /* __BITS_PER_LONG == 32 || defined(__KERNEL__) */
 
 #ifndef F_SETOWN_EX
 #define F_SETOWN_EX    15
@@ -178,6 +181,10 @@ struct f_owner_ex {
                                   blocking */
 #define LOCK_UN                8       /* remove lock */
 
+/*
+ * LOCK_MAND support has been removed from the kernel. We leave the symbols
+ * here to not break legacy builds, but these should not be used in new code.
+ */
 #define LOCK_MAND      32      /* This is a mandatory flock ... */
 #define LOCK_READ      64      /* which allows concurrent read operations */
 #define LOCK_WRITE     128     /* which allows concurrent write operations */
@@ -185,6 +192,7 @@ struct f_owner_ex {
 
 #define F_LINUX_SPECIFIC_BASE  1024
 
+#ifndef HAVE_ARCH_STRUCT_FLOCK
 struct flock {
        short   l_type;
        short   l_whence;
@@ -209,5 +217,6 @@ struct flock64 {
        __ARCH_FLOCK64_PAD
 #endif
 };
+#endif /* HAVE_ARCH_STRUCT_FLOCK */
 
 #endif /* _ASM_GENERIC_FCNTL_H */
index 05c3642..a2def7b 100644 (file)
@@ -154,25 +154,77 @@ enum i915_mocs_table_index {
        I915_MOCS_CACHED,
 };
 
-/*
+/**
+ * enum drm_i915_gem_engine_class - uapi engine type enumeration
+ *
  * Different engines serve different roles, and there may be more than one
- * engine serving each role. enum drm_i915_gem_engine_class provides a
- * classification of the role of the engine, which may be used when requesting
- * operations to be performed on a certain subset of engines, or for providing
- * information about that group.
+ * engine serving each role.  This enum provides a classification of the role
+ * of the engine, which may be used when requesting operations to be performed
+ * on a certain subset of engines, or for providing information about that
+ * group.
  */
 enum drm_i915_gem_engine_class {
+       /**
+        * @I915_ENGINE_CLASS_RENDER:
+        *
+        * Render engines support instructions used for 3D, Compute (GPGPU),
+        * and programmable media workloads.  These instructions fetch data and
+        * dispatch individual work items to threads that operate in parallel.
+        * The threads run small programs (called "kernels" or "shaders") on
+        * the GPU's execution units (EUs).
+        */
        I915_ENGINE_CLASS_RENDER        = 0,
+
+       /**
+        * @I915_ENGINE_CLASS_COPY:
+        *
+        * Copy engines (also referred to as "blitters") support instructions
+        * that move blocks of data from one location in memory to another,
+        * or that fill a specified location of memory with fixed data.
+        * Copy engines can perform pre-defined logical or bitwise operations
+        * on the source, destination, or pattern data.
+        */
        I915_ENGINE_CLASS_COPY          = 1,
+
+       /**
+        * @I915_ENGINE_CLASS_VIDEO:
+        *
+        * Video engines (also referred to as "bit stream decode" (BSD) or
+        * "vdbox") support instructions that perform fixed-function media
+        * decode and encode.
+        */
        I915_ENGINE_CLASS_VIDEO         = 2,
+
+       /**
+        * @I915_ENGINE_CLASS_VIDEO_ENHANCE:
+        *
+        * Video enhancement engines (also referred to as "vebox") support
+        * instructions related to image enhancement.
+        */
        I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
 
-       /* should be kept compact */
+       /**
+        * @I915_ENGINE_CLASS_COMPUTE:
+        *
+        * Compute engines support a subset of the instructions available
+        * on render engines:  compute engines support Compute (GPGPU) and
+        * programmable media workloads, but do not support the 3D pipeline.
+        */
+       I915_ENGINE_CLASS_COMPUTE       = 4,
+
+       /* Values in this enum should be kept compact. */
 
+       /**
+        * @I915_ENGINE_CLASS_INVALID:
+        *
+        * Placeholder value to represent an invalid engine class assignment.
+        */
        I915_ENGINE_CLASS_INVALID       = -1
 };
 
-/*
+/**
+ * struct i915_engine_class_instance - Engine class/instance identifier
+ *
  * There may be more than one engine fulfilling any role within the system.
  * Each engine of a class is given a unique instance number and therefore
  * any engine can be specified by its class:instance tuplet. APIs that allow
@@ -180,10 +232,21 @@ enum drm_i915_gem_engine_class {
  * for this identification.
  */
 struct i915_engine_class_instance {
-       __u16 engine_class; /* see enum drm_i915_gem_engine_class */
-       __u16 engine_instance;
+       /**
+        * @engine_class:
+        *
+        * Engine class from enum drm_i915_gem_engine_class
+        */
+       __u16 engine_class;
 #define I915_ENGINE_CLASS_INVALID_NONE -1
 #define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
+
+       /**
+        * @engine_instance:
+        *
+        * Engine instance.
+        */
+       __u16 engine_instance;
 };
 
 /**
@@ -2657,24 +2720,65 @@ enum drm_i915_perf_record_type {
        DRM_I915_PERF_RECORD_MAX /* non-ABI */
 };
 
-/*
+/**
+ * struct drm_i915_perf_oa_config
+ *
  * Structure to upload perf dynamic configuration into the kernel.
  */
 struct drm_i915_perf_oa_config {
-       /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
+       /**
+        * @uuid:
+        *
+        * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x"
+        */
        char uuid[36];
 
+       /**
+        * @n_mux_regs:
+        *
+        * Number of mux regs in &mux_regs_ptr.
+        */
        __u32 n_mux_regs;
+
+       /**
+        * @n_boolean_regs:
+        *
+        * Number of boolean regs in &boolean_regs_ptr.
+        */
        __u32 n_boolean_regs;
+
+       /**
+        * @n_flex_regs:
+        *
+        * Number of flex regs in &flex_regs_ptr.
+        */
        __u32 n_flex_regs;
 
-       /*
-        * These fields are pointers to tuples of u32 values (register address,
-        * value). For example the expected length of the buffer pointed by
-        * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+       /**
+        * @mux_regs_ptr:
+        *
+        * Pointer to tuples of u32 values (register address, value) for mux
+        * registers.  Expected length of buffer is (2 * sizeof(u32) *
+        * &n_mux_regs).
         */
        __u64 mux_regs_ptr;
+
+       /**
+        * @boolean_regs_ptr:
+        *
+        * Pointer to tuples of u32 values (register address, value) for
+        * boolean registers.  Expected length of buffer is (2 * sizeof(u32) *
+        * &n_boolean_regs).
+        */
        __u64 boolean_regs_ptr;
+
+       /**
+        * @flex_regs_ptr:
+        *
+        * Pointer to tuples of u32 values (register address, value) for
+        * flex registers.  Expected length of buffer is (2 * sizeof(u32) *
+        * &n_flex_regs).
+        */
        __u64 flex_regs_ptr;
 };
 
@@ -2685,12 +2789,24 @@ struct drm_i915_perf_oa_config {
  * @data_ptr is also depends on the specific @query_id.
  */
 struct drm_i915_query_item {
-       /** @query_id: The id for this query */
+       /**
+        * @query_id:
+        *
+        * The id for this query.  Currently accepted query IDs are:
+        *  - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info)
+        *  - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info)
+        *  - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config)
+        *  - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
+        *  - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
+        *  - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
+        */
        __u64 query_id;
-#define DRM_I915_QUERY_TOPOLOGY_INFO    1
-#define DRM_I915_QUERY_ENGINE_INFO     2
-#define DRM_I915_QUERY_PERF_CONFIG      3
-#define DRM_I915_QUERY_MEMORY_REGIONS   4
+#define DRM_I915_QUERY_TOPOLOGY_INFO           1
+#define DRM_I915_QUERY_ENGINE_INFO             2
+#define DRM_I915_QUERY_PERF_CONFIG             3
+#define DRM_I915_QUERY_MEMORY_REGIONS          4
+#define DRM_I915_QUERY_HWCONFIG_BLOB           5
+#define DRM_I915_QUERY_GEOMETRY_SUBSLICES      6
 /* Must be kept compact -- no holes and well documented */
 
        /**
@@ -2706,14 +2822,17 @@ struct drm_i915_query_item {
        /**
         * @flags:
         *
-        * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
+        * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
         *
-        * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
+        * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the
         * following:
         *
-        *      - DRM_I915_QUERY_PERF_CONFIG_LIST
-        *      - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
-        *      - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
+        *      - %DRM_I915_QUERY_PERF_CONFIG_LIST
+        *      - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+        *      - %DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
+        *
+        * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES must contain
+        * a struct i915_engine_class_instance that references a render engine.
         */
        __u32 flags;
 #define DRM_I915_QUERY_PERF_CONFIG_LIST          1
@@ -2771,66 +2890,112 @@ struct drm_i915_query {
        __u64 items_ptr;
 };
 
-/*
- * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
- *
- * data: contains the 3 pieces of information :
- *
- * - the slice mask with one bit per slice telling whether a slice is
- *   available. The availability of slice X can be queried with the following
- *   formula :
- *
- *           (data[X / 8] >> (X % 8)) & 1
- *
- * - the subslice mask for each slice with one bit per subslice telling
- *   whether a subslice is available. Gen12 has dual-subslices, which are
- *   similar to two gen11 subslices. For gen12, this array represents dual-
- *   subslices. The availability of subslice Y in slice X can be queried
- *   with the following formula :
- *
- *           (data[subslice_offset +
- *                 X * subslice_stride +
- *                 Y / 8] >> (Y % 8)) & 1
- *
- * - the EU mask for each subslice in each slice with one bit per EU telling
- *   whether an EU is available. The availability of EU Z in subslice Y in
- *   slice X can be queried with the following formula :
+/**
+ * struct drm_i915_query_topology_info
  *
- *           (data[eu_offset +
- *                 (X * max_subslices + Y) * eu_stride +
- *                 Z / 8] >> (Z % 8)) & 1
+ * Describes slice/subslice/EU information queried by
+ * %DRM_I915_QUERY_TOPOLOGY_INFO
  */
 struct drm_i915_query_topology_info {
-       /*
+       /**
+        * @flags:
+        *
         * Unused for now. Must be cleared to zero.
         */
        __u16 flags;
 
+       /**
+        * @max_slices:
+        *
+        * The number of bits used to express the slice mask.
+        */
        __u16 max_slices;
+
+       /**
+        * @max_subslices:
+        *
+        * The number of bits used to express the subslice mask.
+        */
        __u16 max_subslices;
+
+       /**
+        * @max_eus_per_subslice:
+        *
+        * The number of bits in the EU mask that correspond to a single
+        * subslice's EUs.
+        */
        __u16 max_eus_per_subslice;
 
-       /*
+       /**
+        * @subslice_offset:
+        *
         * Offset in data[] at which the subslice masks are stored.
         */
        __u16 subslice_offset;
 
-       /*
+       /**
+        * @subslice_stride:
+        *
         * Stride at which each of the subslice masks for each slice are
         * stored.
         */
        __u16 subslice_stride;
 
-       /*
+       /**
+        * @eu_offset:
+        *
         * Offset in data[] at which the EU masks are stored.
         */
        __u16 eu_offset;
 
-       /*
+       /**
+        * @eu_stride:
+        *
         * Stride at which each of the EU masks for each subslice are stored.
         */
        __u16 eu_stride;
 
+       /**
+        * @data:
+        *
+        * Contains 3 pieces of information :
+        *
+        * - The slice mask with one bit per slice telling whether a slice is
+        *   available. The availability of slice X can be queried with the
+        *   following formula :
+        *
+        *   .. code:: c
+        *
+        *      (data[X / 8] >> (X % 8)) & 1
+        *
+        *   Starting with Xe_HP platforms, Intel hardware no longer has
+        *   traditional slices so i915 will always report a single slice
+        *   (hardcoded slicemask = 0x1) which contains all of the platform's
+        *   subslices.  I.e., the mask here does not reflect any of the newer
+        *   hardware concepts such as "gslices" or "cslices" since userspace
+        *   is capable of inferring those from the subslice mask.
+        *
+        * - The subslice mask for each slice with one bit per subslice telling
+        *   whether a subslice is available.  Starting with Gen12 we use the
+        *   term "subslice" to refer to what the hardware documentation
+        *   describes as a "dual-subslice".  The availability of subslice Y
+        *   in slice X can be queried with the following formula :
+        *
+        *   .. code:: c
+        *
+        *      (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1
+        *
+        * - The EU mask for each subslice in each slice, with one bit per EU
+        *   telling whether an EU is available. The availability of EU Z in
+        *   subslice Y in slice X can be queried with the following formula :
+        *
+        *   .. code:: c
+        *
+        *      (data[eu_offset +
+        *            (X * max_subslices + Y) * eu_stride +
+        *            Z / 8
+        *       ] >> (Z % 8)) & 1
+        */
        __u8 data[];
 };
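
The three formulas documented above translate directly into code; a sketch,
assuming `info` points at a buffer the kernel filled in for
%DRM_I915_QUERY_TOPOLOGY_INFO:

.. code:: c

   #include <drm/i915_drm.h>

   static int slice_available(const struct drm_i915_query_topology_info *info,
                              int x)
   {
           return (info->data[x / 8] >> (x % 8)) & 1;
   }

   static int subslice_available(const struct drm_i915_query_topology_info *info,
                                 int x, int y)
   {
           return (info->data[info->subslice_offset +
                              x * info->subslice_stride +
                              y / 8] >> (y % 8)) & 1;
   }

   static int eu_available(const struct drm_i915_query_topology_info *info,
                           int x, int y, int z)
   {
           return (info->data[info->eu_offset +
                              (x * info->max_subslices + y) * info->eu_stride +
                              z / 8] >> (z % 8)) & 1;
   }
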
 
@@ -2951,52 +3116,68 @@ struct drm_i915_query_engine_info {
        struct drm_i915_engine_info engines[];
 };
 
-/*
- * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
+/**
+ * struct drm_i915_query_perf_config
+ *
+ * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and
+ * %DRM_I915_QUERY_GEOMETRY_SUBSLICES.
  */
 struct drm_i915_query_perf_config {
        union {
-               /*
-                * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
-                * this fields to the number of configurations available.
+               /**
+                * @n_configs:
+                *
+                * When &drm_i915_query_item.flags ==
+                * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this field to
+                * the number of configurations available.
                 */
                __u64 n_configs;
 
-               /*
-                * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
-                * i915 will use the value in this field as configuration
-                * identifier to decide what data to write into config_ptr.
+               /**
+                * @config:
+                *
+                * When &drm_i915_query_item.flags ==
+                * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the
+                * value in this field as configuration identifier to decide
+                * what data to write into config_ptr.
                 */
                __u64 config;
 
-               /*
-                * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
-                * i915 will use the value in this field as configuration
-                * identifier to decide what data to write into config_ptr.
+               /**
+                * @uuid:
+                *
+                * When &drm_i915_query_item.flags ==
+                * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the
+                * value in this field as configuration identifier to decide
+                * what data to write into config_ptr.
                 *
                 * String formatted like "%08x-%04x-%04x-%04x-%012x"
                 */
                char uuid[36];
        };
 
-       /*
+       /**
+        * @flags:
+        *
         * Unused for now. Must be cleared to zero.
         */
        __u32 flags;
 
-       /*
-        * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
-        * write an array of __u64 of configuration identifiers.
+       /**
+        * @data:
         *
-        * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will
-        * write a struct drm_i915_perf_oa_config. If the following fields of
-        * drm_i915_perf_oa_config are set not set to 0, i915 will write into
-        * the associated pointers the values of submitted when the
+        * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST,
+        * i915 will write an array of __u64 of configuration identifiers.
+        *
+        * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA,
+        * i915 will write a struct drm_i915_perf_oa_config. If the following
+        * fields of struct drm_i915_perf_oa_config are not set to 0, i915 will
+        * write into the associated pointers the values submitted when the
         * configuration was created :
         *
-        *         - n_mux_regs
-        *         - n_boolean_regs
-        *         - n_flex_regs
+        *  - &drm_i915_perf_oa_config.n_mux_regs
+        *  - &drm_i915_perf_oa_config.n_boolean_regs
+        *  - &drm_i915_perf_oa_config.n_flex_regs
         */
        __u8 data[];
 };
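
Like the other query ids, this is driven with the usual two-pass i915 query
pattern: a first DRM_IOCTL_I915_QUERY call with length == 0 asks the kernel for
the required size, and a second call fills the buffer. A sketch for
%DRM_I915_QUERY_PERF_CONFIG_LIST (error handling trimmed; `fd` is assumed to be
an open DRM device file descriptor):

.. code:: c

   #include <stdint.h>
   #include <stdlib.h>
   #include <sys/ioctl.h>
   #include <drm/i915_drm.h>

   static struct drm_i915_query_perf_config *query_perf_list(int fd)
   {
           struct drm_i915_query_item item = {
                   .query_id = DRM_I915_QUERY_PERF_CONFIG,
                   .flags    = DRM_I915_QUERY_PERF_CONFIG_LIST,
           };
           struct drm_i915_query query = {
                   .num_items = 1,
                   .items_ptr = (uintptr_t)&item,
           };
           struct drm_i915_query_perf_config *cfg;

           /* Pass 1: size probe, the kernel fills item.length. */
           if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
                   return NULL;

           cfg = calloc(1, item.length);
           item.data_ptr = (uintptr_t)cfg;

           /* Pass 2: fetch the actual data. */
           if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query)) {
                   free(cfg);
                   return NULL;
           }
           /* cfg->n_configs and the trailing __u64 ids are now valid. */
           return cfg;
   }
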
@@ -3135,6 +3316,16 @@ struct drm_i915_query_memory_regions {
 };
 
 /**
+ * DOC: GuC HWCONFIG blob uAPI
+ *
+ * The GuC produces a blob with information about the current device.
+ * i915 reads this blob from GuC and makes it available via this uAPI.
+ *
+ * The format and meaning of the blob content are documented in the
+ * Programmer's Reference Manual.
+ */
+
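
The blob is retrieved with the same two-pass query pattern, just with a
different id; a sketch (same includes as the perf-config sketch earlier, and
@flags is assumed to be 0 for this query):

.. code:: c

   /* Pass 1 of the query returns the blob size in item.length. */
   static int hwconfig_blob_size(int fd)
   {
           struct drm_i915_query_item item = {
                   .query_id = DRM_I915_QUERY_HWCONFIG_BLOB,
           };
           struct drm_i915_query query = {
                   .num_items = 1,
                   .items_ptr = (uintptr_t)&item,
           };

           if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query))
                   return -1;
           return item.length;     /* allocate this, set data_ptr, re-issue */
   }
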
+/**
  * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
  * extension support using struct i915_user_extension.
  *
index f4009db..ef78e0e 100644 (file)
@@ -5222,22 +5222,25 @@ union bpf_attr {
  *     Return
  *             Nothing. Always succeeds.
  *
- * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset)
+ * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset, u64 flags)
  *     Description
  *             Read *len* bytes from *src* into *dst*, starting from *offset*
  *             into *src*.
+ *             *flags* is currently unused.
  *     Return
  *             0 on success, -E2BIG if *offset* + *len* exceeds the length
- *             of *src*'s data, -EINVAL if *src* is an invalid dynptr.
+ *             of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
+ *             *flags* is not 0.
  *
- * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len)
+ * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
  *     Description
  *             Write *len* bytes from *src* into *dst*, starting from *offset*
  *             into *dst*.
+ *             *flags* is currently unused.
  *     Return
  *             0 on success, -E2BIG if *offset* + *len* exceeds the length
  *             of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
- *             is a read-only dynptr.
+ *             is a read-only dynptr or if *flags* is not 0.
  *
  * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)
  *     Description
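
From a BPF program the change is simply a new trailing argument that must be 0
for now; a fragment of a program built with <bpf/bpf_helpers.h>, assuming `ptr`
is a struct bpf_dynptr initialized earlier (for example via
bpf_ringbuf_reserve_dynptr()):

.. code:: c

   __u8 buf[8] = {};

   /* Read 8 bytes from offset 0; the new flags argument must be 0. */
   if (bpf_dynptr_read(buf, sizeof(buf), &ptr, 0, 0) < 0)
           return 0;       /* -E2BIG or -EINVAL */

   /* Write them back at offset 0, again with flags == 0. */
   if (bpf_dynptr_write(&ptr, 0, buf, sizeof(buf), 0) < 0)
           return 0;
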
index 6a184d2..860f867 100644 (file)
@@ -444,6 +444,9 @@ struct kvm_run {
 #define KVM_SYSTEM_EVENT_SHUTDOWN       1
 #define KVM_SYSTEM_EVENT_RESET          2
 #define KVM_SYSTEM_EVENT_CRASH          3
+#define KVM_SYSTEM_EVENT_WAKEUP         4
+#define KVM_SYSTEM_EVENT_SUSPEND        5
+#define KVM_SYSTEM_EVENT_SEV_TERM       6
                        __u32 type;
                        __u32 ndata;
                        union {
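
These values arrive in struct kvm_run's system_event on a
KVM_EXIT_SYSTEM_EVENT exit; a VMM-side sketch (`run` is assumed to be the
vCPU's mmap'ed struct kvm_run):

.. code:: c

   #include <linux/kvm.h>

   static void handle_system_event(struct kvm_run *run)
   {
           if (run->exit_reason != KVM_EXIT_SYSTEM_EVENT)
                   return;

           switch (run->system_event.type) {
           case KVM_SYSTEM_EVENT_SUSPEND:
                   /* Guest asked to suspend; the vCPU sits in
                    * KVM_MP_STATE_SUSPENDED until a wakeup event. */
                   break;
           case KVM_SYSTEM_EVENT_WAKEUP:
                   /* A suspended vCPU became runnable again. */
                   break;
           case KVM_SYSTEM_EVENT_SEV_TERM:
                   /* SEV guest requested termination. */
                   break;
           }
   }
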
@@ -646,6 +649,7 @@ struct kvm_vapic_addr {
 #define KVM_MP_STATE_OPERATING         7
 #define KVM_MP_STATE_LOAD              8
 #define KVM_MP_STATE_AP_RESET_HOLD     9
+#define KVM_MP_STATE_SUSPENDED         10
 
 struct kvm_mp_state {
        __u32 mp_state;
@@ -1150,8 +1154,9 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_MEM_OP_EXTENSION 211
 #define KVM_CAP_PMU_CAPABILITY 212
 #define KVM_CAP_DISABLE_QUIRKS2 213
-/* #define KVM_CAP_VM_TSC_CONTROL 214 */
+#define KVM_CAP_VM_TSC_CONTROL 214
 #define KVM_CAP_SYSTEM_EVENT_DATA 215
+#define KVM_CAP_ARM_SYSTEM_SUSPEND 216
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1240,6 +1245,7 @@ struct kvm_x86_mce {
 #define KVM_XEN_HVM_CONFIG_SHARED_INFO         (1 << 2)
 #define KVM_XEN_HVM_CONFIG_RUNSTATE            (1 << 3)
 #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL       (1 << 4)
+#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND         (1 << 5)
 
 struct kvm_xen_hvm_config {
        __u32 flags;
@@ -1478,7 +1484,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_SET_PIT2              _IOW(KVMIO,  0xa0, struct kvm_pit_state2)
 /* Available with KVM_CAP_PPC_GET_PVINFO */
 #define KVM_PPC_GET_PVINFO       _IOW(KVMIO,  0xa1, struct kvm_ppc_pvinfo)
-/* Available with KVM_CAP_TSC_CONTROL */
+/* Available with KVM_CAP_TSC_CONTROL for a vCPU, or with
+ * KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
 #define KVM_SET_TSC_KHZ           _IO(KVMIO,  0xa2)
 #define KVM_GET_TSC_KHZ           _IO(KVMIO,  0xa3)
 /* Available with KVM_CAP_PCI_2_3 */
@@ -1694,6 +1701,32 @@ struct kvm_xen_hvm_attr {
                struct {
                        __u64 gfn;
                } shared_info;
+               struct {
+                       __u32 send_port;
+                       __u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */
+                       __u32 flags;
+#define KVM_XEN_EVTCHN_DEASSIGN                (1 << 0)
+#define KVM_XEN_EVTCHN_UPDATE          (1 << 1)
+#define KVM_XEN_EVTCHN_RESET           (1 << 2)
+                       /*
+                        * Events sent by the guest are either looped back to
+                        * the guest itself (potentially on a different port#)
+                        * or signalled via an eventfd.
+                        */
+                       union {
+                               struct {
+                                       __u32 port;
+                                       __u32 vcpu;
+                                       __u32 priority;
+                               } port;
+                               struct {
+                                       __u32 port; /* Zero for eventfd */
+                                       __s32 fd;
+                               } eventfd;
+                               __u32 padding[4];
+                       } deliver;
+               } evtchn;
+               __u32 xen_version;
                __u64 pad[8];
        } u;
 };
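
To give a feel for the new union member, a sketch of routing a guest event
channel to an eventfd (EVTCHNSTAT_interdomain comes from the Xen interface
headers; `vm_fd` and `efd` are assumed to be open descriptors, and the VMM is
assumed to have checked KVM_CAP_XEN_HVM for KVM_XEN_HVM_CONFIG_EVTCHN_SEND):

.. code:: c

   #include <sys/ioctl.h>
   #include <linux/kvm.h>

   static int route_port3_to_eventfd(int vm_fd, int efd)
   {
           struct kvm_xen_hvm_attr attr = {
                   .type = KVM_XEN_ATTR_TYPE_EVTCHN,
                   .u.evtchn = {
                           .send_port = 3,         /* guest-visible port */
                           .type      = EVTCHNSTAT_interdomain,
                           .deliver.eventfd = {
                                   .port = 0,      /* zero selects eventfd delivery */
                                   .fd   = efd,
                           },
                   },
           };

           return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
   }
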
@@ -1702,11 +1735,17 @@ struct kvm_xen_hvm_attr {
 #define KVM_XEN_ATTR_TYPE_LONG_MODE            0x0
 #define KVM_XEN_ATTR_TYPE_SHARED_INFO          0x1
 #define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR                0x2
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_ATTR_TYPE_EVTCHN               0x3
+#define KVM_XEN_ATTR_TYPE_XEN_VERSION          0x4
 
 /* Per-vCPU Xen attributes */
 #define KVM_XEN_VCPU_GET_ATTR  _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
 #define KVM_XEN_VCPU_SET_ATTR  _IOW(KVMIO,  0xcb, struct kvm_xen_vcpu_attr)
 
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_HVM_EVTCHN_SEND        _IOW(KVMIO,  0xd0, struct kvm_irq_routing_xen_evtchn)
+
 #define KVM_GET_SREGS2             _IOR(KVMIO,  0xcc, struct kvm_sregs2)
 #define KVM_SET_SREGS2             _IOW(KVMIO,  0xcd, struct kvm_sregs2)
 
@@ -1724,6 +1763,13 @@ struct kvm_xen_vcpu_attr {
                        __u64 time_blocked;
                        __u64 time_offline;
                } runstate;
+               __u32 vcpu_id;
+               struct {
+                       __u32 port;
+                       __u32 priority;
+                       __u64 expires_ns;
+               } timer;
+               __u8 vector;
        } u;
 };
 
@@ -1734,6 +1780,10 @@ struct kvm_xen_vcpu_attr {
 #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT        0x3
 #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA   0x4
 #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID         0x6
+#define KVM_XEN_VCPU_ATTR_TYPE_TIMER           0x7
+#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR   0x8
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
@@ -2033,7 +2083,8 @@ struct kvm_stats_header {
 #define KVM_STATS_UNIT_BYTES           (0x1 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_SECONDS         (0x2 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_CYCLES          (0x3 << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_MAX             KVM_STATS_UNIT_CYCLES
+#define KVM_STATS_UNIT_BOOLEAN         (0x4 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_MAX             KVM_STATS_UNIT_BOOLEAN
 
 #define KVM_STATS_BASE_SHIFT           8
 #define KVM_STATS_BASE_MASK            (0xF << KVM_STATS_BASE_SHIFT)
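
Consumers of the binary stats interface decode the unit with the existing
mask; a sketch, assuming `desc` points at a struct kvm_stats_desc read from
the stats file descriptor:

.. code:: c

   #include <linux/kvm.h>

   static void classify_unit(const struct kvm_stats_desc *desc)
   {
           switch (desc->flags & KVM_STATS_UNIT_MASK) {
           case KVM_STATS_UNIT_BOOLEAN:
                   /* Value is a flag (0 or 1); base/exponent carry no scale. */
                   break;
           case KVM_STATS_UNIT_CYCLES:
           case KVM_STATS_UNIT_SECONDS:
           case KVM_STATS_UNIT_BYTES:
           case KVM_STATS_UNIT_NONE:
                   break;
           }
   }
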
index e998764..a5e06dc 100644 (file)
@@ -272,6 +272,15 @@ struct prctl_mm_map {
 # define PR_SCHED_CORE_SCOPE_THREAD_GROUP      1
 # define PR_SCHED_CORE_SCOPE_PROCESS_GROUP     2
 
+/* arm64 Scalable Matrix Extension controls */
+/* Flag values must be in sync with SVE versions */
+#define PR_SME_SET_VL                  63      /* set task vector length */
+# define PR_SME_SET_VL_ONEXEC          (1 << 18) /* defer effect until exec */
+#define PR_SME_GET_VL                  64      /* get task vector length */
+/* Bits common to PR_SME_SET_VL and PR_SME_GET_VL */
+# define PR_SME_VL_LEN_MASK            0xffff
+# define PR_SME_VL_INHERIT             (1 << 17) /* inherit across exec */
+
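
Usage mirrors the existing SVE controls; a sketch of requesting a vector
length and reading back what the kernel granted (the 256-byte value is purely
illustrative):

.. code:: c

   #include <sys/prctl.h>

   static int setup_sme_vl(void)
   {
           if (prctl(PR_SME_SET_VL, 256) < 0)
                   return -1;      /* no SME, or length rejected */

           /* Strip PR_SME_VL_INHERIT and friends from the result. */
           return prctl(PR_SME_GET_VL) & PR_SME_VL_LEN_MASK;
   }
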
 #define PR_SET_VMA             0x53564d41
 # define PR_SET_VMA_ANON_NAME          0
 
index 5d99e7c..cab645d 100644 (file)
 
 /* Set or get vhost backend capability */
 
-/* Use message type V2 */
-#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
-/* IOTLB can accept batching hints */
-#define VHOST_BACKEND_F_IOTLB_BATCH  0x2
-
 #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
 #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
 
 /* Get the valid iova range */
 #define VHOST_VDPA_GET_IOVA_RANGE      _IOR(VHOST_VIRTIO, 0x78, \
                                             struct vhost_vdpa_iova_range)
-
 /* Get the config size */
 #define VHOST_VDPA_GET_CONFIG_SIZE     _IOR(VHOST_VIRTIO, 0x79, __u32)
 
 /* Get the count of all virtqueues */
 #define VHOST_VDPA_GET_VQS_COUNT       _IOR(VHOST_VIRTIO, 0x80, __u32)
 
+/* Get the number of virtqueue groups. */
+#define VHOST_VDPA_GET_GROUP_NUM       _IOR(VHOST_VIRTIO, 0x81, __u32)
+
+/* Get the number of address spaces. */
+#define VHOST_VDPA_GET_AS_NUM          _IOR(VHOST_VIRTIO, 0x7A, unsigned int)
+
+/* Get the group for a virtqueue: read index, write group in num.
+ * The virtqueue index is stored in the index field of
+ * vhost_vring_state. The group for this specific virtqueue is
+ * returned via the num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_GET_VRING_GROUP     _IOWR(VHOST_VIRTIO, 0x7B,       \
+                                             struct vhost_vring_state)
+/* Set the ASID for a virtqueue group. The group index is stored in
+ * the index field of vhost_vring_state; the ASID associated with this
+ * group is stored in the num field of vhost_vring_state.
+ */
+ */
+#define VHOST_VDPA_SET_GROUP_ASID      _IOW(VHOST_VIRTIO, 0x7C, \
+                                            struct vhost_vring_state)
+
 #endif
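
Putting the two new ioctls together, a userspace sketch of moving the group
containing virtqueue 0 into address space 1 (`fd` is assumed to be an open
vhost-vdpa device descriptor):

.. code:: c

   #include <sys/ioctl.h>
   #include <linux/vhost.h>

   static int bind_vq0_to_asid1(int fd)
   {
           struct vhost_vring_state state = { .index = 0 };   /* virtqueue 0 */

           if (ioctl(fd, VHOST_VDPA_GET_VRING_GROUP, &state))
                   return -1;

           state.index = state.num;        /* group that vq 0 belongs to */
           state.num   = 1;                /* target address space */
           return ioctl(fd, VHOST_VDPA_SET_GROUP_ASID, &state);
   }
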
index 5a5bd74..9c366b3 100755 (executable)
@@ -1646,7 +1646,8 @@ Press any other key to refresh statistics immediately.
                          .format(values))
             if len(pids) > 1:
                 sys.exit('Error: Multiple processes found (pids: {}). Use "-p"'
-                         ' to specify the desired pid'.format(" ".join(pids)))
+                         ' to specify the desired pid'
+                         .format(" ".join(map(str, pids))))
             namespace.pid = pids[0]
 
     argparser = argparse.ArgumentParser(description=description_text,
index c1d5867..952f352 100644 (file)
@@ -149,23 +149,30 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
                        int fd, group_fd, *evsel_fd;
 
                        evsel_fd = FD(evsel, idx, thread);
-                       if (evsel_fd == NULL)
-                               return -EINVAL;
+                       if (evsel_fd == NULL) {
+                               err = -EINVAL;
+                               goto out;
+                       }
 
                        err = get_group_fd(evsel, idx, thread, &group_fd);
                        if (err < 0)
-                               return err;
+                               goto out;
 
                        fd = sys_perf_event_open(&evsel->attr,
                                                 threads->map[thread].pid,
                                                 cpu, group_fd, 0);
 
-                       if (fd < 0)
-                               return -errno;
+                       if (fd < 0) {
+                               err = -errno;
+                               goto out;
+                       }
 
                        *evsel_fd = fd;
                }
        }
+out:
+       if (err)
+               perf_evsel__close(evsel);
 
        return err;
 }
index 8b990a5..c260006 100644 (file)
@@ -787,3 +787,8 @@ bool arch_is_retpoline(struct symbol *sym)
 {
        return !strncmp(sym->name, "__x86_indirect_", 15);
 }
+
+bool arch_is_rethunk(struct symbol *sym)
+{
+       return !strcmp(sym->name, "__x86_return_thunk");
+}
index f4c3a50..24fbe80 100644 (file)
@@ -68,6 +68,8 @@ const struct option check_options[] = {
        OPT_BOOLEAN('n', "noinstr", &opts.noinstr, "validate noinstr rules"),
        OPT_BOOLEAN('o', "orc", &opts.orc, "generate ORC metadata"),
        OPT_BOOLEAN('r', "retpoline", &opts.retpoline, "validate and annotate retpoline usage"),
+       OPT_BOOLEAN(0,   "rethunk", &opts.rethunk, "validate and annotate rethunk usage"),
+       OPT_BOOLEAN(0,   "unret", &opts.unret, "validate entry unret placement"),
        OPT_BOOLEAN('l', "sls", &opts.sls, "validate straight-line-speculation mitigations"),
        OPT_BOOLEAN('s', "stackval", &opts.stackval, "validate frame pointer rules"),
        OPT_BOOLEAN('t', "static-call", &opts.static_call, "annotate static calls"),
@@ -123,6 +125,7 @@ static bool opts_valid(void)
            opts.noinstr                ||
            opts.orc                    ||
            opts.retpoline              ||
+           opts.rethunk                ||
            opts.sls                    ||
            opts.stackval               ||
            opts.static_call            ||
@@ -135,6 +138,11 @@ static bool opts_valid(void)
                return true;
        }
 
+       if (opts.unret && !opts.rethunk) {
+               ERROR("--unret requires --rethunk");
+               return false;
+       }
+
        if (opts.dump_orc)
                return true;
 
@@ -163,6 +171,11 @@ static bool link_opts_valid(struct objtool_file *file)
                return false;
        }
 
+       if (opts.unret) {
+               ERROR("--unret requires --link");
+               return false;
+       }
+
        return true;
 }
 
index 864bb9d..b341f8a 100644 (file)
@@ -376,7 +376,8 @@ static int decode_instructions(struct objtool_file *file)
                        sec->text = true;
 
                if (!strcmp(sec->name, ".noinstr.text") ||
-                   !strcmp(sec->name, ".entry.text"))
+                   !strcmp(sec->name, ".entry.text") ||
+                   !strncmp(sec->name, ".text.__x86.", 12))
                        sec->noinstr = true;
 
                for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
@@ -749,6 +750,52 @@ static int create_retpoline_sites_sections(struct objtool_file *file)
        return 0;
 }
 
+static int create_return_sites_sections(struct objtool_file *file)
+{
+       struct instruction *insn;
+       struct section *sec;
+       int idx;
+
+       sec = find_section_by_name(file->elf, ".return_sites");
+       if (sec) {
+               WARN("file already has .return_sites, skipping");
+               return 0;
+       }
+
+       idx = 0;
+       list_for_each_entry(insn, &file->return_thunk_list, call_node)
+               idx++;
+
+       if (!idx)
+               return 0;
+
+       sec = elf_create_section(file->elf, ".return_sites", 0,
+                                sizeof(int), idx);
+       if (!sec) {
+               WARN("elf_create_section: .return_sites");
+               return -1;
+       }
+
+       idx = 0;
+       list_for_each_entry(insn, &file->return_thunk_list, call_node) {
+
+               int *site = (int *)sec->data->d_buf + idx;
+               *site = 0;
+
+               if (elf_add_reloc_to_insn(file->elf, sec,
+                                         idx * sizeof(int),
+                                         R_X86_64_PC32,
+                                         insn->sec, insn->offset)) {
+                       WARN("elf_add_reloc_to_insn: .return_sites");
+                       return -1;
+               }
+
+               idx++;
+       }
+
+       return 0;
+}
+
 static int create_ibt_endbr_seal_sections(struct objtool_file *file)
 {
        struct instruction *insn;
@@ -1083,6 +1130,11 @@ __weak bool arch_is_retpoline(struct symbol *sym)
        return false;
 }
 
+__weak bool arch_is_rethunk(struct symbol *sym)
+{
+       return false;
+}
+
 #define NEGATIVE_RELOC ((void *)-1L)
 
 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
@@ -1250,6 +1302,19 @@ static void add_retpoline_call(struct objtool_file *file, struct instruction *in
        annotate_call_site(file, insn, false);
 }
 
+static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
+{
+       /*
+        * Return thunk tail calls are really just returns in disguise,
+        * so convert them accordingly.
+        */
+       insn->type = INSN_RETURN;
+       insn->retpoline_safe = true;
+
+       if (add)
+               list_add_tail(&insn->call_node, &file->return_thunk_list);
+}
+
 static bool same_function(struct instruction *insn1, struct instruction *insn2)
 {
        return insn1->func->pfunc == insn2->func->pfunc;
@@ -1302,6 +1367,9 @@ static int add_jump_destinations(struct objtool_file *file)
                } else if (reloc->sym->retpoline_thunk) {
                        add_retpoline_call(file, insn);
                        continue;
+               } else if (reloc->sym->return_thunk) {
+                       add_return_call(file, insn, true);
+                       continue;
                } else if (insn->func) {
                        /*
                         * External sibling call or internal sibling call with
@@ -1320,6 +1388,21 @@ static int add_jump_destinations(struct objtool_file *file)
 
                jump_dest = find_insn(file, dest_sec, dest_off);
                if (!jump_dest) {
+                       struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
+
+                       /*
+                        * This is a special case for zen_untrain_ret().
+                        * It jumps to __x86_return_thunk(), but objtool
+                        * can't find the thunk's starting RET
+                        * instruction, because the RET is also in the
+                        * middle of another instruction.  Objtool only
+                        * knows about the outer instruction.
+                        */
+                       if (sym && sym->return_thunk) {
+                               add_return_call(file, insn, false);
+                               continue;
+                       }
+
                        WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
                                  insn->sec, insn->offset, dest_sec->name,
                                  dest_off);
@@ -1949,16 +2032,35 @@ static int read_unwind_hints(struct objtool_file *file)
 
                insn->hint = true;
 
-               if (opts.ibt && hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
+               if (hint->type == UNWIND_HINT_TYPE_SAVE) {
+                       insn->hint = false;
+                       insn->save = true;
+                       continue;
+               }
+
+               if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
+                       insn->restore = true;
+                       continue;
+               }
+
+               if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
                        struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);
 
-                       if (sym && sym->bind == STB_GLOBAL &&
-                           insn->type != INSN_ENDBR && !insn->noendbr) {
-                               WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR",
-                                         insn->sec, insn->offset);
+                       if (sym && sym->bind == STB_GLOBAL) {
+                               if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
+                                       WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR",
+                                                 insn->sec, insn->offset);
+                               }
+
+                               insn->entry = 1;
                        }
                }
 
+               if (hint->type == UNWIND_HINT_TYPE_ENTRY) {
+                       hint->type = UNWIND_HINT_TYPE_CALL;
+                       insn->entry = 1;
+               }
+
                if (hint->type == UNWIND_HINT_TYPE_FUNC) {
                        insn->cfi = &func_cfi;
                        continue;
@@ -2032,8 +2134,10 @@ static int read_retpoline_hints(struct objtool_file *file)
                }
 
                if (insn->type != INSN_JUMP_DYNAMIC &&
-                   insn->type != INSN_CALL_DYNAMIC) {
-                       WARN_FUNC("retpoline_safe hint not an indirect jump/call",
+                   insn->type != INSN_CALL_DYNAMIC &&
+                   insn->type != INSN_RETURN &&
+                   insn->type != INSN_NOP) {
+                       WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop",
                                  insn->sec, insn->offset);
                        return -1;
                }
@@ -2184,6 +2288,9 @@ static int classify_symbols(struct objtool_file *file)
                        if (arch_is_retpoline(func))
                                func->retpoline_thunk = true;
 
+                       if (arch_is_rethunk(func))
+                               func->return_thunk = true;
+
                        if (!strcmp(func->name, "__fentry__"))
                                func->fentry = true;
 
@@ -3218,8 +3325,8 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
                        return 1;
                }
 
-               visited = 1 << state.uaccess;
-               if (insn->visited) {
+               visited = VISITED_BRANCH << state.uaccess;
+               if (insn->visited & VISITED_BRANCH_MASK) {
                        if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
                                return 1;
 
@@ -3233,6 +3340,35 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
                        state.instr += insn->instr;
 
                if (insn->hint) {
+                       if (insn->restore) {
+                               struct instruction *save_insn, *i;
+
+                               i = insn;
+                               save_insn = NULL;
+
+                               sym_for_each_insn_continue_reverse(file, func, i) {
+                                       if (i->save) {
+                                               save_insn = i;
+                                               break;
+                                       }
+                               }
+
+                               if (!save_insn) {
+                                       WARN_FUNC("no corresponding CFI save for CFI restore",
+                                                 sec, insn->offset);
+                                       return 1;
+                               }
+
+                               if (!save_insn->visited) {
+                                       WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
+                                                 sec, insn->offset);
+                                       return 1;
+                               }
+
+                               insn->cfi = save_insn->cfi;
+                               nr_cfi_reused++;
+                       }
+
                        state.cfi = *insn->cfi;
                } else {
                        /* XXX track if we actually changed state.cfi */
@@ -3433,6 +3569,145 @@ static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
        return warnings;
 }
 
+/*
+ * Validate rethunk entry constraint: must untrain RET before the first RET.
+ *
+ * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes
+ * before an actual RET instruction.
+ */
+static int validate_entry(struct objtool_file *file, struct instruction *insn)
+{
+       struct instruction *next, *dest;
+       int ret, warnings = 0;
+
+       for (;;) {
+               next = next_insn_to_validate(file, insn);
+
+               if (insn->visited & VISITED_ENTRY)
+                       return 0;
+
+               insn->visited |= VISITED_ENTRY;
+
+               if (!insn->ignore_alts && !list_empty(&insn->alts)) {
+                       struct alternative *alt;
+                       bool skip_orig = false;
+
+                       list_for_each_entry(alt, &insn->alts, list) {
+                               if (alt->skip_orig)
+                                       skip_orig = true;
+
+                               ret = validate_entry(file, alt->insn);
+                               if (ret) {
+                                       if (opts.backtrace)
+                                               BT_FUNC("(alt)", insn);
+                                       return ret;
+                               }
+                       }
+
+                       if (skip_orig)
+                               return 0;
+               }
+
+               switch (insn->type) {
+
+               case INSN_CALL_DYNAMIC:
+               case INSN_JUMP_DYNAMIC:
+               case INSN_JUMP_DYNAMIC_CONDITIONAL:
+                       WARN_FUNC("early indirect call", insn->sec, insn->offset);
+                       return 1;
+
+               case INSN_JUMP_UNCONDITIONAL:
+               case INSN_JUMP_CONDITIONAL:
+                       if (!is_sibling_call(insn)) {
+                               if (!insn->jump_dest) {
+                                       WARN_FUNC("unresolved jump target after linking?!?",
+                                                 insn->sec, insn->offset);
+                                       return -1;
+                               }
+                               ret = validate_entry(file, insn->jump_dest);
+                               if (ret) {
+                                       if (opts.backtrace) {
+                                               BT_FUNC("(branch%s)", insn,
+                                                       insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
+                                       }
+                                       return ret;
+                               }
+
+                               if (insn->type == INSN_JUMP_UNCONDITIONAL)
+                                       return 0;
+
+                               break;
+                       }
+
+                       /* fallthrough */
+               case INSN_CALL:
+                       dest = find_insn(file, insn->call_dest->sec,
+                                        insn->call_dest->offset);
+                       if (!dest) {
+                               WARN("Unresolved function after linking!?: %s",
+                                    insn->call_dest->name);
+                               return -1;
+                       }
+
+                       ret = validate_entry(file, dest);
+                       if (ret) {
+                               if (opts.backtrace)
+                                       BT_FUNC("(call)", insn);
+                               return ret;
+                       }
+                       /*
+                        * If a call returns without error, it must have seen UNTRAIN_RET.
+                        * Therefore any non-error return is a success.
+                        */
+                       return 0;
+
+               case INSN_RETURN:
+                       WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset);
+                       return 1;
+
+               case INSN_NOP:
+                       if (insn->retpoline_safe)
+                               return 0;
+                       break;
+
+               default:
+                       break;
+               }
+
+               if (!next) {
+                       WARN_FUNC("teh end!", insn->sec, insn->offset);
+                       return -1;
+               }
+               insn = next;
+       }
+
+       return warnings;
+}
+
+/*
+ * Validate that all branches starting at 'insn->entry' encounter UNRET_END
+ * before RET.
+ */
+static int validate_unret(struct objtool_file *file)
+{
+       struct instruction *insn;
+       int ret, warnings = 0;
+
+       for_each_insn(file, insn) {
+               if (!insn->entry)
+                       continue;
+
+               ret = validate_entry(file, insn);
+               if (ret < 0) {
+                       WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset);
+                       return ret;
+               }
+               warnings += ret;
+       }
+
+       return warnings;
+}
+
 static int validate_retpoline(struct objtool_file *file)
 {
        struct instruction *insn;
@@ -3440,7 +3715,8 @@ static int validate_retpoline(struct objtool_file *file)
 
        for_each_insn(file, insn) {
                if (insn->type != INSN_JUMP_DYNAMIC &&
-                   insn->type != INSN_CALL_DYNAMIC)
+                   insn->type != INSN_CALL_DYNAMIC &&
+                   insn->type != INSN_RETURN)
                        continue;
 
                if (insn->retpoline_safe)
@@ -3455,9 +3731,17 @@ static int validate_retpoline(struct objtool_file *file)
                if (!strcmp(insn->sec->name, ".init.text") && !opts.module)
                        continue;
 
-               WARN_FUNC("indirect %s found in RETPOLINE build",
-                         insn->sec, insn->offset,
-                         insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
+               if (insn->type == INSN_RETURN) {
+                       if (opts.rethunk) {
+                               WARN_FUNC("'naked' return found in RETHUNK build",
+                                         insn->sec, insn->offset);
+                       } else
+                               continue;
+               } else {
+                       WARN_FUNC("indirect %s found in RETPOLINE build",
+                                 insn->sec, insn->offset,
+                                 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
+               }
 
                warnings++;
        }
@@ -3826,8 +4110,7 @@ static int validate_ibt(struct objtool_file *file)
                    !strcmp(sec->name, "__bug_table")                   ||
                    !strcmp(sec->name, "__ex_table")                    ||
                    !strcmp(sec->name, "__jump_table")                  ||
-                   !strcmp(sec->name, "__mcount_loc")                  ||
-                   !strcmp(sec->name, "__tracepoints"))
+                   !strcmp(sec->name, "__mcount_loc"))
                        continue;
 
                list_for_each_entry(reloc, &sec->reloc->reloc_list, list)
@@ -3946,6 +4229,17 @@ int check(struct objtool_file *file)
                warnings += ret;
        }
 
+       if (opts.unret) {
+               /*
+                * Must be after validate_branch() and friends, it plays
+                * further games with insn->visited.
+                */
+               ret = validate_unret(file);
+               if (ret < 0)
+                       return ret;
+               warnings += ret;
+       }
+
        if (opts.ibt) {
                ret = validate_ibt(file);
                if (ret < 0)
@@ -3974,6 +4268,13 @@ int check(struct objtool_file *file)
                warnings += ret;
        }
 
+       if (opts.rethunk) {
+               ret = create_return_sites_sections(file);
+               if (ret < 0)
+                       goto out;
+               warnings += ret;
+       }
+
        if (opts.mcount) {
                ret = create_mcount_loc_sections(file);
                if (ret < 0)
index 9b19cc3..beb2f3a 100644 (file)
@@ -89,6 +89,7 @@ const char *arch_ret_insn(int len);
 int arch_decode_hint_reg(u8 sp_reg, int *base);
 
 bool arch_is_retpoline(struct symbol *sym);
+bool arch_is_rethunk(struct symbol *sym);
 
 int arch_rewrite_retpolines(struct objtool_file *file);
 
index 280ea18..42a52f1 100644 (file)
@@ -19,6 +19,8 @@ struct opts {
        bool noinstr;
        bool orc;
        bool retpoline;
+       bool rethunk;
+       bool unret;
        bool sls;
        bool stackval;
        bool static_call;
index f10d737..036129c 100644 (file)
@@ -46,16 +46,19 @@ struct instruction {
        enum insn_type type;
        unsigned long immediate;
 
-       u8 dead_end     : 1,
-          ignore       : 1,
-          ignore_alts  : 1,
-          hint         : 1,
-          retpoline_safe : 1,
-          noendbr      : 1;
-               /* 2 bit hole */
+       u16 dead_end            : 1,
+          ignore               : 1,
+          ignore_alts          : 1,
+          hint                 : 1,
+          save                 : 1,
+          restore              : 1,
+          retpoline_safe       : 1,
+          noendbr              : 1,
+          entry                : 1;
+               /* 7 bit hole */
+
        s8 instr;
        u8 visited;
-       /* u8 hole */
 
        struct alt_group *alt_group;
        struct symbol *call_dest;
@@ -69,6 +72,11 @@ struct instruction {
        struct cfi_state *cfi;
 };
 
+#define VISITED_BRANCH         0x01
+#define VISITED_BRANCH_UACCESS 0x02
+#define VISITED_BRANCH_MASK    0x03
+#define VISITED_ENTRY          0x04
+
 static inline bool is_static_jump(struct instruction *insn)
 {
        return insn->type == INSN_JUMP_CONDITIONAL ||
index adebfbc..16f4067 100644 (file)
@@ -57,6 +57,7 @@ struct symbol {
        u8 uaccess_safe      : 1;
        u8 static_call_tramp : 1;
        u8 retpoline_thunk   : 1;
+       u8 return_thunk      : 1;
        u8 fentry            : 1;
        u8 profiling_func    : 1;
        struct list_head pv_target;
index a6e72d9..7f2d1b0 100644 (file)
@@ -24,6 +24,7 @@ struct objtool_file {
        struct list_head insn_list;
        DECLARE_HASHTABLE(insn_hash, 20);
        struct list_head retpoline_call_list;
+       struct list_head return_thunk_list;
        struct list_head static_call_list;
        struct list_head mcount_loc_list;
        struct list_head endbr_list;
index 512669c..a7ecc32 100644 (file)
@@ -102,6 +102,7 @@ struct objtool_file *objtool_open_read(const char *_objname)
        INIT_LIST_HEAD(&file.insn_list);
        hash_init(file.insn_hash);
        INIT_LIST_HEAD(&file.retpoline_call_list);
+       INIT_LIST_HEAD(&file.return_thunk_list);
        INIT_LIST_HEAD(&file.static_call_list);
        INIT_LIST_HEAD(&file.mcount_loc_list);
        INIT_LIST_HEAD(&file.endbr_list);
index a75bf11..54d4e50 100644 (file)
@@ -891,7 +891,9 @@ static int copy_kcore_dir(struct perf_inject *inject)
        if (ret < 0)
                return ret;
        pr_debug("%s\n", cmd);
-       return system(cmd);
+       ret = system(cmd);
+       free(cmd);
+       return ret;
 }
 
 static int output_fd(struct perf_inject *inject)
@@ -916,7 +918,7 @@ static int __cmd_inject(struct perf_inject *inject)
                inject->tool.tracing_data = perf_event__repipe_tracing_data;
        }
 
-       output_data_offset = session->header.data_offset;
+       output_data_offset = perf_session__data_offset(session->evlist);
 
        if (inject->build_id_all) {
                inject->tool.mmap         = perf_event__repipe_buildid_mmap;
index 4ce87a8..d2ecd4d 100644 (file)
@@ -2586,6 +2586,8 @@ int cmd_stat(int argc, const char **argv)
        if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
                goto out;
 
+       /* Enable ignoring missing threads when the -p option is specified. */
+       evlist__first(evsel_list)->ignore_missing_thread = target.pid;
        status = 0;
        for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
                if (stat_config.run_count != 1 && verbose > 0)
index 897fc50..f075cf3 100644 (file)
@@ -4280,6 +4280,7 @@ static int trace__replay(struct trace *trace)
                goto out;
 
        evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
+       trace->syscalls.events.sys_enter = evsel;
        /* older kernels have syscalls tp versus raw_syscalls */
        if (evsel == NULL)
                evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");
@@ -4292,6 +4293,7 @@ static int trace__replay(struct trace *trace)
        }
 
        evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
+       trace->syscalls.events.sys_exit = evsel;
        if (evsel == NULL)
                evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
        if (evsel &&
index 5f57d98..4339692 100755 (executable)
@@ -61,7 +61,7 @@ def get_optional(perf_dict, field):
 
 def get_offset(perf_dict, field):
        if field in perf_dict:
-               return f"+0x{perf_dict[field]:x}"
+               return "+%#x" % perf_dict[field]
        return ""
 
 def get_dso_file_path(dso_name, dso_build_id):
@@ -76,7 +76,7 @@ def get_dso_file_path(dso_name, dso_build_id):
        else:
                append = "/elf"
 
-       dso_path = f"{os.environ['PERF_BUILDID_DIR']}/{dso_name}/{dso_build_id}{append}"
+       dso_path = os.environ['PERF_BUILDID_DIR'] + "/" + dso_name + "/" + dso_build_id + append
        # Replace duplicate slash chars to single slash char
        dso_path = dso_path.replace('//', '/', 1)
        return dso_path
@@ -94,8 +94,8 @@ def read_disam(dso_fname, dso_start, start_addr, stop_addr):
                start_addr = start_addr - dso_start;
                stop_addr = stop_addr - dso_start;
                disasm = [ options.objdump_name, "-d", "-z",
-                          f"--start-address=0x{start_addr:x}",
-                          f"--stop-address=0x{stop_addr:x}" ]
+                          "--start-address="+format(start_addr,"#x"),
+                          "--stop-address="+format(stop_addr,"#x") ]
                disasm += [ dso_fname ]
                disasm_output = check_output(disasm).decode('utf-8').split('\n')
                disasm_cache[addr_range] = disasm_output
@@ -109,12 +109,14 @@ def print_disam(dso_fname, dso_start, start_addr, stop_addr):
                        m = disasm_re.search(line)
                        if m is None:
                                continue
-               print(f"\t{line}")
+               print("\t" + line)
 
 def print_sample(sample):
-       print(f"Sample = {{ cpu: {sample['cpu']:04} addr: 0x{sample['addr']:016x} " \
-             f"phys_addr: 0x{sample['phys_addr']:016x} ip: 0x{sample['ip']:016x} " \
-             f"pid: {sample['pid']} tid: {sample['tid']} period: {sample['period']} time: {sample['time']} }}")
+       print("Sample = { cpu: %04d addr: 0x%016x phys_addr: 0x%016x ip: 0x%016x " \
+             "pid: %d tid: %d period: %d time: %d }" % \
+             (sample['cpu'], sample['addr'], sample['phys_addr'], \
+              sample['ip'], sample['pid'], sample['tid'], \
+              sample['period'], sample['time']))
 
 def trace_begin():
        print('ARM CoreSight Trace Data Assembler Dump')
@@ -131,7 +133,7 @@ def common_start_str(comm, sample):
        cpu = sample["cpu"]
        pid = sample["pid"]
        tid = sample["tid"]
-       return f"{comm:>16} {pid:>5}/{tid:<5} [{cpu:04}] {sec:9}.{ns:09}  "
+       return "%16s %5u/%-5u [%04u] %9u.%09u  " % (comm, pid, tid, cpu, sec, ns)
 
 # This code is copied from intel-pt-events.py for printing source code
 # line and symbols.
@@ -171,7 +173,7 @@ def print_srccode(comm, param_dict, sample, symbol, dso):
        glb_line_number = line_number
        glb_source_file_name = source_file_name
 
-       print(f"{start_str}{src_str}")
+       print(start_str, src_str)
 
 def process_event(param_dict):
        global cache_size
@@ -188,7 +190,7 @@ def process_event(param_dict):
        symbol = get_optional(param_dict, "symbol")
 
        if (options.verbose == True):
-               print(f"Event type: {name}")
+               print("Event type: %s" % name)
                print_sample(sample)
 
        # If cannot find dso so cannot dump assembler, bail out
@@ -197,7 +199,7 @@ def process_event(param_dict):
 
        # Validate dso start and end addresses
        if ((dso_start == '[unknown]') or (dso_end == '[unknown]')):
-               print(f"Failed to find valid dso map for dso {dso}")
+               print("Failed to find valid dso map for dso %s" % dso)
                return
 
        if (name[0:12] == "instructions"):
@@ -244,15 +246,15 @@ def process_event(param_dict):
 
        # Handle CS_ETM_TRACE_ON packet if start_addr=0 and stop_addr=4
        if (start_addr == 0 and stop_addr == 4):
-               print(f"CPU{cpu}: CS_ETM_TRACE_ON packet is inserted")
+               print("CPU%d: CS_ETM_TRACE_ON packet is inserted" % cpu)
                return
 
        if (start_addr < int(dso_start) or start_addr > int(dso_end)):
-               print(f"Start address 0x{start_addr:x} is out of range [ 0x{dso_start:x} .. 0x{dso_end:x} ] for dso {dso}")
+               print("Start address 0x%x is out of range [ 0x%x .. 0x%x ] for dso %s" % (start_addr, int(dso_start), int(dso_end), dso))
                return
 
        if (stop_addr < int(dso_start) or stop_addr > int(dso_end)):
-               print(f"Stop address 0x{stop_addr:x} is out of range [ 0x{dso_start:x} .. 0x{dso_end:x} ] for dso {dso}")
+               print("Stop address 0x%x is out of range [ 0x%x .. 0x%x ] for dso %s" % (stop_addr, int(dso_start), int(dso_end), dso))
                return
 
        if (options.objdump_name != None):
@@ -267,6 +269,6 @@ def process_event(param_dict):
                if path.exists(dso_fname):
                        print_disam(dso_fname, dso_vm_start, start_addr, stop_addr)
                else:
-                       print(f"Failed to find dso {dso} for address range [ 0x{start_addr:x} .. 0x{stop_addr:x} ]")
+                       print("Failed to find dso %s for address range [ 0x%x .. 0x%x ]" % (dso, start_addr, stop_addr))
 
        print_srccode(comm, param_dict, sample, symbol, dso)
index d1ebb55..6f921db 100644 (file)
@@ -151,11 +151,21 @@ static int detect_ioctl(void)
 static int detect_share(int wp_cnt, int bp_cnt)
 {
        struct perf_event_attr attr;
-       int i, fd[wp_cnt + bp_cnt], ret;
+       int i, *fd = NULL, ret = -1;
+
+       if (wp_cnt + bp_cnt == 0)
+               return 0;
+
+       fd = malloc(sizeof(int) * (wp_cnt + bp_cnt));
+       if (!fd)
+               return -1;
 
        for (i = 0; i < wp_cnt; i++) {
                fd[i] = wp_event((void *)&the_var, &attr);
-               TEST_ASSERT_VAL("failed to create wp\n", fd[i] != -1);
+               if (fd[i] == -1) {
+                       pr_err("failed to create wp\n");
+                       goto out;
+               }
        }
 
        for (; i < (bp_cnt + wp_cnt); i++) {
@@ -166,9 +176,11 @@ static int detect_share(int wp_cnt, int bp_cnt)
 
        ret = i != (bp_cnt + wp_cnt);
 
+out:
        while (i--)
                close(fd[i]);
 
+       free(fd);
        return ret;
 }
 
index d54c537..5c0032f 100644 (file)
@@ -97,6 +97,8 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
        ret |= test(ctx, "2.2 > 2.2", 0);
        ret |= test(ctx, "2.2 < 1.1", 0);
        ret |= test(ctx, "1.1 > 2.2", 0);
+       ret |= test(ctx, "1.1e10 < 1.1e100", 1);
+       ret |= test(ctx, "1.1e2 > 1.1e-2", 1);
 
        if (ret) {
                expr__ctx_free(ctx);
index 4ad0dfb..7c7d20f 100644 (file)
@@ -20,8 +20,6 @@
 #include "tsc.h"
 #include "mmap.h"
 #include "tests.h"
-#include "pmu.h"
-#include "pmu-hybrid.h"
 
 /*
  * Except x86_64/i386 and Arm64, other archs don't support TSC in perf.  Just
@@ -106,28 +104,21 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
 
        evlist__config(evlist, &opts, NULL);
 
-       evsel = evlist__first(evlist);
-
-       evsel->core.attr.comm = 1;
-       evsel->core.attr.disabled = 1;
-       evsel->core.attr.enable_on_exec = 0;
-
-       /*
-        * For hybrid "cycles:u", it creates two events.
-        * Init the second evsel here.
-        */
-       if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) {
-               evsel = evsel__next(evsel);
+       /* For hybrid "cycles:u", it creates two events */
+       evlist__for_each_entry(evlist, evsel) {
                evsel->core.attr.comm = 1;
                evsel->core.attr.disabled = 1;
                evsel->core.attr.enable_on_exec = 0;
        }
 
-       if (evlist__open(evlist) == -ENOENT) {
-               err = TEST_SKIP;
+       ret = evlist__open(evlist);
+       if (ret < 0) {
+               if (ret == -ENOENT)
+                       err = TEST_SKIP;
+               else
+                       pr_debug("evlist__open() failed\n");
                goto out_err;
        }
-       CHECK__(evlist__open(evlist));
 
        CHECK__(evlist__mmap(evlist, UINT_MAX));
 
@@ -167,10 +158,12 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
                                goto next_event;
 
                        if (strcmp(event->comm.comm, comm1) == 0) {
+                               CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
                                CHECK__(evsel__parse_sample(evsel, event, &sample));
                                comm1_time = sample.time;
                        }
                        if (strcmp(event->comm.comm, comm2) == 0) {
+                               CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
                                CHECK__(evsel__parse_sample(evsel, event, &sample));
                                comm2_time = sample.time;
                        }
diff --git a/tools/perf/tests/shell/lib/perf_csv_output_lint.py b/tools/perf/tests/shell/lib/perf_csv_output_lint.py
deleted file mode 100644 (file)
index 714f283..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/python
-# SPDX-License-Identifier: GPL-2.0
-
-import argparse
-import sys
-
-# Basic sanity check of perf CSV output as specified in the man page.
-# Currently just checks the number of fields per line in output.
-
-ap = argparse.ArgumentParser()
-ap.add_argument('--no-args', action='store_true')
-ap.add_argument('--interval', action='store_true')
-ap.add_argument('--system-wide-no-aggr', action='store_true')
-ap.add_argument('--system-wide', action='store_true')
-ap.add_argument('--event', action='store_true')
-ap.add_argument('--per-core', action='store_true')
-ap.add_argument('--per-thread', action='store_true')
-ap.add_argument('--per-die', action='store_true')
-ap.add_argument('--per-node', action='store_true')
-ap.add_argument('--per-socket', action='store_true')
-ap.add_argument('--separator', default=',', nargs='?')
-args = ap.parse_args()
-
-Lines = sys.stdin.readlines()
-
-def check_csv_output(exp):
-  for line in Lines:
-    if 'failed' not in line:
-      count = line.count(args.separator)
-      if count != exp:
-        sys.stdout.write(''.join(Lines))
-        raise RuntimeError(f'wrong number of fields. expected {exp} in {line}')
-
-try:
-  if args.no_args or args.system_wide or args.event:
-    expected_items = 6
-  elif args.interval or args.per_thread or args.system_wide_no_aggr:
-    expected_items = 7
-  elif args.per_core or args.per_socket or args.per_node or args.per_die:
-    expected_items = 8
-  else:
-    ap.print_help()
-    raise RuntimeError('No checking option specified')
-  check_csv_output(expected_items)
-
-except:
-  sys.stdout.write('Test failed for input: ' + ''.join(Lines))
-  raise
index 983220e..38c26f3 100755 (executable)
@@ -6,20 +6,41 @@
 
 set -e
 
-pythonchecker=$(dirname $0)/lib/perf_csv_output_lint.py
-if [ "x$PYTHON" == "x" ]
-then
-       if which python3 > /dev/null
-       then
-               PYTHON=python3
-       elif which python > /dev/null
-       then
-               PYTHON=python
-       else
-               echo Skipping test, python not detected please set environment variable PYTHON.
-               exit 2
-       fi
-fi
+function commachecker()
+{
+       local -i cnt=0 exp=0
+
+       case "$1"
+       in "--no-args")         exp=6
+       ;; "--system-wide")     exp=6
+       ;; "--event")           exp=6
+       ;; "--interval")        exp=7
+       ;; "--per-thread")      exp=7
+       ;; "--system-wide-no-aggr")     exp=7
+                               [ $(uname -m) = "s390x" ] && exp=6
+       ;; "--per-core")        exp=8
+       ;; "--per-socket")      exp=8
+       ;; "--per-node")        exp=8
+       ;; "--per-die")         exp=8
+       esac
+
+       while read line
+       do
+               # Check for lines beginning with Failed
+               x=${line:0:6}
+               [ "$x" = "Failed" ] && continue
+
+               # Count the number of commas
+               x=$(echo $line | tr -d -c ',')
+               cnt="${#x}"
+               # echo $line $cnt
+               [ "$cnt" -ne "$exp" ] && {
+                       echo "wrong number of fields. expected $exp in $line" 1>&2
+                       exit 1;
+               }
+       done
+       return 0
+}
 
 # Return true if perf_event_paranoid is > $1 and not running as root.
 function ParanoidAndNotRoot()
@@ -30,7 +51,7 @@ function ParanoidAndNotRoot()
 check_no_args()
 {
        echo -n "Checking CSV output: no args "
-       perf stat -x, true 2>&1 | $PYTHON $pythonchecker --no-args
+       perf stat -x, true 2>&1 | commachecker --no-args
        echo "[Success]"
 }
 
@@ -42,7 +63,7 @@ check_system_wide()
                echo "[Skip] paranoid and not root"
                return
        fi
-       perf stat -x, -a true 2>&1 | $PYTHON $pythonchecker --system-wide
+       perf stat -x, -a true 2>&1 | commachecker --system-wide
        echo "[Success]"
 }
 
@@ -55,14 +76,14 @@ check_system_wide_no_aggr()
                return
        fi
        echo -n "Checking CSV output: system wide no aggregation "
-       perf stat -x, -A -a --no-merge true 2>&1 | $PYTHON $pythonchecker --system-wide-no-aggr
+       perf stat -x, -A -a --no-merge true 2>&1 | commachecker --system-wide-no-aggr
        echo "[Success]"
 }
 
 check_interval()
 {
        echo -n "Checking CSV output: interval "
-       perf stat -x, -I 1000 true 2>&1 | $PYTHON $pythonchecker --interval
+       perf stat -x, -I 1000 true 2>&1 | commachecker --interval
        echo "[Success]"
 }
 
@@ -70,7 +91,7 @@ check_interval()
 check_event()
 {
        echo -n "Checking CSV output: event "
-       perf stat -x, -e cpu-clock true 2>&1 | $PYTHON $pythonchecker --event
+       perf stat -x, -e cpu-clock true 2>&1 | commachecker --event
        echo "[Success]"
 }
 
@@ -82,7 +103,7 @@ check_per_core()
                echo "[Skip] paranoid and not root"
                return
        fi
-       perf stat -x, --per-core -a true 2>&1 | $PYTHON $pythonchecker --per-core
+       perf stat -x, --per-core -a true 2>&1 | commachecker --per-core
        echo "[Success]"
 }
 
@@ -94,7 +115,7 @@ check_per_thread()
                echo "[Skip] paranoid and not root"
                return
        fi
-       perf stat -x, --per-thread -a true 2>&1 | $PYTHON $pythonchecker --per-thread
+       perf stat -x, --per-thread -a true 2>&1 | commachecker --per-thread
        echo "[Success]"
 }
 
@@ -106,7 +127,7 @@ check_per_die()
                echo "[Skip] paranoid and not root"
                return
        fi
-       perf stat -x, --per-die -a true 2>&1 | $PYTHON $pythonchecker --per-die
+       perf stat -x, --per-die -a true 2>&1 | commachecker --per-die
        echo "[Success]"
 }
 
@@ -118,7 +139,7 @@ check_per_node()
                echo "[Skip] paranoid and not root"
                return
        fi
-       perf stat -x, --per-node -a true 2>&1 | $PYTHON $pythonchecker --per-node
+       perf stat -x, --per-node -a true 2>&1 | commachecker --per-node
        echo "[Success]"
 }
 
@@ -130,7 +151,7 @@ check_per_socket()
                echo "[Skip] paranoid and not root"
                return
        fi
-       perf stat -x, --per-socket -a true 2>&1 | $PYTHON $pythonchecker --per-socket
+       perf stat -x, --per-socket -a true 2>&1 | commachecker --per-socket
        echo "[Success]"
 }
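
A minimal C sketch (illustrative only, not part of the patch) of the invariant both the removed Python linter and the new commachecker enforce: with -x, every output line not beginning with "Failed" must contain exactly the expected number of separators for the chosen aggregation mode.

#include <string.h>

/* Count separators in one output line and compare against the
 * expectation; "Failed" lines are skipped by the caller. */
static int check_line(const char *line, int expected)
{
	int cnt = 0;

	for (const char *p = line; (p = strchr(p, ',')) != NULL; p++)
		cnt++;
	return cnt == expected ? 0 : -1;
}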
 
index 6ffbb27..ec108d4 100755 (executable)
@@ -43,7 +43,7 @@ CFLAGS="-g -O0 -fno-inline -fno-omit-frame-pointer"
 cc $CFLAGS $TEST_PROGRAM_SOURCE -o $TEST_PROGRAM || exit 1
 
 # Add a 1 second delay to skip samples that are not in the leaf() function
-perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 -- $TEST_PROGRAM 2> /dev/null &
+perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null &
 PID=$!
 
 echo " + Recording (PID=$PID)..."
index d23a9e3..0b4f61b 100644 (file)
@@ -115,7 +115,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
         * physical_package_id will be set to -1. Hence skip this
         * test if physical_package_id returns -1 for cpu from perf_cpu_map.
         */
-       if (strncmp(session->header.env.arch, "powerpc", 7)) {
+       if (!strncmp(session->header.env.arch, "ppc64le", 7)) {
                if (cpu__get_socket_id(perf_cpu_map__cpu(map, 0)) == -1)
                        return TEST_SKIP;
        }
index 2c5f72f..37c53ba 100755 (executable)
@@ -33,23 +33,13 @@ create_errno_lookup_func()
        local arch=$(arch_string "$1")
        local nr name
 
-       cat <<EoFuncBegin
-static const char *errno_to_name__$arch(int err)
-{
-       switch (err) {
-EoFuncBegin
+       printf "static const char *errno_to_name__%s(int err)\n{\n\tswitch (err) {\n" $arch
 
        while read name nr; do
                printf '\tcase %d: return "%s";\n' $nr $name
        done
 
-       cat <<EoFuncEnd
-       default:
-               return "(unknown)";
-       }
-}
-
-EoFuncEnd
+       printf '\tdefault: return "(unknown)";\n\t}\n}\n'
 }
 
 process_arch()
index 6f85f5d..17311ad 100644 (file)
@@ -50,6 +50,9 @@ struct linger {
 struct msghdr {
        void            *msg_name;      /* ptr to socket address structure */
        int             msg_namelen;    /* size of socket address structure */
+
+       int             msg_inq;        /* output, data left in socket */
+
        struct iov_iter msg_iter;       /* data */
 
        /*
@@ -62,8 +65,9 @@ struct msghdr {
                void __user     *msg_control_user;
        };
        bool            msg_control_is_user : 1;
-       __kernel_size_t msg_controllen; /* ancillary data buffer length */
+       bool            msg_get_inq : 1;/* return INQ after receive */
        unsigned int    msg_flags;      /* flags on received message */
+       __kernel_size_t msg_controllen; /* ancillary data buffer length */
        struct kiocb    *msg_iocb;      /* ptr to iocb for async requests */
 };
 
@@ -434,6 +438,7 @@ extern struct file *do_accept(struct file *file, unsigned file_flags,
 extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
                         int __user *upeer_addrlen, int flags);
 extern int __sys_socket(int family, int type, int protocol);
+extern struct file *__sys_socket_file(int family, int type, int protocol);
 extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
 extern int __sys_connect_file(struct file *file, struct sockaddr_storage *addr,
                              int addrlen, int file_flags);
index 1a80151..d040406 100644 (file)
@@ -387,26 +387,16 @@ static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
        return arm_spe_deliver_synth_event(spe, speq, event, &sample);
 }
 
-#define SPE_MEM_TYPE   (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS | \
-                        ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS | \
-                        ARM_SPE_REMOTE_ACCESS)
-
-static bool arm_spe__is_memory_event(enum arm_spe_sample_type type)
-{
-       if (type & SPE_MEM_TYPE)
-               return true;
-
-       return false;
-}
-
 static u64 arm_spe__synth_data_source(const struct arm_spe_record *record)
 {
        union perf_mem_data_src data_src = { 0 };
 
        if (record->op == ARM_SPE_LD)
                data_src.mem_op = PERF_MEM_OP_LOAD;
-       else
+       else if (record->op == ARM_SPE_ST)
                data_src.mem_op = PERF_MEM_OP_STORE;
+       else
+               return 0;
 
        if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
                data_src.mem_lvl = PERF_MEM_LVL_L3;
@@ -510,7 +500,11 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
                        return err;
        }
 
-       if (spe->sample_memory && arm_spe__is_memory_event(record->type)) {
+       /*
+        * A data_src of zero means the record is not a memory operation;
+        * skip synthesizing a memory sample in that case.

+        */
+       if (spe->sample_memory && data_src) {
                err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);
                if (err)
                        return err;
index f8ad581..cdd6463 100644 (file)
@@ -63,20 +63,16 @@ static struct hashmap *bpf_map_hash;
 static struct bpf_perf_object *
 bpf_perf_object__next(struct bpf_perf_object *prev)
 {
-       struct bpf_perf_object *next;
-
-       if (!prev)
-               next = list_first_entry(&bpf_objects_list,
-                                       struct bpf_perf_object,
-                                       list);
-       else
-               next = list_next_entry(prev, list);
+       if (!prev) {
+               if (list_empty(&bpf_objects_list))
+                       return NULL;
 
-       /* Empty list is noticed here so don't need checking on entry. */
-       if (&next->list == &bpf_objects_list)
+               return list_first_entry(&bpf_objects_list, struct bpf_perf_object, list);
+       }
+       if (list_is_last(&prev->list, &bpf_objects_list))
                return NULL;
 
-       return next;
+       return list_next_entry(prev, list);
 }
 
 #define bpf_perf_object__for_each(perf_obj, tmp)       \
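
The old iterator computed next unconditionally and only then noticed it had stepped onto the sentinel head; the rewrite handles the empty list and the last entry before dereferencing anything. The same shape in a self-contained sketch (hypothetical names, a plain singly linked list instead of <linux/list.h>):

#include <stddef.h>

struct obj { struct obj *next; };

static struct obj *obj_next(struct obj *head, struct obj *prev)
{
	if (!prev)
		return head;	/* NULL when the list is empty */
	return prev->next;	/* NULL after the last element */
}

/* Usage mirrors bpf_perf_object__for_each(). */
#define for_each_obj(pos, head) \
	for ((pos) = obj_next((head), NULL); (pos); \
	     (pos) = obj_next((head), (pos)))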
index e271e05..80b1d2b 100644 (file)
@@ -149,11 +149,10 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
                count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
                size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
 
-               data_len += count * size;
+               data_len += roundup(count * size, sizeof(__u64));
        }
 
        /* step 3: allocate continuous memory */
-       data_len = roundup(data_len, sizeof(__u64));
        info_linear = malloc(sizeof(struct perf_bpil) + data_len);
        if (!info_linear)
                return ERR_PTR(-ENOMEM);
@@ -180,7 +179,7 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
                bpf_prog_info_set_offset_u64(&info_linear->info,
                                             desc->array_offset,
                                             ptr_to_u64(ptr));
-               ptr += count * size;
+               ptr += roundup(count * size, sizeof(__u64));
        }
 
        /* step 5: call syscall again to get required arrays */
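
The fix matters because rounding only the total leaves the second and later arrays misaligned within the linear buffer. A worked example with two hypothetical arrays of 12 and 20 bytes:

#include <stdint.h>
#include <stdio.h>

#define ROUNDUP8(x) (((x) + 7ULL) & ~7ULL)

int main(void)
{
	uint64_t a = 12, b = 20;	/* count * size for two arrays */

	/* Old scheme: one roundup of the total; b starts at offset 12,
	 * which is not 8-byte aligned. */
	printf("old: total=%llu, b at %llu\n",
	       (unsigned long long)ROUNDUP8(a + b), (unsigned long long)a);

	/* New scheme: each array is padded, so b starts at offset 16. */
	printf("new: total=%llu, b at %llu\n",
	       (unsigned long long)(ROUNDUP8(a) + ROUNDUP8(b)),
	       (unsigned long long)ROUNDUP8(a));
	return 0;
}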
index b73e84a..f289b77 100644 (file)
@@ -265,6 +265,12 @@ int off_cpu_write(struct perf_session *session)
 
        sample_type = evsel->core.attr.sample_type;
 
+       if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
+               pr_err("unsupported sample type: %llx\n",
+                      (unsigned long long)sample_type);
+               return -1;
+       }
+
        if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
                if (evsel->core.id)
                        sid = evsel->core.id[0];
@@ -319,7 +325,6 @@ int off_cpu_write(struct perf_session *session)
                }
                if (sample_type & PERF_SAMPLE_CGROUP)
                        data.array[n++] = key.cgroup_id;
-               /* TODO: handle more sample types */
 
                size = n * sizeof(u64);
                data.hdr.size = size;
index 792ae28..cc6d7fd 100644 (file)
@@ -71,6 +71,11 @@ struct {
        __uint(max_entries, 1);
 } cgroup_filter SEC(".maps");
 
+/* new kernel task_struct definition */
+struct task_struct___new {
+       long __state;
+} __attribute__((preserve_access_index));
+
 /* old kernel task_struct definition */
 struct task_struct___old {
        long state;
@@ -93,14 +98,17 @@ const volatile bool uses_cgroup_v1 = false;
  */
 static inline int get_task_state(struct task_struct *t)
 {
-       if (bpf_core_field_exists(t->__state))
-               return BPF_CORE_READ(t, __state);
+       /* recast pointer to capture new type for compiler */
+       struct task_struct___new *t_new = (void *)t;
 
-       /* recast pointer to capture task_struct___old type for compiler */
-       struct task_struct___old *t_old = (void *)t;
+       if (bpf_core_field_exists(t_new->__state)) {
+               return BPF_CORE_READ(t_new, __state);
+       } else {
+               /* recast pointer to capture old type for compiler */
+               struct task_struct___old *t_old = (void *)t;
 
-       /* now use old "state" name of the field */
-       return BPF_CORE_READ(t_old, state);
+               return BPF_CORE_READ(t_old, state);
+       }
 }
 
 static inline __u64 get_cgroup_id(struct task_struct *t)
index 82f3d46..328668f 100644 (file)
@@ -872,6 +872,30 @@ out_free:
        return err;
 }
 
+static int filename__read_build_id_ns(const char *filename,
+                                     struct build_id *bid,
+                                     struct nsinfo *nsi)
+{
+       struct nscookie nsc;
+       int ret;
+
+       nsinfo__mountns_enter(nsi, &nsc);
+       ret = filename__read_build_id(filename, bid);
+       nsinfo__mountns_exit(&nsc);
+
+       return ret;
+}
+
+static bool dso__build_id_mismatch(struct dso *dso, const char *name)
+{
+       struct build_id bid;
+
+       if (filename__read_build_id_ns(name, &bid, dso->nsinfo) < 0)
+               return false;
+
+       return !dso__build_id_equal(dso, &bid);
+}
+
 static int dso__cache_build_id(struct dso *dso, struct machine *machine,
                               void *priv __maybe_unused)
 {
@@ -886,6 +910,10 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine,
                is_kallsyms = true;
                name = machine->mmap_name;
        }
+
+       if (!is_kallsyms && dso__build_id_mismatch(dso, name))
+               return 0;
+
        return build_id_cache__add_b(&dso->bid, name, dso->nsinfo,
                                     is_kallsyms, is_vdso);
 }
index ce499c5..094b0a9 100644 (file)
@@ -48,6 +48,7 @@
 #include "util.h"
 #include "hashmap.h"
 #include "pmu-hybrid.h"
+#include "off_cpu.h"
 #include "../perf-sys.h"
 #include "util/parse-branch-options.h"
 #include <internal/xyarray.h>
@@ -1102,6 +1103,11 @@ static void evsel__set_default_freq_period(struct record_opts *opts,
        }
 }
 
+static bool evsel__is_offcpu_event(struct evsel *evsel)
+{
+       return evsel__is_bpf_output(evsel) && !strcmp(evsel->name, OFFCPU_EVENT);
+}
+
 /*
  * The enable_on_exec/disabled value strategy:
  *
@@ -1366,6 +1372,9 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
         */
        if (evsel__is_dummy_event(evsel))
                evsel__reset_sample_bit(evsel, BRANCH_STACK);
+
+       if (evsel__is_offcpu_event(evsel))
+               evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES;
 }
 
 int evsel__set_filter(struct evsel *evsel, const char *filter)
index 0a13eb2..4dc8edb 100644 (file)
@@ -91,7 +91,7 @@ static int literal(yyscan_t scanner)
 }
 %}
 
-number         ([0-9]+\.?[0-9]*|[0-9]*\.?[0-9]+)
+number         ([0-9]+\.?[0-9]*|[0-9]*\.?[0-9]+)(e-?[0-9]+)?
 
 sch            [-,=]
 spec           \\{sch}
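
The extended pattern lets metric expressions use scientific notation. A quick illustration of what now tokenizes as a number; note the exponent suffix accepts a lowercase 'e' and an optional minus only, so 1E6 and 1e+6 still do not match:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Forms accepted by the amended pattern. */
	const char *ok[] = { "10", "3.5", ".5", "1e6", "2.5e-3" };

	for (int i = 0; i < 5; i++)
		printf("%-6s -> %g\n", ok[i], strtod(ok[i], NULL));
	return 0;
}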
index 53332da..6ad629d 100644 (file)
@@ -3686,6 +3686,20 @@ int perf_session__write_header(struct perf_session *session,
        return perf_session__do_write_header(session, evlist, fd, at_exit, NULL);
 }
 
+size_t perf_session__data_offset(const struct evlist *evlist)
+{
+       struct evsel *evsel;
+       size_t data_offset;
+
+       data_offset = sizeof(struct perf_file_header);
+       evlist__for_each_entry(evlist, evsel) {
+               data_offset += evsel->core.ids * sizeof(u64);
+       }
+       data_offset += evlist->core.nr_entries * sizeof(struct perf_file_attr);
+
+       return data_offset;
+}
+
 int perf_session__inject_header(struct perf_session *session,
                                struct evlist *evlist,
                                int fd,
index 08563c1..56916da 100644 (file)
@@ -136,6 +136,8 @@ int perf_session__inject_header(struct perf_session *session,
                                int fd,
                                struct feat_copier *fc);
 
+size_t perf_session__data_offset(const struct evlist *evlist);
+
 void perf_header__set_feat(struct perf_header *header, int feat);
 void perf_header__clear_feat(struct perf_header *header, int feat);
 bool perf_header__has_feat(const struct perf_header *header, int feat);
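
perf_session__data_offset() sums a fixed file header, eight bytes per event ID, and one attribute record per event. A sketch of the arithmetic with illustrative struct sizes (the real ones come from sizeof at build time):

#include <stdio.h>

int main(void)
{
	/* Illustrative sizes, standing in for sizeof(struct
	 * perf_file_header) and sizeof(struct perf_file_attr). */
	unsigned long file_header = 104, file_attr = 136;
	unsigned long ids[] = { 4, 2 };		/* per-evsel ID counts */
	unsigned long off = file_header;

	for (int i = 0; i < 2; i++)
		off += ids[i] * 8;		/* sizeof(u64) per ID */
	off += 2 * file_attr;			/* one attr per evsel */

	printf("data offset: %lu\n", off);	/* 104 + 48 + 272 = 424 */
	return 0;
}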
index ee8fcfa..8f7baea 100644 (file)
@@ -1372,6 +1372,7 @@ static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu,
 
        *out_evlist = NULL;
        if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
+               bool added_event = false;
                int i;
                /*
                 * We may fail to share events between metrics because a tool
@@ -1393,8 +1394,16 @@ static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu,
                                if (!tmp)
                                        return -ENOMEM;
                                ids__insert(ids->ids, tmp);
+                               added_event = true;
                        }
                }
+               if (!added_event && hashmap__size(ids->ids) == 0) {
+                       char *tmp = strdup("duration_time");
+
+                       if (!tmp)
+                               return -ENOMEM;
+                       ids__insert(ids->ids, tmp);
+               }
        }
        ret = metricgroup__build_event_string(&events, ids, modifier,
                                              has_constraint);
index 548008f..2dd67c6 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef PERF_UTIL_OFF_CPU_H
 #define PERF_UTIL_OFF_CPU_H
 
+#include <linux/perf_event.h>
+
 struct evlist;
 struct target;
 struct perf_session;
@@ -8,6 +10,13 @@ struct record_opts;
 
 #define OFFCPU_EVENT  "offcpu-time"
 
+#define OFFCPU_SAMPLE_TYPES  (PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_IP | \
+                             PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
+                             PERF_SAMPLE_ID | PERF_SAMPLE_CPU | \
+                             PERF_SAMPLE_PERIOD | PERF_SAMPLE_CALLCHAIN | \
+                             PERF_SAMPLE_CGROUP)
+
+
 #ifdef HAVE_BPF_SKEL
 int off_cpu_prepare(struct evlist *evlist, struct target *target,
                    struct record_opts *opts);
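
Together with the evsel.c hunk above, this establishes a two-sided contract: evsel__config() clamps the requested bits to OFFCPU_SAMPLE_TYPES, and off_cpu_write() refuses anything outside it. The shape of the check, as a sketch (the mask value is a stand-in, not the real OFFCPU_SAMPLE_TYPES):

#include <stdio.h>

#define SUPPORTED_TYPES 0x10063ULL	/* stand-in mask */

static int validate(unsigned long long sample_type)
{
	if (sample_type & ~SUPPORTED_TYPES) {
		fprintf(stderr, "unsupported sample type: %llx\n",
			sample_type);
		return -1;
	}
	return 0;
}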
index ecd3779..b3be5b1 100644 (file)
@@ -233,6 +233,33 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
        return NULL;
 }
 
+static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr)
+{
+       size_t i, phdrnum;
+       u64 sz;
+
+       if (elf_getphdrnum(elf, &phdrnum))
+               return -1;
+
+       for (i = 0; i < phdrnum; i++) {
+               if (gelf_getphdr(elf, i, phdr) == NULL)
+                       return -1;
+
+               if (phdr->p_type != PT_LOAD)
+                       continue;
+
+               sz = max(phdr->p_memsz, phdr->p_filesz);
+               if (!sz)
+                       continue;
+
+               if (vaddr >= phdr->p_vaddr && (vaddr < phdr->p_vaddr + sz))
+                       return 0;
+       }
+
+       /* No valid program header found */
+       return -1;
+}
+
 static bool want_demangle(bool is_kernel_sym)
 {
        return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
@@ -1209,6 +1236,7 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
                                        sym.st_value);
                        used_opd = true;
                }
+
                /*
                 * When loading symbols in a data mapping, ABS symbols (which
                 * has a value of SHN_ABS in its st_shndx) failed at
@@ -1227,6 +1255,17 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
 
                gelf_getshdr(sec, &shdr);
 
+               /*
+                * If the attribute bit SHF_ALLOC is not set, the section
+                * doesn't occupy memory during process execution.
+                * E.g. ".gnu.warning.*" section is used by linker to generate
+                * warnings when calling deprecated functions, the symbols in
+                * the section aren't loaded to memory during process execution,
+                * so skip them.
+                */
+               if (!(shdr.sh_flags & SHF_ALLOC))
+                       continue;
+
                secstrs = secstrs_sym;
 
                /*
@@ -1262,11 +1301,20 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
                                goto out_elf_end;
                } else if ((used_opd && runtime_ss->adjust_symbols) ||
                           (!used_opd && syms_ss->adjust_symbols)) {
+                       GElf_Phdr phdr;
+
+                       if (elf_read_program_header(syms_ss->elf,
+                                                   (u64)sym.st_value, &phdr)) {
+                               pr_warning("%s: failed to find program header for "
+                                          "symbol: %s st_value: %#" PRIx64 "\n",
+                                          __func__, elf_name, (u64)sym.st_value);
+                               continue;
+                       }
                        pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
-                                 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
-                                 (u64)sym.st_value, (u64)shdr.sh_addr,
-                                 (u64)shdr.sh_offset);
-                       sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+                                 "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
+                                 __func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
+                                 (u64)phdr.p_offset);
+                       sym.st_value -= phdr.p_vaddr - phdr.p_offset;
                }
 
                demangled = demangle_sym(dso, kmodule, elf_name);
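
The switch from section headers to the containing PT_LOAD program header changes only where the load bias comes from; the adjustment itself is the same subtraction. Worked with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical symbol inside a PT_LOAD segment. */
	uint64_t st_value = 0x401130, p_vaddr = 0x400000, p_offset = 0;

	/* Same adjustment the patch applies, now phdr-based. */
	printf("file offset: %#llx\n",
	       (unsigned long long)(st_value - (p_vaddr - p_offset)));
	return 0;
}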
index 27acdc5..84d17bd 100644 (file)
@@ -754,7 +754,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
        snprintf(filename, sizeof(filename), "%s/proc/%d/task",
                 machine->root_dir, pid);
 
-       n = scandir(filename, &dirent, filter_task, alphasort);
+       n = scandir(filename, &dirent, filter_task, NULL);
        if (n < 0)
                return n;
 
@@ -767,11 +767,12 @@ static int __event__synthesize_thread(union perf_event *comm_event,
                if (*end)
                        continue;
 
-               rc = -1;
+               /* some threads may exit just after the scan; ignore them */
                if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
                                             &tgid, &ppid, &kernel_thread) != 0)
-                       break;
+                       continue;
 
+               rc = -1;
                if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
                                                ppid, process, machine) < 0)
                        break;
@@ -987,7 +988,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
                return 0;
 
        snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
-       n = scandir(proc_path, &dirent, filter_task, alphasort);
+       n = scandir(proc_path, &dirent, filter_task, NULL);
        if (n < 0)
                return err;
 
index 3762269..81b6bd6 100644 (file)
@@ -174,7 +174,7 @@ static int elf_section_address_and_offset(int fd, const char *name, u64 *address
        Elf *elf;
        GElf_Ehdr ehdr;
        GElf_Shdr shdr;
-       int ret;
+       int ret = -1;
 
        elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
        if (elf == NULL)
@@ -197,7 +197,7 @@ out_err:
 #ifndef NO_LIBUNWIND_DEBUG_FRAME
 static u64 elf_section_offset(int fd, const char *name)
 {
-       u64 address, offset;
+       u64 address, offset = 0;
 
        if (elf_section_address_and_offset(fd, name, &address, &offset))
                return 0;
index 83ef55e..2974b44 100644 (file)
@@ -121,24 +121,24 @@ static void kprobe_multi_link_api_subtest(void)
 })
 
        GET_ADDR("bpf_fentry_test1", addrs[0]);
-       GET_ADDR("bpf_fentry_test2", addrs[1]);
-       GET_ADDR("bpf_fentry_test3", addrs[2]);
-       GET_ADDR("bpf_fentry_test4", addrs[3]);
-       GET_ADDR("bpf_fentry_test5", addrs[4]);
-       GET_ADDR("bpf_fentry_test6", addrs[5]);
-       GET_ADDR("bpf_fentry_test7", addrs[6]);
+       GET_ADDR("bpf_fentry_test3", addrs[1]);
+       GET_ADDR("bpf_fentry_test4", addrs[2]);
+       GET_ADDR("bpf_fentry_test5", addrs[3]);
+       GET_ADDR("bpf_fentry_test6", addrs[4]);
+       GET_ADDR("bpf_fentry_test7", addrs[5]);
+       GET_ADDR("bpf_fentry_test2", addrs[6]);
        GET_ADDR("bpf_fentry_test8", addrs[7]);
 
 #undef GET_ADDR
 
-       cookies[0] = 1;
-       cookies[1] = 2;
-       cookies[2] = 3;
-       cookies[3] = 4;
-       cookies[4] = 5;
-       cookies[5] = 6;
-       cookies[6] = 7;
-       cookies[7] = 8;
+       cookies[0] = 1; /* bpf_fentry_test1 */
+       cookies[1] = 2; /* bpf_fentry_test3 */
+       cookies[2] = 3; /* bpf_fentry_test4 */
+       cookies[3] = 4; /* bpf_fentry_test5 */
+       cookies[4] = 5; /* bpf_fentry_test6 */
+       cookies[5] = 6; /* bpf_fentry_test7 */
+       cookies[6] = 7; /* bpf_fentry_test2 */
+       cookies[7] = 8; /* bpf_fentry_test8 */
 
        opts.kprobe_multi.addrs = (const unsigned long *) &addrs;
        opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
@@ -149,14 +149,14 @@ static void kprobe_multi_link_api_subtest(void)
        if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
                goto cleanup;
 
-       cookies[0] = 8;
-       cookies[1] = 7;
-       cookies[2] = 6;
-       cookies[3] = 5;
-       cookies[4] = 4;
-       cookies[5] = 3;
-       cookies[6] = 2;
-       cookies[7] = 1;
+       cookies[0] = 8; /* bpf_fentry_test1 */
+       cookies[1] = 7; /* bpf_fentry_test3 */
+       cookies[2] = 6; /* bpf_fentry_test4 */
+       cookies[3] = 5; /* bpf_fentry_test5 */
+       cookies[4] = 4; /* bpf_fentry_test6 */
+       cookies[5] = 3; /* bpf_fentry_test7 */
+       cookies[6] = 2; /* bpf_fentry_test2 */
+       cookies[7] = 1; /* bpf_fentry_test8 */
 
        opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
        prog_fd = bpf_program__fd(skel->progs.test_kretprobe);
@@ -181,12 +181,12 @@ static void kprobe_multi_attach_api_subtest(void)
        struct kprobe_multi *skel = NULL;
        const char *syms[8] = {
                "bpf_fentry_test1",
-               "bpf_fentry_test2",
                "bpf_fentry_test3",
                "bpf_fentry_test4",
                "bpf_fentry_test5",
                "bpf_fentry_test6",
                "bpf_fentry_test7",
+               "bpf_fentry_test2",
                "bpf_fentry_test8",
        };
        __u64 cookies[8];
@@ -198,14 +198,14 @@ static void kprobe_multi_attach_api_subtest(void)
        skel->bss->pid = getpid();
        skel->bss->test_cookie = true;
 
-       cookies[0] = 1;
-       cookies[1] = 2;
-       cookies[2] = 3;
-       cookies[3] = 4;
-       cookies[4] = 5;
-       cookies[5] = 6;
-       cookies[6] = 7;
-       cookies[7] = 8;
+       cookies[0] = 1; /* bpf_fentry_test1 */
+       cookies[1] = 2; /* bpf_fentry_test3 */
+       cookies[2] = 3; /* bpf_fentry_test4 */
+       cookies[3] = 4; /* bpf_fentry_test5 */
+       cookies[4] = 5; /* bpf_fentry_test6 */
+       cookies[5] = 6; /* bpf_fentry_test7 */
+       cookies[6] = 7; /* bpf_fentry_test2 */
+       cookies[7] = 8; /* bpf_fentry_test8 */
 
        opts.syms = syms;
        opts.cnt = ARRAY_SIZE(syms);
@@ -216,14 +216,14 @@ static void kprobe_multi_attach_api_subtest(void)
        if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
                goto cleanup;
 
-       cookies[0] = 8;
-       cookies[1] = 7;
-       cookies[2] = 6;
-       cookies[3] = 5;
-       cookies[4] = 4;
-       cookies[5] = 3;
-       cookies[6] = 2;
-       cookies[7] = 1;
+       cookies[0] = 8; /* bpf_fentry_test1 */
+       cookies[1] = 7; /* bpf_fentry_test3 */
+       cookies[2] = 6; /* bpf_fentry_test4 */
+       cookies[3] = 5; /* bpf_fentry_test5 */
+       cookies[4] = 4; /* bpf_fentry_test6 */
+       cookies[5] = 3; /* bpf_fentry_test7 */
+       cookies[6] = 2; /* bpf_fentry_test2 */
+       cookies[7] = 1; /* bpf_fentry_test8 */
 
        opts.retprobe = true;
 
index 586dc52..5b93d5d 100644 (file)
@@ -364,6 +364,9 @@ static int get_syms(char ***symsp, size_t *cntp)
                        continue;
                if (!strncmp(name, "rcu_", 4))
                        continue;
+               if (!strncmp(name, "__ftrace_invalid_address__",
+                            sizeof("__ftrace_invalid_address__") - 1))
+                       continue;
                err = hashmap__add(map, name, NULL);
                if (err) {
                        free(name);
index af293ea..e172d89 100644 (file)
@@ -4,6 +4,7 @@
  * Tests for sockmap/sockhash holding kTLS sockets.
  */
 
+#include <netinet/tcp.h>
 #include "test_progs.h"
 
 #define MAX_TEST_NAME 80
@@ -92,9 +93,78 @@ close_srv:
        close(srv);
 }
 
+static void test_sockmap_ktls_update_fails_when_sock_has_ulp(int family, int map)
+{
+       struct sockaddr_storage addr = {};
+       socklen_t len = sizeof(addr);
+       struct sockaddr_in6 *v6;
+       struct sockaddr_in *v4;
+       int err, s, zero = 0;
+
+       switch (family) {
+       case AF_INET:
+               v4 = (struct sockaddr_in *)&addr;
+               v4->sin_family = AF_INET;
+               break;
+       case AF_INET6:
+               v6 = (struct sockaddr_in6 *)&addr;
+               v6->sin6_family = AF_INET6;
+               break;
+       default:
+               PRINT_FAIL("unsupported socket family %d", family);
+               return;
+       }
+
+       s = socket(family, SOCK_STREAM, 0);
+       if (!ASSERT_GE(s, 0, "socket"))
+               return;
+
+       err = bind(s, (struct sockaddr *)&addr, len);
+       if (!ASSERT_OK(err, "bind"))
+               goto close;
+
+       err = getsockname(s, (struct sockaddr *)&addr, &len);
+       if (!ASSERT_OK(err, "getsockname"))
+               goto close;
+
+       err = connect(s, (struct sockaddr *)&addr, len);
+       if (!ASSERT_OK(err, "connect"))
+               goto close;
+
+       /* save sk->sk_prot and set it to tls_prots */
+       err = setsockopt(s, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls"));
+       if (!ASSERT_OK(err, "setsockopt(TCP_ULP)"))
+               goto close;
+
+       /* sockmap update should not affect saved sk_prot */
+       err = bpf_map_update_elem(map, &zero, &s, BPF_ANY);
+       if (!ASSERT_ERR(err, "sockmap update elem"))
+               goto close;
+
+       /* call sk->sk_prot->setsockopt to dispatch to saved sk_prot */
+       err = setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &zero, sizeof(zero));
+       ASSERT_OK(err, "setsockopt(TCP_NODELAY)");
+
+close:
+       close(s);
+}
+
+static const char *fmt_test_name(const char *subtest_name, int family,
+                                enum bpf_map_type map_type)
+{
+       const char *map_type_str = map_type == BPF_MAP_TYPE_SOCKMAP ? "SOCKMAP" : "SOCKHASH";
+       const char *family_str = family == AF_INET ? "IPv4" : "IPv6";
+       static char test_name[MAX_TEST_NAME];
+
+       snprintf(test_name, MAX_TEST_NAME,
+                "sockmap_ktls %s %s %s",
+                subtest_name, family_str, map_type_str);
+
+       return test_name;
+}
+
 static void run_tests(int family, enum bpf_map_type map_type)
 {
-       char test_name[MAX_TEST_NAME];
        int map;
 
        map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
@@ -103,14 +173,10 @@ static void run_tests(int family, enum bpf_map_type map_type)
                return;
        }
 
-       snprintf(test_name, MAX_TEST_NAME,
-                "sockmap_ktls disconnect_after_delete %s %s",
-                family == AF_INET ? "IPv4" : "IPv6",
-                map_type == BPF_MAP_TYPE_SOCKMAP ? "SOCKMAP" : "SOCKHASH");
-       if (!test__start_subtest(test_name))
-               return;
-
-       test_sockmap_ktls_disconnect_after_delete(family, map);
+       if (test__start_subtest(fmt_test_name("disconnect_after_delete", family, map_type)))
+               test_sockmap_ktls_disconnect_after_delete(family, map);
+       if (test__start_subtest(fmt_test_name("update_fails_when_sock_has_ulp", family, map_type)))
+               test_sockmap_ktls_update_fails_when_sock_has_ulp(family, map);
 
        close(map);
 }
index c4da87e..19c7088 100644 (file)
@@ -831,6 +831,59 @@ out:
        bpf_object__close(obj);
 }
 
+#include "tailcall_bpf2bpf6.skel.h"
+
+/* Tail call counting works even when there is data on the stack whose
+ * size is not a multiple of 8 bytes.
+ */
+static void test_tailcall_bpf2bpf_6(void)
+{
+       struct tailcall_bpf2bpf6 *obj;
+       int err, map_fd, prog_fd, main_fd, data_fd, i, val;
+       LIBBPF_OPTS(bpf_test_run_opts, topts,
+               .data_in = &pkt_v4,
+               .data_size_in = sizeof(pkt_v4),
+               .repeat = 1,
+       );
+
+       obj = tailcall_bpf2bpf6__open_and_load();
+       if (!ASSERT_OK_PTR(obj, "open and load"))
+               return;
+
+       main_fd = bpf_program__fd(obj->progs.entry);
+       if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
+               goto out;
+
+       map_fd = bpf_map__fd(obj->maps.jmp_table);
+       if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
+               goto out;
+
+       prog_fd = bpf_program__fd(obj->progs.classifier_0);
+       if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
+               goto out;
+
+       i = 0;
+       err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+       if (!ASSERT_OK(err, "jmp_table map update"))
+               goto out;
+
+       err = bpf_prog_test_run_opts(main_fd, &topts);
+       ASSERT_OK(err, "entry prog test run");
+       ASSERT_EQ(topts.retval, 0, "tailcall retval");
+
+       data_fd = bpf_map__fd(obj->maps.bss);
+       if (!ASSERT_GE(data_fd, 0, "bss map fd"))
+               goto out;
+
+       i = 0;
+       err = bpf_map_lookup_elem(data_fd, &i, &val);
+       ASSERT_OK(err, "bss map lookup");
+       ASSERT_EQ(val, 1, "done flag is set");
+
+out:
+       tailcall_bpf2bpf6__destroy(obj);
+}
+
 void test_tailcalls(void)
 {
        if (test__start_subtest("tailcall_1"))
@@ -855,4 +908,6 @@ void test_tailcalls(void)
                test_tailcall_bpf2bpf_4(false);
        if (test__start_subtest("tailcall_bpf2bpf_5"))
                test_tailcall_bpf2bpf_4(true);
+       if (test__start_subtest("tailcall_bpf2bpf_6"))
+               test_tailcall_bpf2bpf_6();
 }
index d811cff..0a26c24 100644 (file)
@@ -140,12 +140,12 @@ int use_after_invalid(void *ctx)
 
        bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(read_data), 0, &ptr);
 
-       bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0);
+       bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
 
        bpf_ringbuf_submit_dynptr(&ptr, 0);
 
        /* this should fail */
-       bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0);
+       bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
 
        return 0;
 }
@@ -338,7 +338,7 @@ int invalid_helper2(void *ctx)
        get_map_val_dynptr(&ptr);
 
        /* this should fail */
-       bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 8, 0);
+       bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 8, 0, 0);
 
        return 0;
 }
@@ -377,7 +377,7 @@ int invalid_write2(void *ctx)
        memcpy((void *)&ptr + 8, &x, sizeof(x));
 
        /* this should fail */
-       bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0);
+       bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
 
        bpf_ringbuf_submit_dynptr(&ptr, 0);
 
@@ -473,7 +473,7 @@ int invalid_read2(void *ctx)
        get_map_val_dynptr(&ptr);
 
        /* this should fail */
-       bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 1, 0);
+       bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 1, 0, 0);
 
        return 0;
 }
index d67be48..a3a6103 100644 (file)
@@ -43,10 +43,10 @@ int test_read_write(void *ctx)
        bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(write_data), 0, &ptr);
 
        /* Write data into the dynptr */
-       err = err ?: bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data));
+       err = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
 
        /* Read the data that was written into the dynptr */
-       err = err ?: bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0);
+       err = err ?: bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
 
        /* Ensure the data we read matches the data we wrote */
        for (i = 0; i < sizeof(read_data); i++) {
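
For context, bpf_dynptr_read() and bpf_dynptr_write() gained a trailing flags argument, passed as 0 throughout these tests. The signatures implied by the updated call sites, as a sketch rather than the authoritative UAPI text:

/* Types as in the BPF UAPI headers; flags must currently be zero. */
long bpf_dynptr_read(void *dst, __u32 len, struct bpf_dynptr *src,
		     __u32 offset, __u64 flags);
long bpf_dynptr_write(struct bpf_dynptr *dst, __u32 offset, void *src,
		      __u32 len, __u64 flags);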
index 93510f4..08f95a8 100644 (file)
@@ -54,21 +54,21 @@ static void kprobe_multi_check(void *ctx, bool is_return)
 
        if (is_return) {
                SET(kretprobe_test1_result, &bpf_fentry_test1, 8);
-               SET(kretprobe_test2_result, &bpf_fentry_test2, 7);
-               SET(kretprobe_test3_result, &bpf_fentry_test3, 6);
-               SET(kretprobe_test4_result, &bpf_fentry_test4, 5);
-               SET(kretprobe_test5_result, &bpf_fentry_test5, 4);
-               SET(kretprobe_test6_result, &bpf_fentry_test6, 3);
-               SET(kretprobe_test7_result, &bpf_fentry_test7, 2);
+               SET(kretprobe_test2_result, &bpf_fentry_test2, 2);
+               SET(kretprobe_test3_result, &bpf_fentry_test3, 7);
+               SET(kretprobe_test4_result, &bpf_fentry_test4, 6);
+               SET(kretprobe_test5_result, &bpf_fentry_test5, 5);
+               SET(kretprobe_test6_result, &bpf_fentry_test6, 4);
+               SET(kretprobe_test7_result, &bpf_fentry_test7, 3);
                SET(kretprobe_test8_result, &bpf_fentry_test8, 1);
        } else {
                SET(kprobe_test1_result, &bpf_fentry_test1, 1);
-               SET(kprobe_test2_result, &bpf_fentry_test2, 2);
-               SET(kprobe_test3_result, &bpf_fentry_test3, 3);
-               SET(kprobe_test4_result, &bpf_fentry_test4, 4);
-               SET(kprobe_test5_result, &bpf_fentry_test5, 5);
-               SET(kprobe_test6_result, &bpf_fentry_test6, 6);
-               SET(kprobe_test7_result, &bpf_fentry_test7, 7);
+               SET(kprobe_test2_result, &bpf_fentry_test2, 7);
+               SET(kprobe_test3_result, &bpf_fentry_test3, 2);
+               SET(kprobe_test4_result, &bpf_fentry_test4, 3);
+               SET(kprobe_test5_result, &bpf_fentry_test5, 4);
+               SET(kprobe_test6_result, &bpf_fentry_test6, 5);
+               SET(kprobe_test7_result, &bpf_fentry_test7, 6);
                SET(kprobe_test8_result, &bpf_fentry_test8, 8);
        }
 
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c
new file mode 100644 (file)
index 0000000..41ce83d
--- /dev/null
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+#define __unused __attribute__((unused))
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+       __uint(max_entries, 1);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int done = 0;
+
+SEC("tc")
+int classifier_0(struct __sk_buff *skb __unused)
+{
+       done = 1;
+       return 0;
+}
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+       /* Don't propagate the constant to the caller */
+       volatile int ret = 1;
+
+       bpf_tail_call_static(skb, &jmp_table, 0);
+       return ret;
+}
+
+SEC("tc")
+int entry(struct __sk_buff *skb)
+{
+       /* Have data on the stack whose size is not a multiple of 8 */
+       volatile char arr[1] = {};
+
+       return subprog_tail(skb);
+}
+
+char __license[] SEC("license") = "GPL";
index 6ddc418..1a27a62 100644 (file)
        .result = ACCEPT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "jeq32/jne32: bounds checking",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_6, 563),
+       BPF_MOV64_IMM(BPF_REG_2, 0),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+       BPF_ALU32_REG(BPF_OR, BPF_REG_2, BPF_REG_6),
+       BPF_JMP32_IMM(BPF_JNE, BPF_REG_2, 8, 5),
+       BPF_JMP_IMM(BPF_JSGE, BPF_REG_2, 500, 2),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = ACCEPT,
+       .retval = 1,
+},
index 6f951d1..497fe17 100644 (file)
        .result = ACCEPT,
        .retval = 3,
 },
+{
+       "jump & dead code elimination",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_MOV64_IMM(BPF_REG_3, 0),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_3, 0),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_3, 0),
+       BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 32767),
+       BPF_JMP_IMM(BPF_JSGE, BPF_REG_3, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0x8000, 1),
+       BPF_EXIT_INSN(),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -32767),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 0, 1),
+       BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = ACCEPT,
+       .retval = 2,
+},
index aa8e8b5..cd8c5ec 100644 (file)
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 CFLAGS += -I../../../../usr/include/
+CFLAGS += -I../../../../include/
 
 TEST_GEN_PROGS := dma_map_benchmark
 
index c3b3c09..5c997f1 100644 (file)
@@ -10,8 +10,8 @@
 #include <unistd.h>
 #include <sys/ioctl.h>
 #include <sys/mman.h>
-#include <linux/map_benchmark.h>
 #include <linux/types.h>
+#include <linux/map_benchmark.h>
 
 #define NSEC_PER_MSEC  1000000L
 
index 71b3066..616ed40 100644 (file)
@@ -3,6 +3,6 @@
 TEST_PROGS := gpio-mockup.sh gpio-sim.sh
 TEST_FILES := gpio-mockup-sysfs.sh
 TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev gpio-chip-info gpio-line-name
-CFLAGS += -O2 -g -Wall -I../../../../usr/include/
+CFLAGS += -O2 -g -Wall -I../../../../usr/include/ $(KHDR_INCLUDES)
 
 include ../lib.mk
index 81470a9..22423c8 100644 (file)
@@ -37,11 +37,38 @@ ifeq ($(ARCH),riscv)
        UNAME_M := riscv
 endif
 
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
-LIBKVM_x86_64 = lib/x86_64/apic.c lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
-LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S lib/aarch64/spinlock.c lib/aarch64/gic.c lib/aarch64/gic_v3.c lib/aarch64/vgic.c
-LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
-LIBKVM_riscv = lib/riscv/processor.c lib/riscv/ucall.c
+LIBKVM += lib/assert.c
+LIBKVM += lib/elf.c
+LIBKVM += lib/guest_modes.c
+LIBKVM += lib/io.c
+LIBKVM += lib/kvm_util.c
+LIBKVM += lib/perf_test_util.c
+LIBKVM += lib/rbtree.c
+LIBKVM += lib/sparsebit.c
+LIBKVM += lib/test_util.c
+
+LIBKVM_x86_64 += lib/x86_64/apic.c
+LIBKVM_x86_64 += lib/x86_64/handlers.S
+LIBKVM_x86_64 += lib/x86_64/perf_test_util.c
+LIBKVM_x86_64 += lib/x86_64/processor.c
+LIBKVM_x86_64 += lib/x86_64/svm.c
+LIBKVM_x86_64 += lib/x86_64/ucall.c
+LIBKVM_x86_64 += lib/x86_64/vmx.c
+
+LIBKVM_aarch64 += lib/aarch64/gic.c
+LIBKVM_aarch64 += lib/aarch64/gic_v3.c
+LIBKVM_aarch64 += lib/aarch64/handlers.S
+LIBKVM_aarch64 += lib/aarch64/processor.c
+LIBKVM_aarch64 += lib/aarch64/spinlock.c
+LIBKVM_aarch64 += lib/aarch64/ucall.c
+LIBKVM_aarch64 += lib/aarch64/vgic.c
+
+LIBKVM_s390x += lib/s390x/diag318_test_handler.c
+LIBKVM_s390x += lib/s390x/processor.c
+LIBKVM_s390x += lib/s390x/ucall.c
+
+LIBKVM_riscv += lib/riscv/processor.c
+LIBKVM_riscv += lib/riscv/ucall.c
 
 TEST_GEN_PROGS_x86_64 = x86_64/cpuid_test
 TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
@@ -173,12 +200,13 @@ LDFLAGS += -pthread $(no-pie-option) $(pgste-option)
 # $(TEST_GEN_PROGS) starts with $(OUTPUT)/
 include ../lib.mk
 
-STATIC_LIBS := $(OUTPUT)/libkvm.a
 LIBKVM_C := $(filter %.c,$(LIBKVM))
 LIBKVM_S := $(filter %.S,$(LIBKVM))
 LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
 LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
-EXTRA_CLEAN += $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(STATIC_LIBS) cscope.*
+LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)
+
+EXTRA_CLEAN += $(LIBKVM_OBJS) cscope.*
 
 x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
 $(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
@@ -187,13 +215,8 @@ $(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
 $(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S
        $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
 
-LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)
-$(OUTPUT)/libkvm.a: $(LIBKVM_OBJS)
-       $(AR) crs $@ $^
-
 x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))
-all: $(STATIC_LIBS)
-$(TEST_GEN_PROGS): $(STATIC_LIBS)
+$(TEST_GEN_PROGS): $(LIBKVM_OBJS)
 
 cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib ..
 cscope:
index 7b47ae4..d60a34c 100644 (file)
@@ -336,8 +336,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 static void help(char *name)
 {
        puts("");
-       printf("usage: %s [-h] [-i iterations] [-p offset] [-g]"
-              "[-m mode] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]"
+       printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
+              "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]"
               "[-x memslots]\n", name);
        puts("");
        printf(" -i: specify iteration counts (default: %"PRIu64")\n",
@@ -351,6 +351,7 @@ static void help(char *name)
        printf(" -p: specify guest physical test memory offset\n"
               "     Warning: a low offset can conflict with the loaded test code.\n");
        guest_modes_help();
+       printf(" -n: Run the vCPUs in nested mode (L2)\n");
        printf(" -b: specify the size of the memory region which should be\n"
               "     dirtied by each vCPU. e.g. 10M or 3G.\n"
               "     (default: 1G)\n");
@@ -387,7 +388,7 @@ int main(int argc, char *argv[])
 
        guest_modes_append_default();
 
-       while ((opt = getopt(argc, argv, "ghi:p:m:b:f:v:os:x:")) != -1) {
+       while ((opt = getopt(argc, argv, "ghi:p:m:nb:f:v:os:x:")) != -1) {
                switch (opt) {
                case 'g':
                        dirty_log_manual_caps = 0;
@@ -401,6 +402,9 @@ int main(int argc, char *argv[])
                case 'm':
                        guest_modes_cmdline(optarg);
                        break;
+               case 'n':
+                       perf_test_args.nested = true;
+                       break;
                case 'b':
                        guest_percpu_mem_size = parse_size(optarg);
                        break;
index a86f953..d822cb6 100644 (file)
@@ -30,10 +30,15 @@ struct perf_test_vcpu_args {
 
 struct perf_test_args {
        struct kvm_vm *vm;
+       /* The starting address and size of the guest test region. */
        uint64_t gpa;
+       uint64_t size;
        uint64_t guest_page_size;
        int wr_fract;
 
+       /* Run vCPUs in L2 instead of L1, if the architecture supports it. */
+       bool nested;
+
        struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS];
 };
 
@@ -49,5 +54,9 @@ void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract);
 
 void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
 void perf_test_join_vcpu_threads(int vcpus);
+void perf_test_guest_code(uint32_t vcpu_id);
+
+uint64_t perf_test_nested_pages(int nr_vcpus);
+void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus);
 
 #endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
index d0d51ad..6ce1854 100644 (file)
@@ -482,13 +482,23 @@ void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
 struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
 void vm_xsave_req_perm(int bit);
 
-enum x86_page_size {
-       X86_PAGE_SIZE_4K = 0,
-       X86_PAGE_SIZE_2M,
-       X86_PAGE_SIZE_1G,
+enum pg_level {
+       PG_LEVEL_NONE,
+       PG_LEVEL_4K,
+       PG_LEVEL_2M,
+       PG_LEVEL_1G,
+       PG_LEVEL_512G,
+       PG_LEVEL_NUM
 };
-void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-                  enum x86_page_size page_size);
+
+#define PG_LEVEL_SHIFT(_level) ((_level - 1) * 9 + 12)
+#define PG_LEVEL_SIZE(_level) (1ull << PG_LEVEL_SHIFT(_level))
+
+#define PG_SIZE_4K PG_LEVEL_SIZE(PG_LEVEL_4K)
+#define PG_SIZE_2M PG_LEVEL_SIZE(PG_LEVEL_2M)
+#define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G)
+
+void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
 
 /*
  * Basic CPU control in CR0
@@ -505,9 +515,6 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 #define X86_CR0_CD          (1UL<<30) /* Cache Disable */
 #define X86_CR0_PG          (1UL<<31) /* Paging */
 
-/* VMX_EPT_VPID_CAP bits */
-#define VMX_EPT_VPID_CAP_AD_BITS       (1ULL << 21)
-
 #define XSTATE_XTILE_CFG_BIT           17
 #define XSTATE_XTILE_DATA_BIT          18
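
The generic pg_level enum replaces the x86_page_size one, and PG_LEVEL_SHIFT encodes x86's nine address bits per paging level on top of the 12-bit page offset. A quick check of the resulting shifts:

#include <stdio.h>

#define PG_LEVEL_4K 1
#define PG_LEVEL_2M 2
#define PG_LEVEL_1G 3
#define PG_LEVEL_SHIFT(l) (((l) - 1) * 9 + 12)

int main(void)
{
	/* 4K -> 12, 2M -> 21, 1G -> 30 */
	printf("%d %d %d\n", PG_LEVEL_SHIFT(PG_LEVEL_4K),
	       PG_LEVEL_SHIFT(PG_LEVEL_2M), PG_LEVEL_SHIFT(PG_LEVEL_1G));
	return 0;
}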
 
index 583ceb0..cc3604f 100644 (file)
@@ -96,6 +96,9 @@
 #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK    0x0000001f
 #define VMX_MISC_SAVE_EFER_LMA                 0x00000020
 
+#define VMX_EPT_VPID_CAP_1G_PAGES              0x00020000
+#define VMX_EPT_VPID_CAP_AD_BITS               0x00200000
+
 #define EXIT_REASON_FAILED_VMENTRY     0x80000000
 #define EXIT_REASON_EXCEPTION_NMI      0
 #define EXIT_REASON_EXTERNAL_INTERRUPT 1
@@ -606,6 +609,7 @@ bool load_vmcs(struct vmx_pages *vmx);
 
 bool nested_vmx_supported(void);
 void nested_vmx_check_supported(void);
+bool ept_1g_pages_supported(void);
 
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                   uint64_t nested_paddr, uint64_t paddr);
@@ -613,6 +617,8 @@ void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                 uint64_t nested_paddr, uint64_t paddr, uint64_t size);
 void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
                        uint32_t memslot);
+void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
+                           uint64_t addr, uint64_t size);
 void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
                  uint32_t eptp_memslot);
 void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
index e0b0164..be1d972 100644 (file)
@@ -73,20 +73,19 @@ void ucall_uninit(struct kvm_vm *vm)
 
 void ucall(uint64_t cmd, int nargs, ...)
 {
-       struct ucall uc = {
-               .cmd = cmd,
-       };
+       struct ucall uc = {};
        va_list va;
        int i;
 
+       WRITE_ONCE(uc.cmd, cmd);
        nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
 
        va_start(va, nargs);
        for (i = 0; i < nargs; ++i)
-               uc.args[i] = va_arg(va, uint64_t);
+               WRITE_ONCE(uc.args[i], va_arg(va, uint64_t));
        va_end(va);
 
-       *ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
+       WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc);
 }
 
 uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
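
The guest hands the host a pointer to an on-stack struct via a single MMIO store, so every field store before it must be a real store the compiler cannot defer, merge, or elide; WRITE_ONCE() guarantees that with a volatile access. Roughly, as a sketch simplified from the kernel's definition:

#define WRITE_ONCE(x, val) \
	(*(volatile __typeof__(x) *)&(x) = (val))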
index 722df3a..f989ff9 100644 (file)
@@ -40,7 +40,7 @@ static bool all_vcpu_threads_running;
  * Continuously write to the first 8 bytes of each page in the
  * specified region.
  */
-static void guest_code(uint32_t vcpu_id)
+void perf_test_guest_code(uint32_t vcpu_id)
 {
        struct perf_test_args *pta = &perf_test_args;
        struct perf_test_vcpu_args *vcpu_args = &pta->vcpu_args[vcpu_id];
@@ -108,8 +108,9 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 {
        struct perf_test_args *pta = &perf_test_args;
        struct kvm_vm *vm;
-       uint64_t guest_num_pages;
+       uint64_t guest_num_pages, slot0_pages = DEFAULT_GUEST_PHY_PAGES;
        uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
+       uint64_t region_end_gfn;
        int i;
 
        pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
@@ -135,33 +136,53 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
                    slots);
 
        /*
+        * If using nested, allocate extra pages for the nested page tables and
+        * in-memory data structures.
+        */
+       if (pta->nested)
+               slot0_pages += perf_test_nested_pages(vcpus);
+
+       /*
         * Pass guest_num_pages to populate the page tables for test memory.
         * The memory is also added to memslot 0, but that's a benign side
         * effect as KVM allows aliasing HVAs in memslots.
         */
-       vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES,
-                                 guest_num_pages, 0, guest_code, NULL);
+       vm = vm_create_with_vcpus(mode, vcpus, slot0_pages, guest_num_pages, 0,
+                                 perf_test_guest_code, NULL);
 
        pta->vm = vm;
 
+       /* Put the test region at the top of guest physical memory. */
+       region_end_gfn = vm_get_max_gfn(vm) + 1;
+
+#ifdef __x86_64__
+       /*
+        * When running vCPUs in L2, restrict the test region to 48 bits to
+        * avoid needing 5-level page tables to identity map L2.
+        */
+       if (pta->nested)
+               region_end_gfn = min(region_end_gfn, (1UL << 48) / pta->guest_page_size);
+#endif
        /*
         * If there should be more memory in the guest test region than there
         * can be pages in the guest, it will definitely cause problems.
         */
-       TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
+       TEST_ASSERT(guest_num_pages < region_end_gfn,
                    "Requested more guest memory than address space allows.\n"
                    "    guest pages: %" PRIx64 " max gfn: %" PRIx64
                    " vcpus: %d wss: %" PRIx64 "]\n",
-                   guest_num_pages, vm_get_max_gfn(vm), vcpus,
+                   guest_num_pages, region_end_gfn - 1, vcpus,
                    vcpu_memory_bytes);
 
-       pta->gpa = (vm_get_max_gfn(vm) - guest_num_pages) * pta->guest_page_size;
+       pta->gpa = (region_end_gfn - guest_num_pages) * pta->guest_page_size;
        pta->gpa = align_down(pta->gpa, backing_src_pagesz);
 #ifdef __s390x__
        /* Align to 1M (segment size) */
        pta->gpa = align_down(pta->gpa, 1 << 20);
 #endif
-       pr_info("guest physical test memory offset: 0x%lx\n", pta->gpa);
+       pta->size = guest_num_pages * pta->guest_page_size;
+       pr_info("guest physical test memory: [0x%lx, 0x%lx)\n",
+               pta->gpa, pta->gpa + pta->size);
 
        /* Add extra memory slots for testing */
        for (i = 0; i < slots; i++) {
@@ -178,6 +199,11 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 
        perf_test_setup_vcpus(vm, vcpus, vcpu_memory_bytes, partition_vcpu_memory_access);
 
+       if (pta->nested) {
+               pr_info("Configuring vCPUs to run in L2 (nested).\n");
+               perf_test_setup_nested(vm, vcpus);
+       }
+
        ucall_init(vm, NULL);
 
        /* Export the shared variables to the guest. */
@@ -198,6 +224,17 @@ void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract)
        sync_global_to_guest(vm, perf_test_args);
 }
 
+uint64_t __weak perf_test_nested_pages(int nr_vcpus)
+{
+       return 0;
+}
+
+void __weak perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus)
+{
+       pr_info("%s() not support on this architecture, skipping.\n", __func__);
+       exit(KSFT_SKIP);
+}
+
 static void *vcpu_thread_main(void *data)
 {
        struct vcpu_thread *vcpu = data;
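
The __weak definitions are link-time defaults: an architecture that supports nested (here x86, via the new lib/x86_64/perf_test_util.c below) overrides them simply by defining strong symbols of the same names. The pattern in miniature, with a hypothetical function name:

#include <stdint.h>

/* Generic fallback; any strong arch_extra_pages() elsewhere in the
 * link silently replaces this definition. */
uint64_t __attribute__((weak)) arch_extra_pages(int nr_vcpus)
{
	(void)nr_vcpus;
	return 0;
}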
diff --git a/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c b/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c
new file mode 100644 (file)
index 0000000..e258524
--- /dev/null
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * x86_64-specific extensions to perf_test_util.c.
+ *
+ * Copyright (C) 2022, Google, Inc.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "perf_test_util.h"
+#include "../kvm_util_internal.h"
+#include "processor.h"
+#include "vmx.h"
+
+void perf_test_l2_guest_code(uint64_t vcpu_id)
+{
+       perf_test_guest_code(vcpu_id);
+       vmcall();
+}
+
+extern char perf_test_l2_guest_entry[];
+__asm__(
+"perf_test_l2_guest_entry:"
+"      mov (%rsp), %rdi;"
+"      call perf_test_l2_guest_code;"
+"      ud2;"
+);
+
+static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
+{
+#define L2_GUEST_STACK_SIZE 64
+       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+       unsigned long *rsp;
+
+       GUEST_ASSERT(vmx->vmcs_gpa);
+       GUEST_ASSERT(prepare_for_vmx_operation(vmx));
+       GUEST_ASSERT(load_vmcs(vmx));
+       GUEST_ASSERT(ept_1g_pages_supported());
+
+       rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
+       *rsp = vcpu_id;
+       prepare_vmcs(vmx, perf_test_l2_guest_entry, rsp);
+
+       GUEST_ASSERT(!vmlaunch());
+       GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+       GUEST_DONE();
+}
+
+uint64_t perf_test_nested_pages(int nr_vcpus)
+{
+       /*
+        * 513 page tables are enough to identity-map 256 TiB of L2 with 1G
+        * pages and 4-level paging, plus a few pages per vCPU for data
+        * structures such as the VMCS.
+        */
+       return 513 + 10 * nr_vcpus;
+}
+
+void perf_test_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
+{
+       uint64_t start, end;
+
+       prepare_eptp(vmx, vm, 0);
+
+       /*
+        * Identity map the first 4G and the test region with 1G pages so that
+        * KVM can shadow the EPT12 with the maximum huge page size supported
+        * by the backing source.
+        */
+       nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL);
+
+       start = align_down(perf_test_args.gpa, PG_SIZE_1G);
+       end = align_up(perf_test_args.gpa + perf_test_args.size, PG_SIZE_1G);
+       nested_identity_map_1g(vmx, vm, start, end - start);
+}
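The helper rounds the test region out to 1 GiB boundaries before mapping it. align_down()/align_up() come from the selftest headers; a sketch of the usual power-of-two implementations (assumed shapes, not copied from kvm_util.h):

    #include <stdint.h>

    /* Typical power-of-two alignment helpers; align must be a power of two. */
    static inline uint64_t align_down(uint64_t x, uint64_t align)
    {
            return x & ~(align - 1);
    }

    static inline uint64_t align_up(uint64_t x, uint64_t align)
    {
            return align_down(x + align - 1, align);
    }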
+
+void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus)
+{
+       struct vmx_pages *vmx, *vmx0 = NULL;
+       struct kvm_regs regs;
+       vm_vaddr_t vmx_gva;
+       int vcpu_id;
+
+       nested_vmx_check_supported();
+
+       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+               vmx = vcpu_alloc_vmx(vm, &vmx_gva);
+
+               if (vcpu_id == 0) {
+                       perf_test_setup_ept(vmx, vm);
+                       vmx0 = vmx;
+               } else {
+                       /* Share the same EPT table across all vCPUs. */
+                       vmx->eptp = vmx0->eptp;
+                       vmx->eptp_hva = vmx0->eptp_hva;
+                       vmx->eptp_gpa = vmx0->eptp_gpa;
+               }
+
+               /*
+                * Override the vCPU to run perf_test_l1_guest_code() which will
+                * bounce it into L2 before calling perf_test_guest_code().
+                */
+               vcpu_regs_get(vm, vcpu_id, &regs);
+               regs.rip = (unsigned long) perf_test_l1_guest_code;
+               vcpu_regs_set(vm, vcpu_id, &regs);
+               vcpu_args_set(vm, vcpu_id, 2, vmx_gva, vcpu_id);
+       }
+}
index 33ea5e9..ead7011 100644 (file)
@@ -158,7 +158,7 @@ static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
                          int level)
 {
        uint64_t *page_table = addr_gpa2hva(vm, pt_pfn << vm->page_shift);
-       int index = vaddr >> (vm->page_shift + level * 9) & 0x1ffu;
+       int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
 
        return &page_table[index];
 }
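The old shift expression and the new macro are equivalent; PG_LEVEL_SHIFT() just names the rule that each level adds 9 bits of index on top of the 12-bit page offset. A presumed sketch of the level helpers, inferred from the call sites rather than quoted from processor.h:

    #include <stdio.h>

    /* Presumed shape of the helpers used above (PG_LEVEL_4K == 1). */
    enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G, PG_LEVEL_512G };

    #define PG_LEVEL_SHIFT(level)   ((((level) - 1) * 9) + 12)
    #define PG_LEVEL_SIZE(level)    (1ULL << PG_LEVEL_SHIFT(level))

    int main(void)
    {
            printf("1G shift=%d size=0x%llx\n",
                   PG_LEVEL_SHIFT(PG_LEVEL_1G), PG_LEVEL_SIZE(PG_LEVEL_1G));
            return 0;   /* prints: 1G shift=30 size=0x40000000 */
    }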
@@ -167,14 +167,14 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
                                       uint64_t pt_pfn,
                                       uint64_t vaddr,
                                       uint64_t paddr,
-                                      int level,
-                                      enum x86_page_size page_size)
+                                      int current_level,
+                                      int target_level)
 {
-       uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
+       uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, current_level);
 
        if (!(*pte & PTE_PRESENT_MASK)) {
                *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
-               if (level == page_size)
+               if (current_level == target_level)
                        *pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK);
                else
                        *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
@@ -184,20 +184,19 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
                 * a hugepage at this level, and that there isn't a hugepage at
                 * this level.
                 */
-               TEST_ASSERT(level != page_size,
+               TEST_ASSERT(current_level != target_level,
                            "Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
-                           page_size, vaddr);
+                           current_level, vaddr);
                TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
                            "Cannot create page table at level: %u, vaddr: 0x%lx\n",
-                           level, vaddr);
+                           current_level, vaddr);
        }
        return pte;
 }
 
-void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-                  enum x86_page_size page_size)
+void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
 {
-       const uint64_t pg_size = 1ull << ((page_size * 9) + 12);
+       const uint64_t pg_size = PG_LEVEL_SIZE(level);
        uint64_t *pml4e, *pdpe, *pde;
        uint64_t *pte;
 
@@ -222,20 +221,20 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
         * early if a hugepage was created.
         */
        pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
-                                     vaddr, paddr, 3, page_size);
+                                     vaddr, paddr, PG_LEVEL_512G, level);
        if (*pml4e & PTE_LARGE_MASK)
                return;
 
-       pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, 2, page_size);
+       pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, PG_LEVEL_1G, level);
        if (*pdpe & PTE_LARGE_MASK)
                return;
 
-       pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, 1, page_size);
+       pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, PG_LEVEL_2M, level);
        if (*pde & PTE_LARGE_MASK)
                return;
 
        /* Fill in page table entry. */
-       pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, 0);
+       pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, PG_LEVEL_4K);
        TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
                    "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
        *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
@@ -243,7 +242,7 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 
 void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 {
-       __virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K);
+       __virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);
 }
 
 static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
index d089d8b..b77a01d 100644 (file)
@@ -198,6 +198,16 @@ bool load_vmcs(struct vmx_pages *vmx)
        return true;
 }
 
+static bool ept_vpid_cap_supported(uint64_t mask)
+{
+       return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask;
+}
+
+bool ept_1g_pages_supported(void)
+{
+       return ept_vpid_cap_supported(VMX_EPT_VPID_CAP_1G_PAGES);
+}
+
 /*
  * Initialize the control fields to the most basic settings possible.
  */
@@ -215,7 +225,7 @@ static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
                struct eptPageTablePointer eptp = {
                        .memory_type = VMX_BASIC_MEM_TYPE_WB,
                        .page_walk_length = 3, /* + 1 */
-                       .ad_enabled = !!(rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & VMX_EPT_VPID_CAP_AD_BITS),
+                       .ad_enabled = ept_vpid_cap_supported(VMX_EPT_VPID_CAP_AD_BITS),
                        .address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
                };
 
@@ -392,80 +402,93 @@ void nested_vmx_check_supported(void)
        }
 }
 
-void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-                  uint64_t nested_paddr, uint64_t paddr)
+static void nested_create_pte(struct kvm_vm *vm,
+                             struct eptPageTableEntry *pte,
+                             uint64_t nested_paddr,
+                             uint64_t paddr,
+                             int current_level,
+                             int target_level)
+{
+       if (!pte->readable) {
+               pte->writable = true;
+               pte->readable = true;
+               pte->executable = true;
+               pte->page_size = (current_level == target_level);
+               if (pte->page_size)
+                       pte->address = paddr >> vm->page_shift;
+               else
+                       pte->address = vm_alloc_page_table(vm) >> vm->page_shift;
+       } else {
+               /*
+                * Entry already present.  Assert that the caller doesn't want
+                * a hugepage at this level, and that there isn't a hugepage at
+                * this level.
+                */
+               TEST_ASSERT(current_level != target_level,
+                           "Cannot create hugepage at level: %u, nested_paddr: 0x%lx\n",
+                           current_level, nested_paddr);
+               TEST_ASSERT(!pte->page_size,
+                           "Cannot create page table at level: %u, nested_paddr: 0x%lx\n",
+                           current_level, nested_paddr);
+       }
+}
+
+void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+                    uint64_t nested_paddr, uint64_t paddr, int target_level)
 {
-       uint16_t index[4];
-       struct eptPageTableEntry *pml4e;
+       const uint64_t page_size = PG_LEVEL_SIZE(target_level);
+       struct eptPageTableEntry *pt = vmx->eptp_hva, *pte;
+       uint16_t index;
 
        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
                    "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
 
-       TEST_ASSERT((nested_paddr % vm->page_size) == 0,
+       TEST_ASSERT((nested_paddr >> 48) == 0,
+                   "Nested physical address 0x%lx requires 5-level paging",
+                   nested_paddr);
+       TEST_ASSERT((nested_paddr % page_size) == 0,
                    "Nested physical address not on page boundary,\n"
-                   "  nested_paddr: 0x%lx vm->page_size: 0x%x",
-                   nested_paddr, vm->page_size);
+                   "  nested_paddr: 0x%lx page_size: 0x%lx",
+                   nested_paddr, page_size);
        TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
                    "Physical address beyond beyond maximum supported,\n"
                    "  nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                    paddr, vm->max_gfn, vm->page_size);
-       TEST_ASSERT((paddr % vm->page_size) == 0,
+       TEST_ASSERT((paddr % page_size) == 0,
                    "Physical address not on page boundary,\n"
-                   "  paddr: 0x%lx vm->page_size: 0x%x",
-                   paddr, vm->page_size);
+                   "  paddr: 0x%lx page_size: 0x%lx",
+                   paddr, page_size);
        TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
                    "Physical address beyond beyond maximum supported,\n"
                    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                    paddr, vm->max_gfn, vm->page_size);
 
-       index[0] = (nested_paddr >> 12) & 0x1ffu;
-       index[1] = (nested_paddr >> 21) & 0x1ffu;
-       index[2] = (nested_paddr >> 30) & 0x1ffu;
-       index[3] = (nested_paddr >> 39) & 0x1ffu;
-
-       /* Allocate page directory pointer table if not present. */
-       pml4e = vmx->eptp_hva;
-       if (!pml4e[index[3]].readable) {
-               pml4e[index[3]].address = vm_alloc_page_table(vm) >> vm->page_shift;
-               pml4e[index[3]].writable = true;
-               pml4e[index[3]].readable = true;
-               pml4e[index[3]].executable = true;
-       }
+       for (int level = PG_LEVEL_512G; level >= PG_LEVEL_4K; level--) {
+               index = (nested_paddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
+               pte = &pt[index];
 
-       /* Allocate page directory table if not present. */
-       struct eptPageTableEntry *pdpe;
-       pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
-       if (!pdpe[index[2]].readable) {
-               pdpe[index[2]].address = vm_alloc_page_table(vm) >> vm->page_shift;
-               pdpe[index[2]].writable = true;
-               pdpe[index[2]].readable = true;
-               pdpe[index[2]].executable = true;
-       }
+               nested_create_pte(vm, pte, nested_paddr, paddr, level, target_level);
 
-       /* Allocate page table if not present. */
-       struct eptPageTableEntry *pde;
-       pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
-       if (!pde[index[1]].readable) {
-               pde[index[1]].address = vm_alloc_page_table(vm) >> vm->page_shift;
-               pde[index[1]].writable = true;
-               pde[index[1]].readable = true;
-               pde[index[1]].executable = true;
-       }
+               if (pte->page_size)
+                       break;
 
-       /* Fill in page table entry. */
-       struct eptPageTableEntry *pte;
-       pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
-       pte[index[0]].address = paddr >> vm->page_shift;
-       pte[index[0]].writable = true;
-       pte[index[0]].readable = true;
-       pte[index[0]].executable = true;
+               pt = addr_gpa2hva(vm, pte->address * vm->page_size);
+       }
 
        /*
         * For now mark these as accessed and dirty because the only
         * testcase we have needs that.  Can be reconsidered later.
         */
-       pte[index[0]].accessed = true;
-       pte[index[0]].dirty = true;
+       pte->accessed = true;
+       pte->dirty = true;
+}
+
+void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+                  uint64_t nested_paddr, uint64_t paddr)
+{
+       __nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
 }
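The rewritten walk extracts one 9-bit index per level instead of precomputing four: bits [47:39], [38:30], [29:21] and [20:12] of the nested address index the PML4, PDPT, PD and PT respectively. A standalone sketch of the index math (same PG_LEVEL numbering assumed earlier):

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as PG_LEVEL_SHIFT(); level 1 is 4K, level 4 is 512G. */
    static unsigned int pt_index(uint64_t gpa, int level)
    {
            return (gpa >> ((level - 1) * 9 + 12)) & 0x1ff;
    }

    int main(void)
    {
            uint64_t gpa = 0x123456789000ULL;
            int level;

            for (level = 4; level >= 1; level--)
                    printf("level %d index %u\n", level, pt_index(gpa, level));
            return 0;
    }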
 
 /*
@@ -476,7 +499,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
  *   nested_paddr - Nested guest physical address to map
  *   paddr - VM Physical Address
  *   size - The size of the range to map
- *   eptp_memslot - Memory region slot for new virtual translation tables
+ *   level - The level at which to map the range
  *
  * Output Args: None
  *
@@ -485,22 +508,29 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
  * Within the VM given by vm, creates a nested guest translation for the
  * page range starting at nested_paddr to the page range starting at paddr.
  */
-void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-               uint64_t nested_paddr, uint64_t paddr, uint64_t size)
+void __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+                 uint64_t nested_paddr, uint64_t paddr, uint64_t size,
+                 int level)
 {
-       size_t page_size = vm->page_size;
+       size_t page_size = PG_LEVEL_SIZE(level);
        size_t npages = size / page_size;
 
        TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow");
        TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
 
        while (npages--) {
-               nested_pg_map(vmx, vm, nested_paddr, paddr);
+               __nested_pg_map(vmx, vm, nested_paddr, paddr, level);
                nested_paddr += page_size;
                paddr += page_size;
        }
 }
 
+void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+               uint64_t nested_paddr, uint64_t paddr, uint64_t size)
+{
+       __nested_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);
+}
+
 /* Prepare an identity extended page table that maps all the
  * physical pages in VM.
  */
@@ -525,6 +555,13 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
        }
 }
 
+/* Identity map a region with 1GiB Pages. */
+void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
+                           uint64_t addr, uint64_t size)
+{
+       __nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
+}
+
 void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
                  uint32_t eptp_memslot)
 {
index 3875c4b..15f046e 100644 (file)
@@ -244,7 +244,7 @@ int main(int argc, char *argv[])
 #ifdef __x86_64__
                /* Identity map memory in the guest using 1gb pages. */
                for (i = 0; i < slot_size; i += size_1gb)
-                       __virt_pg_map(vm, gpa + i, gpa + i, X86_PAGE_SIZE_1G);
+                       __virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G);
 #else
                for (i = 0; i < slot_size; i += vm_get_page_size(vm))
                        virt_pg_map(vm, gpa + i, gpa + i);
index 4158da0..2237d1a 100644 (file)
@@ -82,8 +82,9 @@ static int next_cpu(int cpu)
        return cpu;
 }
 
-static void *migration_worker(void *ign)
+static void *migration_worker(void *__rseq_tid)
 {
+       pid_t rseq_tid = (pid_t)(unsigned long)__rseq_tid;
        cpu_set_t allowed_mask;
        int r, i, cpu;
 
@@ -106,7 +107,7 @@ static void *migration_worker(void *ign)
                 * stable, i.e. while changing affinity is in-progress.
                 */
                smp_wmb();
-               r = sched_setaffinity(0, sizeof(allowed_mask), &allowed_mask);
+               r = sched_setaffinity(rseq_tid, sizeof(allowed_mask), &allowed_mask);
                TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
                            errno, strerror(errno));
                smp_wmb();
@@ -231,7 +232,8 @@ int main(int argc, char *argv[])
        vm = vm_create_default(VCPU_ID, 0, guest_code);
        ucall_init(vm, NULL);
 
-       pthread_create(&migration_thread, NULL, migration_worker, 0);
+       pthread_create(&migration_thread, NULL, migration_worker,
+                      (void *)(unsigned long)gettid());
 
        for (i = 0; !done; i++) {
                vcpu_run(vm, VCPU_ID);
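The fix only works if the worker pins the *main* thread, hence the tid handed to pthread_create(). gettid() was not exported by glibc before 2.30, so older environments presumably need the raw syscall; a hedged sketch of such a fallback (wrapper name illustrative, chosen to avoid clashing with newer libcs):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Raw-syscall fallback for pre-2.30 glibc, which lacks gettid(). */
    static pid_t my_gettid(void)
    {
            return syscall(SYS_gettid);
    }

    int main(void)
    {
            printf("tid = %d\n", (int)my_gettid());
            return 0;
    }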
index da2325f..bdecd53 100644 (file)
@@ -35,7 +35,7 @@ static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val)
        run = vcpu_state(vm, VCPU_ID);
 
        /* Map 1gb page without a backing memlot. */
-       __virt_pg_map(vm, MMIO_GPA, MMIO_GPA, X86_PAGE_SIZE_1G);
+       __virt_pg_map(vm, MMIO_GPA, MMIO_GPA, PG_LEVEL_1G);
 
        r = _vcpu_run(vm, VCPU_ID);
 
index 2a2d240..1a5cc3c 100644 (file)
@@ -7,10 +7,31 @@ else ifneq ($(filter -%,$(LLVM)),)
 LLVM_SUFFIX := $(LLVM)
 endif
 
-CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
+CLANG_TARGET_FLAGS_arm          := arm-linux-gnueabi
+CLANG_TARGET_FLAGS_arm64        := aarch64-linux-gnu
+CLANG_TARGET_FLAGS_hexagon      := hexagon-linux-musl
+CLANG_TARGET_FLAGS_m68k         := m68k-linux-gnu
+CLANG_TARGET_FLAGS_mips         := mipsel-linux-gnu
+CLANG_TARGET_FLAGS_powerpc      := powerpc64le-linux-gnu
+CLANG_TARGET_FLAGS_riscv        := riscv64-linux-gnu
+CLANG_TARGET_FLAGS_s390         := s390x-linux-gnu
+CLANG_TARGET_FLAGS_x86          := x86_64-linux-gnu
+CLANG_TARGET_FLAGS              := $(CLANG_TARGET_FLAGS_$(ARCH))
+
+ifeq ($(CROSS_COMPILE),)
+ifeq ($(CLANG_TARGET_FLAGS),)
+$(error Specify CROSS_COMPILE or add '--target=' option to lib.mk)
+else
+CLANG_FLAGS     += --target=$(CLANG_TARGET_FLAGS)
+endif # CLANG_TARGET_FLAGS
+else
+CLANG_FLAGS     += --target=$(notdir $(CROSS_COMPILE:%-=%))
+endif # CROSS_COMPILE
+
+CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) $(CLANG_FLAGS) -fintegrated-as
 else
 CC := $(CROSS_COMPILE)gcc
-endif
+endif # LLVM
 
 ifeq (0,$(MAKELEVEL))
     ifeq ($(OUTPUT),)
index b984f8c..ffc35a2 100644 (file)
@@ -36,5 +36,5 @@ test_unix_oob
 gro
 ioam6_parser
 toeplitz
+tun
 cmsg_sender
-bind_bhash_test
index 464df13..db05b37 100644 (file)
@@ -11,7 +11,7 @@ TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
 TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh
 TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh
 TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh
-TEST_PROGS += fin_ack_lat.sh fib_nexthop_multiprefix.sh fib_nexthops.sh
+TEST_PROGS += fin_ack_lat.sh fib_nexthop_multiprefix.sh fib_nexthops.sh fib_nexthop_nongw.sh
 TEST_PROGS += altnames.sh icmp.sh icmp_redirect.sh ip6_gre_headroom.sh
 TEST_PROGS += route_localnet.sh
 TEST_PROGS += reuseaddr_ports_exhausted.sh
@@ -54,12 +54,11 @@ TEST_GEN_FILES += ipsec
 TEST_GEN_FILES += ioam6_parser
 TEST_GEN_FILES += gro
 TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
-TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
+TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls tun
 TEST_GEN_FILES += toeplitz
 TEST_GEN_FILES += cmsg_sender
 TEST_GEN_FILES += stress_reuseport_listen
 TEST_PROGS += test_vxlan_vnifiltering.sh
-TEST_GEN_FILES += bind_bhash_test
 
 TEST_FILES := settings
 
@@ -70,5 +69,4 @@ include bpf/Makefile
 
 $(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
 $(OUTPUT)/tcp_mmap: LDLIBS += -lpthread
-$(OUTPUT)/bind_bhash_test: LDLIBS += -lpthread
 $(OUTPUT)/tcp_inq: LDLIBS += -lpthread
diff --git a/tools/testing/selftests/net/bind_bhash_test.c b/tools/testing/selftests/net/bind_bhash_test.c
deleted file mode 100644 (file)
index 252e737..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This times how long it takes to bind to a port when the port already
- * has multiple sockets in its bhash table.
- *
- * In the setup(), we populate the port's bhash table with
- * MAX_THREADS * MAX_CONNECTIONS number of entries.
- */
-
-#include <unistd.h>
-#include <stdio.h>
-#include <netdb.h>
-#include <pthread.h>
-
-#define MAX_THREADS 600
-#define MAX_CONNECTIONS 40
-
-static const char *bind_addr = "::1";
-static const char *port;
-
-static int fd_array[MAX_THREADS][MAX_CONNECTIONS];
-
-static int bind_socket(int opt, const char *addr)
-{
-       struct addrinfo *res, hint = {};
-       int sock_fd, reuse = 1, err;
-
-       sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
-       if (sock_fd < 0) {
-               perror("socket fd err");
-               return -1;
-       }
-
-       hint.ai_family = AF_INET6;
-       hint.ai_socktype = SOCK_STREAM;
-
-       err = getaddrinfo(addr, port, &hint, &res);
-       if (err) {
-               perror("getaddrinfo failed");
-               return -1;
-       }
-
-       if (opt) {
-               err = setsockopt(sock_fd, SOL_SOCKET, opt, &reuse, sizeof(reuse));
-               if (err) {
-                       perror("setsockopt failed");
-                       return -1;
-               }
-       }
-
-       err = bind(sock_fd, res->ai_addr, res->ai_addrlen);
-       if (err) {
-               perror("failed to bind to port");
-               return -1;
-       }
-
-       return sock_fd;
-}
-
-static void *setup(void *arg)
-{
-       int sock_fd, i;
-       int *array = (int *)arg;
-
-       for (i = 0; i < MAX_CONNECTIONS; i++) {
-               sock_fd = bind_socket(SO_REUSEADDR | SO_REUSEPORT, bind_addr);
-               if (sock_fd < 0)
-                       return NULL;
-               array[i] = sock_fd;
-       }
-
-       return NULL;
-}
-
-int main(int argc, const char *argv[])
-{
-       int listener_fd, sock_fd, i, j;
-       pthread_t tid[MAX_THREADS];
-       clock_t begin, end;
-
-       if (argc != 2) {
-               printf("Usage: listener <port>\n");
-               return -1;
-       }
-
-       port = argv[1];
-
-       listener_fd = bind_socket(SO_REUSEADDR | SO_REUSEPORT, bind_addr);
-       if (listen(listener_fd, 100) < 0) {
-               perror("listen failed");
-               return -1;
-       }
-
-       /* Set up threads to populate the bhash table entry for the port */
-       for (i = 0; i < MAX_THREADS; i++)
-               pthread_create(&tid[i], NULL, setup, fd_array[i]);
-
-       for (i = 0; i < MAX_THREADS; i++)
-               pthread_join(tid[i], NULL);
-
-       begin = clock();
-
-       /* Bind to the same port on a different address */
-       sock_fd  = bind_socket(0, "2001:0db8:0:f101::1");
-
-       end = clock();
-
-       printf("time spent = %f\n", (double)(end - begin) / CLOCKS_PER_SEC);
-
-       /* clean up */
-       close(sock_fd);
-       close(listener_fd);
-       for (i = 0; i < MAX_THREADS; i++) {
-               for (j = 0; i < MAX_THREADS; i++)
-                       close(fd_array[i][j]);
-       }
-
-       return 0;
-}
index 8a69c91..8ccaf87 100644 (file)
@@ -2,7 +2,7 @@
 
 CLANG ?= clang
 CCINCLUDE += -I../../bpf
-CCINCLUDE += -I../../../lib
+CCINCLUDE += -I../../../../lib
 CCINCLUDE += -I../../../../../usr/include/
 
 TEST_CUSTOM_PROGS = $(OUTPUT)/bpf/nat6to4.o
index 54701c8..03b5867 100755 (executable)
@@ -70,6 +70,10 @@ NSB_LO_IP6=2001:db8:2::2
 NL_IP=172.17.1.1
 NL_IP6=2001:db8:4::1
 
+# multicast and broadcast addresses
+MCAST_IP=224.0.0.1
+BCAST_IP=255.255.255.255
+
 MD5_PW=abc123
 MD5_WRONG_PW=abc1234
 
@@ -308,6 +312,9 @@ addr2str()
        127.0.0.1) echo "loopback";;
        ::1) echo "IPv6 loopback";;
 
+       ${BCAST_IP}) echo "broadcast";;
+       ${MCAST_IP}) echo "multicast";;
+
        ${NSA_IP})      echo "ns-A IP";;
        ${NSA_IP6})     echo "ns-A IPv6";;
        ${NSA_LO_IP})   echo "ns-A loopback IP";;
@@ -1793,12 +1800,33 @@ ipv4_addr_bind_novrf()
        done
 
        #
-       # raw socket with nonlocal bind
+       # tests for nonlocal bind
        #
        a=${NL_IP}
        log_start
-       run_cmd nettest -s -R -P icmp -f -l ${a} -I ${NSA_DEV} -b
-       log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address after device bind"
+       run_cmd nettest -s -R -f -l ${a} -b
+       log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address"
+
+       log_start
+       run_cmd nettest -s -f -l ${a} -b
+       log_test_addr ${a} $? 0 "TCP socket bind to nonlocal address"
+
+       log_start
+       run_cmd nettest -s -D -P icmp -f -l ${a} -b
+       log_test_addr ${a} $? 0 "ICMP socket bind to nonlocal address"
+
+       #
+       # check that ICMP sockets cannot bind to broadcast and multicast addresses
+       #
+       a=${BCAST_IP}
+       log_start
+       run_cmd nettest -s -D -P icmp -l ${a} -b
+       log_test_addr ${a} $? 1 "ICMP socket bind to broadcast address"
+
+       a=${MCAST_IP}
+       log_start
+       run_cmd nettest -s -D -P icmp -l ${a} -b
+       log_test_addr ${a} $? 1 "ICMP socket bind to multicast address"
 
        #
        # tcp sockets
@@ -1850,13 +1878,34 @@ ipv4_addr_bind_vrf()
        log_test_addr ${a} $? 1 "Raw socket bind to out of scope address after VRF bind"
 
        #
-       # raw socket with nonlocal bind
+       # tests for nonlocal bind
        #
        a=${NL_IP}
        log_start
-       run_cmd nettest -s -R -P icmp -f -l ${a} -I ${VRF} -b
+       run_cmd nettest -s -R -f -l ${a} -I ${VRF} -b
        log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address after VRF bind"
 
+       log_start
+       run_cmd nettest -s -f -l ${a} -I ${VRF} -b
+       log_test_addr ${a} $? 0 "TCP socket bind to nonlocal address after VRF bind"
+
+       log_start
+       run_cmd nettest -s -D -P icmp -f -l ${a} -I ${VRF} -b
+       log_test_addr ${a} $? 0 "ICMP socket bind to nonlocal address after VRF bind"
+
+       #
+       # check that ICMP sockets cannot bind to broadcast and multicast addresses
+       #
+       a=${BCAST_IP}
+       log_start
+       run_cmd nettest -s -D -P icmp -l ${a} -I ${VRF} -b
+       log_test_addr ${a} $? 1 "ICMP socket bind to broadcast address after VRF bind"
+
+       a=${MCAST_IP}
+       log_start
+       run_cmd nettest -s -D -P icmp -l ${a} -I ${VRF} -b
+       log_test_addr ${a} $? 1 "ICMP socket bind to multicast address after VRF bind"
+
        #
        # tcp sockets
        #
@@ -1889,10 +1938,12 @@ ipv4_addr_bind()
 
        log_subsection "No VRF"
        setup
+       set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
        ipv4_addr_bind_novrf
 
        log_subsection "With VRF"
        setup "yes"
+       set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
        ipv4_addr_bind_vrf
 }
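Widening net.ipv4.ping_group_range is what lets an unprivileged process open the ICMP datagram sockets these new tests exercise (presumably what nettest's '-D -P icmp' creates); the kernel only permits such sockets for groups inside that range. A minimal sketch:

    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
            /* Fails with EACCES unless the caller's group falls within
             * net.ipv4.ping_group_range. */
            int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);

            if (fd < 0)
                    perror("icmp socket");
            return fd < 0;
    }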
 
diff --git a/tools/testing/selftests/net/fib_nexthop_nongw.sh b/tools/testing/selftests/net/fib_nexthop_nongw.sh
new file mode 100755 (executable)
index 0000000..b7b928b
--- /dev/null
@@ -0,0 +1,119 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# ns: h1               | ns: h2
+#   192.168.0.1/24     |
+#            eth0      |
+#                      |       192.168.1.1/32
+#            veth0 <---|---> veth1
+# Validate source address selection for route without gateway
+
+PAUSE_ON_FAIL=no
+VERBOSE=0
+ret=0
+
+################################################################################
+# helpers
+
+log_test()
+{
+       local rc=$1
+       local expected=$2
+       local msg="$3"
+
+       if [ ${rc} -eq ${expected} ]; then
+               printf "TEST: %-60s  [ OK ]\n" "${msg}"
+               nsuccess=$((nsuccess+1))
+       else
+               ret=1
+               nfail=$((nfail+1))
+               printf "TEST: %-60s  [FAIL]\n" "${msg}"
+               if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+                       echo
+                       echo "hit enter to continue, 'q' to quit"
+                       read a
+                       [ "$a" = "q" ] && exit 1
+               fi
+       fi
+
+       [ "$VERBOSE" = "1" ] && echo
+}
+
+run_cmd()
+{
+       local cmd="$*"
+       local out
+       local rc
+
+       if [ "$VERBOSE" = "1" ]; then
+               echo "COMMAND: $cmd"
+       fi
+
+       out=$(eval $cmd 2>&1)
+       rc=$?
+       if [ "$VERBOSE" = "1" -a -n "$out" ]; then
+               echo "$out"
+       fi
+
+       [ "$VERBOSE" = "1" ] && echo
+
+       return $rc
+}
+
+################################################################################
+# config
+setup()
+{
+       ip netns add h1
+       ip -n h1 link set lo up
+       ip netns add h2
+       ip -n h2 link set lo up
+
+       # Add a fake eth0 to support an ip address
+       ip -n h1 link add name eth0 type dummy
+       ip -n h1 link set eth0 up
+       ip -n h1 address add 192.168.0.1/24 dev eth0
+
+       # Configure veths (same @mac, arp off)
+       ip -n h1 link add name veth0 type veth peer name veth1 netns h2
+       ip -n h1 link set veth0 up
+
+       ip -n h2 link set veth1 up
+
+       # Configure @IP in the peer netns
+       ip -n h2 address add 192.168.1.1/32 dev veth1
+       ip -n h2 route add default dev veth1
+
+       # Add a nexthop without @gw and use it in a route
+       ip -n h1 nexthop add id 1 dev veth0
+       ip -n h1 route add 192.168.1.1 nhid 1
+}
+
+cleanup()
+{
+       ip netns del h1 2>/dev/null
+       ip netns del h2 2>/dev/null
+}
+
+trap cleanup EXIT
+
+################################################################################
+# main
+
+while getopts :pv o
+do
+       case $o in
+               p) PAUSE_ON_FAIL=yes;;
+               v) VERBOSE=1;;
+       esac
+done
+
+cleanup
+setup
+
+run_cmd ip -netns h1 route get 192.168.1.1
+log_test $? 0 "nexthop: get route with nexthop without gw"
+run_cmd ip netns exec h1 ping -c1 192.168.1.1
+log_test $? 0 "nexthop: ping through nexthop without gw"
+
+exit $ret
index 8f48121..57b84e0 100644 (file)
@@ -37,6 +37,7 @@ TEST_PROGS = bridge_igmp.sh \
        ipip_hier_gre_key.sh \
        ipip_hier_gre_keys.sh \
        ipip_hier_gre.sh \
+       local_termination.sh \
        loopback.sh \
        mirror_gre_bound.sh \
        mirror_gre_bridge_1d.sh \
@@ -52,6 +53,7 @@ TEST_PROGS = bridge_igmp.sh \
        mirror_gre_vlan_bridge_1q.sh \
        mirror_gre_vlan.sh \
        mirror_vlan.sh \
+       no_forwarding.sh \
        pedit_dsfield.sh \
        pedit_ip.sh \
        pedit_l4port.sh \
index 37ae49d..3ffb9d6 100755 (executable)
@@ -1240,6 +1240,7 @@ learning_test()
        # FDB entry was installed.
        bridge link set dev $br_port1 flood off
 
+       ip link set $host1_if promisc on
        tc qdisc add dev $host1_if ingress
        tc filter add dev $host1_if ingress protocol ip pref 1 handle 101 \
                flower dst_mac $mac action drop
@@ -1250,7 +1251,7 @@ learning_test()
        tc -j -s filter show dev $host1_if ingress \
                | jq -e ".[] | select(.options.handle == 101) \
                | select(.options.actions[0].stats.packets == 1)" &> /dev/null
-       check_fail $? "Packet reached second host when should not"
+       check_fail $? "Packet reached first host when should not"
 
        $MZ $host1_if -c 1 -p 64 -a $mac -t ip -q
        sleep 1
@@ -1289,6 +1290,7 @@ learning_test()
 
        tc filter del dev $host1_if ingress protocol ip pref 1 handle 101 flower
        tc qdisc del dev $host1_if ingress
+       ip link set $host1_if promisc off
 
        bridge link set dev $br_port1 flood on
 
@@ -1306,6 +1308,7 @@ flood_test_do()
 
        # Add an ACL on `host2_if` which will tell us whether the packet
        # was flooded to it or not.
+       ip link set $host2_if promisc on
        tc qdisc add dev $host2_if ingress
        tc filter add dev $host2_if ingress protocol ip pref 1 handle 101 \
                flower dst_mac $mac action drop
@@ -1323,6 +1326,7 @@ flood_test_do()
 
        tc filter del dev $host2_if ingress protocol ip pref 1 handle 101 flower
        tc qdisc del dev $host2_if ingress
+       ip link set $host2_if promisc off
 
        return $err
 }
index f905d53..48a99e1 100644 (file)
@@ -6,7 +6,7 @@ KSFT_KHDR_INSTALL := 1
 CFLAGS =  -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
 
 TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
-             simult_flows.sh mptcp_sockopt.sh
+             simult_flows.sh mptcp_sockopt.sh userspace_pm.sh
 
 TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq
 
index 9dd43d7..515859a 100755 (executable)
@@ -61,6 +61,39 @@ chk_msk_nr()
        __chk_nr "grep -c token:" $*
 }
 
+wait_msk_nr()
+{
+       local condition="grep -c token:"
+       local expected=$1
+       local timeout=20
+       local msg nr
+       local max=0
+       local i=0
+
+       shift 1
+       msg=$*
+
+       while [ $i -lt $timeout ]; do
+               nr=$(ss -inmHMN $ns | $condition)
+               [ $nr == $expected ] && break;
+               [ $nr -gt $max ] && max=$nr
+               i=$((i + 1))
+               sleep 1
+       done
+
+       printf "%-50s" "$msg"
+       if [ $i -ge $timeout ]; then
+               echo "[ fail ] timeout while expecting $expected max $max last $nr"
+               ret=$test_cnt
+       elif [ $nr != $expected ]; then
+               echo "[ fail ] expected $expected found $nr"
+               ret=$test_cnt
+       else
+               echo "[  ok  ]"
+       fi
+       test_cnt=$((test_cnt+1))
+}
+
 chk_msk_fallback_nr()
 {
                __chk_nr "grep -c fallback" $*
@@ -146,7 +179,7 @@ ip -n $ns link set dev lo up
 echo "a" | \
        timeout ${timeout_test} \
                ip netns exec $ns \
-                       ./mptcp_connect -p 10000 -l -t ${timeout_poll} \
+                       ./mptcp_connect -p 10000 -l -t ${timeout_poll} -w 20 \
                                0.0.0.0 >/dev/null &
 wait_local_port_listen $ns 10000
 chk_msk_nr 0 "no msk on netns creation"
@@ -155,7 +188,7 @@ chk_msk_listen 10000
 echo "b" | \
        timeout ${timeout_test} \
                ip netns exec $ns \
-                       ./mptcp_connect -p 10000 -r 0 -t ${timeout_poll} \
+                       ./mptcp_connect -p 10000 -r 0 -t ${timeout_poll} -w 20 \
                                127.0.0.1 >/dev/null &
 wait_connected $ns 10000
 chk_msk_nr 2 "after MPC handshake "
@@ -167,13 +200,13 @@ flush_pids
 echo "a" | \
        timeout ${timeout_test} \
                ip netns exec $ns \
-                       ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
+                       ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} -w 20 \
                                0.0.0.0 >/dev/null &
 wait_local_port_listen $ns 10001
 echo "b" | \
        timeout ${timeout_test} \
                ip netns exec $ns \
-                       ./mptcp_connect -p 10001 -r 0 -t ${timeout_poll} \
+                       ./mptcp_connect -p 10001 -r 0 -t ${timeout_poll} -w 20 \
                                127.0.0.1 >/dev/null &
 wait_connected $ns 10001
 chk_msk_fallback_nr 1 "check fallback"
@@ -184,7 +217,7 @@ for I in `seq 1 $NR_CLIENTS`; do
        echo "a" | \
                timeout ${timeout_test} \
                        ip netns exec $ns \
-                               ./mptcp_connect -p $((I+10001)) -l -w 10 \
+                               ./mptcp_connect -p $((I+10001)) -l -w 20 \
                                        -t ${timeout_poll} 0.0.0.0 >/dev/null &
 done
 wait_local_port_listen $ns $((NR_CLIENTS + 10001))
@@ -193,12 +226,11 @@ for I in `seq 1 $NR_CLIENTS`; do
        echo "b" | \
                timeout ${timeout_test} \
                        ip netns exec $ns \
-                               ./mptcp_connect -p $((I+10001)) -w 10 \
+                               ./mptcp_connect -p $((I+10001)) -w 20 \
                                        -t ${timeout_poll} 127.0.0.1 >/dev/null &
 done
-sleep 1.5
 
-chk_msk_nr $((NR_CLIENTS*2)) "many msk socket present"
+wait_msk_nr $((NR_CLIENTS*2)) "many msk socket present"
 flush_pids
 
 exit $ret
index 8628aa6..e2ea6c1 100644 (file)
@@ -265,7 +265,7 @@ static void sock_test_tcpulp(int sock, int proto, unsigned int line)
 static int sock_listen_mptcp(const char * const listenaddr,
                             const char * const port)
 {
-       int sock;
+       int sock = -1;
        struct addrinfo hints = {
                .ai_protocol = IPPROTO_TCP,
                .ai_socktype = SOCK_STREAM,
index 29f75e2..8672d89 100644 (file)
@@ -88,7 +88,7 @@ static void xgetaddrinfo(const char *node, const char *service,
 static int sock_listen_mptcp(const char * const listenaddr,
                             const char * const port)
 {
-       int sock;
+       int sock = -1;
        struct addrinfo hints = {
                .ai_protocol = IPPROTO_TCP,
                .ai_socktype = SOCK_STREAM,
index ac9a4d9..ae61f39 100644 (file)
@@ -136,7 +136,7 @@ static void xgetaddrinfo(const char *node, const char *service,
 static int sock_listen_mptcp(const char * const listenaddr,
                             const char * const port)
 {
-       int sock;
+       int sock = -1;
        struct addrinfo hints = {
                .ai_protocol = IPPROTO_TCP,
                .ai_socktype = SOCK_STREAM,
index 6a2f4b9..cb79f07 100644 (file)
@@ -39,7 +39,7 @@ static void syntax(char *argv[])
        fprintf(stderr, "\tdsf lip <local-ip> lport <local-port> rip <remote-ip> rport <remote-port> token <token>\n");
        fprintf(stderr, "\tdel <id> [<ip>]\n");
        fprintf(stderr, "\tget <id>\n");
-       fprintf(stderr, "\tset [<ip>] [id <nr>] flags [no]backup|[no]fullmesh [port <nr>]\n");
+       fprintf(stderr, "\tset [<ip>] [id <nr>] flags [no]backup|[no]fullmesh [port <nr>] [token <token>] [rip <ip>] [rport <port>]\n");
        fprintf(stderr, "\tflush\n");
        fprintf(stderr, "\tdump\n");
        fprintf(stderr, "\tlimits [<rcv addr max> <subflow max>]\n");
@@ -1279,7 +1279,10 @@ int set_flags(int fd, int pm_family, int argc, char *argv[])
        struct rtattr *rta, *nest;
        struct nlmsghdr *nh;
        u_int32_t flags = 0;
+       u_int32_t token = 0;
+       u_int16_t rport = 0;
        u_int16_t family;
+       void *rip = NULL;
        int nest_start;
        int use_id = 0;
        u_int8_t id;
@@ -1339,7 +1342,13 @@ int set_flags(int fd, int pm_family, int argc, char *argv[])
                error(1, 0, " missing flags keyword");
 
        for (; arg < argc; arg++) {
-               if (!strcmp(argv[arg], "flags")) {
+               if (!strcmp(argv[arg], "token")) {
+                       if (++arg >= argc)
+                               error(1, 0, " missing token value");
+
+                       /* token */
+                       token = atoi(argv[arg]);
+               } else if (!strcmp(argv[arg], "flags")) {
                        char *tok, *str;
 
                        /* flags */
@@ -1378,12 +1387,72 @@ int set_flags(int fd, int pm_family, int argc, char *argv[])
                        rta->rta_len = RTA_LENGTH(2);
                        memcpy(RTA_DATA(rta), &port, 2);
                        off += NLMSG_ALIGN(rta->rta_len);
+               } else if (!strcmp(argv[arg], "rport")) {
+                       if (++arg >= argc)
+                               error(1, 0, " missing remote port");
+
+                       rport = atoi(argv[arg]);
+               } else if (!strcmp(argv[arg], "rip")) {
+                       if (++arg >= argc)
+                               error(1, 0, " missing remote ip");
+
+                       rip = argv[arg];
                } else {
                        error(1, 0, "unknown keyword %s", argv[arg]);
                }
        }
        nest->rta_len = off - nest_start;
 
+       /* token */
+       if (token) {
+               rta = (void *)(data + off);
+               rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+               rta->rta_len = RTA_LENGTH(4);
+               memcpy(RTA_DATA(rta), &token, 4);
+               off += NLMSG_ALIGN(rta->rta_len);
+       }
+
+       /* remote addr/port */
+       if (rip) {
+               nest_start = off;
+               nest = (void *)(data + off);
+               nest->rta_type = NLA_F_NESTED | MPTCP_PM_ATTR_ADDR_REMOTE;
+               nest->rta_len = RTA_LENGTH(0);
+               off += NLMSG_ALIGN(nest->rta_len);
+
+               /* addr data */
+               rta = (void *)(data + off);
+               if (inet_pton(AF_INET, rip, RTA_DATA(rta))) {
+                       family = AF_INET;
+                       rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
+                       rta->rta_len = RTA_LENGTH(4);
+               } else if (inet_pton(AF_INET6, rip, RTA_DATA(rta))) {
+                       family = AF_INET6;
+                       rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
+                       rta->rta_len = RTA_LENGTH(16);
+               } else {
+                       error(1, errno, "can't parse ip %s", (char *)rip);
+               }
+               off += NLMSG_ALIGN(rta->rta_len);
+
+               /* family */
+               rta = (void *)(data + off);
+               rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
+               rta->rta_len = RTA_LENGTH(2);
+               memcpy(RTA_DATA(rta), &family, 2);
+               off += NLMSG_ALIGN(rta->rta_len);
+
+               if (rport) {
+                       rta = (void *)(data + off);
+                       rta->rta_type = MPTCP_PM_ADDR_ATTR_PORT;
+                       rta->rta_len = RTA_LENGTH(2);
+                       memcpy(RTA_DATA(rta), &rport, 2);
+                       off += NLMSG_ALIGN(rta->rta_len);
+               }
+
+               nest->rta_len = off - nest_start;
+       }
+
        do_nl_req(fd, nh, off, 0);
        return 0;
 }
index 78d0bb6..abe3d4e 100755 (executable)
@@ -770,10 +770,42 @@ test_subflows()
        rm -f "$evts"
 }
 
+test_prio()
+{
+       local count
+
+       # Send MP_PRIO signal from client to server machine
+       ip netns exec "$ns2" ./pm_nl_ctl set 10.0.1.2 port "$client4_port" flags backup token "$client4_token" rip 10.0.1.1 rport "$server4_port"
+       sleep 0.5
+
+       # Check TX
+       stdbuf -o0 -e0 printf "MP_PRIO TX                                                 \t"
+       count=$(ip netns exec "$ns2" nstat -as | grep MPTcpExtMPPrioTx | awk '{print $2}')
+       [ -z "$count" ] && count=0
+       if [ $count != 1 ]; then
+               stdbuf -o0 -e0 printf "[FAIL]\n"
+               exit 1
+       else
+               stdbuf -o0 -e0 printf "[OK]\n"
+       fi
+
+       # Check RX
+       stdbuf -o0 -e0 printf "MP_PRIO RX                                                 \t"
+       count=$(ip netns exec "$ns1" nstat -as | grep MPTcpExtMPPrioRx | awk '{print $2}')
+       [ -z "$count" ] && count=0
+       if [ $count != 1 ]; then
+               stdbuf -o0 -e0 printf "[FAIL]\n"
+               exit 1
+       else
+               stdbuf -o0 -e0 printf "[OK]\n"
+       fi
+}
+
 make_connection
 make_connection "v6"
 test_announce
 test_remove
 test_subflows
+test_prio
 
 exit 0
diff --git a/tools/testing/selftests/net/tun.c b/tools/testing/selftests/net/tun.c
new file mode 100644 (file)
index 0000000..fa83918
--- /dev/null
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <linux/if.h>
+#include <linux/if_tun.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+
+#include "../kselftest_harness.h"
+
+static int tun_attach(int fd, char *dev)
+{
+       struct ifreq ifr;
+
+       memset(&ifr, 0, sizeof(ifr));
+       strcpy(ifr.ifr_name, dev);
+       ifr.ifr_flags = IFF_ATTACH_QUEUE;
+
+       return ioctl(fd, TUNSETQUEUE, (void *) &ifr);
+}
+
+static int tun_detach(int fd, char *dev)
+{
+       struct ifreq ifr;
+
+       memset(&ifr, 0, sizeof(ifr));
+       strcpy(ifr.ifr_name, dev);
+       ifr.ifr_flags = IFF_DETACH_QUEUE;
+
+       return ioctl(fd, TUNSETQUEUE, (void *) &ifr);
+}
+
+static int tun_alloc(char *dev)
+{
+       struct ifreq ifr;
+       int fd, err;
+
+       fd = open("/dev/net/tun", O_RDWR);
+       if (fd < 0) {
+               fprintf(stderr, "can't open tun: %s\n", strerror(errno));
+               return fd;
+       }
+
+       memset(&ifr, 0, sizeof(ifr));
+       strcpy(ifr.ifr_name, dev);
+       ifr.ifr_flags = IFF_TAP | IFF_NAPI | IFF_MULTI_QUEUE;
+
+       err = ioctl(fd, TUNSETIFF, (void *) &ifr);
+       if (err < 0) {
+               fprintf(stderr, "can't TUNSETIFF: %s\n", strerror(errno));
+               close(fd);
+               return err;
+       }
+       strcpy(dev, ifr.ifr_name);
+       return fd;
+}
+
+static int tun_delete(char *dev)
+{
+       struct {
+               struct nlmsghdr  nh;
+               struct ifinfomsg ifm;
+               unsigned char    data[64];
+       } req;
+       struct rtattr *rta;
+       int ret, rtnl;
+
+       rtnl = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);
+       if (rtnl < 0) {
+               fprintf(stderr, "can't open rtnl: %s\n", strerror(errno));
+               return 1;
+       }
+
+       memset(&req, 0, sizeof(req));
+       req.nh.nlmsg_len = NLMSG_ALIGN(NLMSG_LENGTH(sizeof(req.ifm)));
+       req.nh.nlmsg_flags = NLM_F_REQUEST;
+       req.nh.nlmsg_type = RTM_DELLINK;
+
+       req.ifm.ifi_family = AF_UNSPEC;
+
+       rta = (struct rtattr *)(((char *)&req) + NLMSG_ALIGN(req.nh.nlmsg_len));
+       rta->rta_type = IFLA_IFNAME;
+       rta->rta_len = RTA_LENGTH(IFNAMSIZ);
+       req.nh.nlmsg_len += rta->rta_len;
+       memcpy(RTA_DATA(rta), dev, IFNAMSIZ);
+
+       ret = send(rtnl, &req, req.nh.nlmsg_len, 0);
+       if (ret < 0)
+               fprintf(stderr, "can't send: %s\n", strerror(errno));
+       ret = (unsigned int)ret != req.nh.nlmsg_len;
+
+       close(rtnl);
+       return ret;
+}
+
+FIXTURE(tun)
+{
+       char ifname[IFNAMSIZ];
+       int fd, fd2;
+};
+
+FIXTURE_SETUP(tun)
+{
+       memset(self->ifname, 0, sizeof(self->ifname));
+
+       self->fd = tun_alloc(self->ifname);
+       ASSERT_GE(self->fd, 0);
+
+       self->fd2 = tun_alloc(self->ifname);
+       ASSERT_GE(self->fd2, 0);
+}
+
+FIXTURE_TEARDOWN(tun)
+{
+       if (self->fd >= 0)
+               close(self->fd);
+       if (self->fd2 >= 0)
+               close(self->fd2);
+}
+
+TEST_F(tun, delete_detach_close) {
+       EXPECT_EQ(tun_delete(self->ifname), 0);
+       EXPECT_EQ(tun_detach(self->fd, self->ifname), -1);
+       EXPECT_EQ(errno, EINVAL);
+}
+
+TEST_F(tun, detach_delete_close) {
+       EXPECT_EQ(tun_detach(self->fd, self->ifname), 0);
+       EXPECT_EQ(tun_delete(self->ifname), 0);
+}
+
+TEST_F(tun, detach_close_delete) {
+       EXPECT_EQ(tun_detach(self->fd, self->ifname), 0);
+       close(self->fd);
+       self->fd = -1;
+       EXPECT_EQ(tun_delete(self->ifname), 0);
+}
+
+TEST_F(tun, reattach_delete_close) {
+       EXPECT_EQ(tun_detach(self->fd, self->ifname), 0);
+       EXPECT_EQ(tun_attach(self->fd, self->ifname), 0);
+       EXPECT_EQ(tun_delete(self->ifname), 0);
+}
+
+TEST_F(tun, reattach_close_delete) {
+       EXPECT_EQ(tun_detach(self->fd, self->ifname), 0);
+       EXPECT_EQ(tun_attach(self->fd, self->ifname), 0);
+       close(self->fd);
+       self->fd = -1;
+       EXPECT_EQ(tun_delete(self->ifname), 0);
+}
+
+TEST_HARNESS_MAIN
index f8a19f5..ebbd0b2 100755 (executable)
@@ -34,7 +34,7 @@ cfg_veth() {
        ip -netns "${PEER_NS}" addr add dev veth1 192.168.1.1/24
        ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
        ip -netns "${PEER_NS}" link set dev veth1 up
-       ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp_dummy
+       ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp
 }
 
 run_one() {
index 820bc50..fad2d1a 100755 (executable)
@@ -34,7 +34,7 @@ run_one() {
        ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
        ip -netns "${PEER_NS}" link set dev veth1 up
 
-       ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp_dummy
+       ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp
        ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
        ip netns exec "${PEER_NS}" ./udpgso_bench_rx -t ${rx_args} -r &
 
index 807b74c..832c738 100755 (executable)
@@ -36,7 +36,7 @@ run_one() {
        ip netns exec "${PEER_NS}" ethtool -K veth1 rx-gro-list on
 
 
-       ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp_dummy
+       ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp
        tc -n "${PEER_NS}" qdisc add dev veth1 clsact
        tc -n "${PEER_NS}" filter add dev veth1 ingress prio 4 protocol ipv6 bpf object-file ../bpf/nat6to4.o section schedcls/ingress6/nat_6  direct-action
        tc -n "${PEER_NS}" filter add dev veth1 egress prio 4 protocol ip bpf object-file ../bpf/nat6to4.o section schedcls/egress4/snat4 direct-action
index 6f05e06..1bcd82e 100755 (executable)
@@ -46,7 +46,7 @@ create_ns() {
                ip -n $BASE$ns addr add dev veth$ns $BM_NET_V4$ns/24
                ip -n $BASE$ns addr add dev veth$ns $BM_NET_V6$ns/64 nodad
        done
-       ip -n $NS_DST link set veth$DST xdp object ../bpf/xdp_dummy.o section xdp_dummy 2>/dev/null
+       ip -n $NS_DST link set veth$DST xdp object ../bpf/xdp_dummy.o section xdp 2>/dev/null
 }
 
 create_vxlan_endpoint() {
index 80b5d35..dc932fd 100755 (executable)
@@ -120,7 +120,7 @@ run_all() {
        run_udp "${ipv4_args}"
 
        echo "ipv6"
-       run_tcp "${ipv4_args}"
+       run_tcp "${ipv6_args}"
        run_udp "${ipv6_args}"
 }
 
index 19eac3e..430895d 100755 (executable)
@@ -289,14 +289,14 @@ if [ $CPUS -gt 1 ]; then
        ip netns exec $NS_SRC ethtool -L veth$SRC rx 1 tx 2 2>/dev/null
        printf "%-60s" "bad setting: XDP with RX nr less than TX"
        ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o \
-               section xdp_dummy 2>/dev/null &&\
+               section xdp 2>/dev/null &&\
                echo "fail - set operation successful ?!?" || echo " ok "
 
        # the following tests will run with multiple channels active
        ip netns exec $NS_SRC ethtool -L veth$SRC rx 2
        ip netns exec $NS_DST ethtool -L veth$DST rx 2
        ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o \
-               section xdp_dummy 2>/dev/null
+               section xdp 2>/dev/null
        printf "%-60s" "bad setting: reducing RX nr below peer TX with XDP set"
        ip netns exec $NS_DST ethtool -L veth$DST rx 1 2>/dev/null &&\
                echo "fail - set operation successful ?!?" || echo " ok "
@@ -311,7 +311,7 @@ if [ $CPUS -gt 2 ]; then
        chk_channels "setting invalid channels nr" $DST 2 2
 fi
 
-ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o section xdp_dummy 2>/dev/null
+ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o section xdp 2>/dev/null
 chk_gro_flag "with xdp attached - gro flag" $DST on
 chk_gro_flag "        - peer gro flag" $SRC off
 chk_tso_flag "        - tso flag" $SRC off
index b35010c..a699187 100755 (executable)
@@ -31,7 +31,7 @@ BUGS="flush_remove_add reload"
 
 # List of possible paths to pktgen script from kernel tree for performance tests
 PKTGEN_SCRIPT_PATHS="
-       ../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
+       ../../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
        pktgen/pktgen_bench_xmit_mode_netif_receive.sh"
 
 # Definition of set types:
index b24494c..c652e8c 100644 (file)
         "teardown": [
             "$TC actions flush action gact"
         ]
+    },
+    {
+        "id": "7f52",
+        "name": "Try to flush action which is referenced by filter",
+        "category": [
+            "actions",
+            "gact"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            [
+                "$TC actions flush action gact",
+                0,
+                1,
+                255
+            ],
+            "$TC qdisc add dev $DEV1 ingress",
+            "$TC actions add action pass index 1",
+            "$TC filter add dev $DEV1 protocol all ingress prio 1 handle 0x1234 matchall action gact index 1"
+        ],
+        "cmdUnderTest": "$TC actions flush action gact",
+        "expExitCode": "1",
+        "verifyCmd": "$TC actions ls action gact",
+        "matchPattern": "total acts 1.*action order [0-9]*: gact action pass.*index 1 ref 2 bind 1",
+        "matchCount": "1",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress",
+            [
+                "sleep 1; $TC actions flush action gact",
+                0,
+                1
+            ]
+        ]
+    },
+    {
+        "id": "ae1e",
+        "name": "Try to flush actions when last one is referenced by filter",
+        "category": [
+            "actions",
+            "gact"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            [
+                "$TC actions flush action gact",
+                0,
+                1,
+                255
+            ],
+            "$TC qdisc add dev $DEV1 ingress",
+           [
+                "$TC actions add action pass index 1",
+               0,
+               1,
+               255
+           ],
+            "$TC actions add action reclassify index 2",
+            "$TC actions add action drop index 3",
+            "$TC filter add dev $DEV1 protocol all ingress prio 1 handle 0x1234 matchall action gact index 3"
+        ],
+        "cmdUnderTest": "$TC actions flush action gact",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions ls action gact",
+        "matchPattern": "total acts 1.*action order [0-9]*: gact action drop.*index 3 ref 2 bind 1",
+        "matchCount": "1",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress",
+            [
+                "sleep 1; $TC actions flush action gact",
+                0,
+                1
+            ]
+        ]
     }
 ]
index 6bb36ca..a309876 100644 (file)
@@ -209,7 +209,7 @@ int main(int argc, char **argv)
        if (write)
                gup.gup_flags |= FOLL_WRITE;
 
-       gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
+       gup_fd = open(GUP_TEST_FILE, O_RDWR);
        if (gup_fd == -1) {
                switch (errno) {
                case EACCES:
@@ -224,7 +224,7 @@ int main(int argc, char **argv)
                        printf("check if CONFIG_GUP_TEST is enabled in kernel config\n");
                        break;
                default:
-                       perror("failed to open /sys/kernel/debug/gup_test");
+                       perror("failed to open " GUP_TEST_FILE);
                        break;
                }
                exit(KSFT_SKIP);
index 2fcf243..f5e4e0b 100644 (file)
@@ -54,6 +54,7 @@ static int ksm_write_sysfs(const char *file_path, unsigned long val)
        }
        if (fprintf(f, "%lu", val) < 0) {
                perror("fprintf");
+               fclose(f);
                return 1;
        }
        fclose(f);
@@ -72,6 +73,7 @@ static int ksm_read_sysfs(const char *file_path, unsigned long *val)
        }
        if (fscanf(f, "%lu", val) != 1) {
                perror("fscanf");
+               fclose(f);
                return 1;
        }
        fclose(f);
index 7d1b809..9700358 100644 (file)
@@ -19,8 +19,6 @@ endif
 MIRROR := https://download.wireguard.com/qemu-test/distfiles/
 
 KERNEL_BUILD_PATH := $(BUILD_PATH)/kernel$(if $(findstring yes,$(DEBUG_KERNEL)),-debug)
-rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))
-WIREGUARD_SOURCES := $(call rwildcard,$(KERNEL_PATH)/drivers/net/wireguard/,*)
 
 default: qemu
 
@@ -109,20 +107,22 @@ CHOST := x86_64-linux-musl
 QEMU_ARCH := x86_64
 KERNEL_ARCH := x86_64
 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
+QEMU_VPORT_RESULT := virtio-serial-device
 ifeq ($(HOST_ARCH),$(ARCH))
-QEMU_MACHINE := -cpu host -machine q35,accel=kvm
+QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
 else
-QEMU_MACHINE := -cpu max -machine q35
+QEMU_MACHINE := -cpu max -machine microvm -no-acpi
 endif
 else ifeq ($(ARCH),i686)
 CHOST := i686-linux-musl
 QEMU_ARCH := i386
 KERNEL_ARCH := x86
 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
+QEMU_VPORT_RESULT := virtio-serial-device
 ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH))
-QEMU_MACHINE := -cpu host -machine q35,accel=kvm
+QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
 else
-QEMU_MACHINE := -cpu max -machine q35
+QEMU_MACHINE := -cpu coreduo -machine microvm -no-acpi
 endif
 else ifeq ($(ARCH),mips64)
 CHOST := mips64-linux-musl
@@ -208,10 +208,11 @@ QEMU_ARCH := m68k
 KERNEL_ARCH := m68k
 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux
 KERNEL_CMDLINE := $(shell sed -n 's/CONFIG_CMDLINE=\(.*\)/\1/p' arch/m68k.config)
+QEMU_VPORT_RESULT := virtio-serial-device
 ifeq ($(HOST_ARCH),$(ARCH))
-QEMU_MACHINE := -cpu host,accel=kvm -machine q800 -append $(KERNEL_CMDLINE)
+QEMU_MACHINE := -cpu host,accel=kvm -machine virt -append $(KERNEL_CMDLINE)
 else
-QEMU_MACHINE := -machine q800 -smp 1 -append $(KERNEL_CMDLINE)
+QEMU_MACHINE := -machine virt -smp 1 -append $(KERNEL_CMDLINE)
 endif
 else ifeq ($(ARCH),riscv64)
 CHOST := riscv64-linux-musl
@@ -322,8 +323,9 @@ $(KERNEL_BUILD_PATH)/.config: $(TOOLCHAIN_PATH)/.installed kernel.config arch/$(
        cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config $(KERNEL_BUILD_PATH)/minimal.config
        $(if $(findstring yes,$(DEBUG_KERNEL)),cp debug.config $(KERNEL_BUILD_PATH) && cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config debug.config,)
 
-$(KERNEL_BZIMAGE): $(TOOLCHAIN_PATH)/.installed $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-legacy-multi $(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/wg $(BUILD_PATH)/init ../netns.sh $(WIREGUARD_SOURCES)
+$(KERNEL_BZIMAGE): $(TOOLCHAIN_PATH)/.installed $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-legacy-multi $(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/wg $(BUILD_PATH)/init
        $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE)
+.PHONY: $(KERNEL_BZIMAGE)
 
 $(TOOLCHAIN_PATH)/$(CHOST)/include/linux/.installed: | $(KERNEL_BUILD_PATH)/.config $(TOOLCHAIN_PATH)/.installed
        rm -rf $(TOOLCHAIN_PATH)/$(CHOST)/include/linux
index fc7959b..0579c66 100644
@@ -7,6 +7,7 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_VIRTIO_MENU=y
 CONFIG_VIRTIO_MMIO=y
 CONFIG_VIRTIO_CONSOLE=y
+CONFIG_COMPAT_32BIT_TIME=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="console=ttyAMA0 wg.success=vport0p1 panic_on_warn=1"
 CONFIG_FRAME_WARN=1024
index f3066be..2a3307b 100644
@@ -7,6 +7,7 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_VIRTIO_MENU=y
 CONFIG_VIRTIO_MMIO=y
 CONFIG_VIRTIO_CONSOLE=y
+CONFIG_COMPAT_32BIT_TIME=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="console=ttyAMA0 wg.success=vport0p1 panic_on_warn=1"
 CONFIG_CPU_BIG_ENDIAN=y
index 6d90892..35b0650 100644
@@ -1,6 +1,10 @@
-CONFIG_ACPI=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_COMPAT_32BIT_TIME=y
 CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
+CONFIG_CMDLINE="console=ttyS0 wg.success=vport0p1 panic_on_warn=1 reboot=t"
 CONFIG_FRAME_WARN=1024
index 82c925e..39c48cb 100644
@@ -1,9 +1,7 @@
 CONFIG_MMU=y
+CONFIG_VIRT=y
 CONFIG_M68KCLASSIC=y
-CONFIG_M68040=y
-CONFIG_MAC=y
-CONFIG_SERIAL_PMACZILOG=y
-CONFIG_SERIAL_PMACZILOG_TTYS=y
-CONFIG_SERIAL_PMACZILOG_CONSOLE=y
-CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_COMPAT_32BIT_TIME=y
+CONFIG_CMDLINE="console=ttyGF0 wg.success=vport0p1 panic_on_warn=1"
 CONFIG_FRAME_WARN=1024
index d7ec63c..2a84402 100644
@@ -6,6 +6,7 @@ CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_SYSCON=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_COMPAT_32BIT_TIME=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
 CONFIG_FRAME_WARN=1024
index 18a4982..56146a1 100644
@@ -7,6 +7,7 @@ CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_SYSCON=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_COMPAT_32BIT_TIME=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
 CONFIG_FRAME_WARN=1024
index 5e04882..174a9ff 100644
@@ -4,6 +4,7 @@ CONFIG_PPC_85xx=y
 CONFIG_PHYS_64BIT=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_COMPAT_32BIT_TIME=y
 CONFIG_MATH_EMULATION=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
index efa0069..cf2d137 100644
@@ -1,6 +1,9 @@
-CONFIG_ACPI=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
 CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
+CONFIG_CMDLINE="console=ttyS0 wg.success=vport0p1 panic_on_warn=1 reboot=t"
 CONFIG_FRAME_WARN=1280
index c9e1284..3e49924 100644
@@ -11,6 +11,7 @@
 #include <stdlib.h>
 #include <stdbool.h>
 #include <fcntl.h>
+#include <time.h>
 #include <sys/wait.h>
 #include <sys/mount.h>
 #include <sys/stat.h>
@@ -70,6 +71,15 @@ static void seed_rng(void)
        close(fd);
 }
 
+static void set_time(void)
+{
+       if (time(NULL))
+               return;
+       pretty_message("[+] Setting fake time...");
+       if (stime(&(time_t){1433512680}) < 0)
+               panic("settimeofday()");
+}
+
 static void mount_filesystems(void)
 {
        pretty_message("[+] Mounting filesystems...");
@@ -259,6 +269,7 @@ int main(int argc, char *argv[])
        print_banner();
        mount_filesystems();
        seed_rng();
+       set_time();
        kmod_selftests();
        enable_logging();
        clear_leaks();
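
The new set_time() only acts when the clock reads zero, which is what the RTC-less microvm/virt machines configured above boot with; it assumes the libc still exports stime(). As a rough sketch, a libc without stime() could get the same effect from clock_settime() — only the 1433512680 timestamp is taken from the hunk, the rest is illustrative:

#include <stdio.h>
#include <time.h>

/* Sketch: pin the wall clock when the VM boots with no RTC.
 * clock_settime(CLOCK_REALTIME, ...) stands in for the stime()
 * call used in the hunk above. */
static void set_time_fallback(void)
{
        struct timespec ts = { .tv_sec = 1433512680 };

        if (time(NULL))
                return;         /* clock already set, nothing to do */
        if (clock_settime(CLOCK_REALTIME, &ts) < 0)
                perror("clock_settime");
}
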
index 9b68658..5b98f3e 100644
@@ -233,6 +233,24 @@ static unsigned long read_slab_obj(struct slabinfo *s, const char *name)
        return l;
 }
 
+static unsigned long read_debug_slab_obj(struct slabinfo *s, const char *name)
+{
+       char x[128];
+       FILE *f;
+       size_t l;
+
+       snprintf(x, 128, "/sys/kernel/debug/slab/%s/%s", s->name, name);
+       f = fopen(x, "r");
+       if (!f) {
+               buffer[0] = 0;
+               l = 0;
+       } else {
+               l = fread(buffer, 1, sizeof(buffer), f);
+               buffer[l] = 0;
+               fclose(f);
+       }
+       return l;
+}
 
 /*
  * Put a size string together
@@ -409,14 +427,18 @@ static void show_tracking(struct slabinfo *s)
 {
        printf("\n%s: Kernel object allocation\n", s->name);
        printf("-----------------------------------------------------------------------\n");
-       if (read_slab_obj(s, "alloc_calls"))
+       if (read_debug_slab_obj(s, "alloc_traces"))
+               printf("%s", buffer);
+       else if (read_slab_obj(s, "alloc_calls"))
                printf("%s", buffer);
        else
                printf("No Data\n");
 
        printf("\n%s: Kernel object freeing\n", s->name);
        printf("------------------------------------------------------------------------\n");
-       if (read_slab_obj(s, "free_calls"))
+       if (read_debug_slab_obj(s, "free_traces"))
+               printf("%s", buffer);
+       else if (read_slab_obj(s, "free_calls"))
                printf("%s", buffer);
        else
                printf("No Data\n");
index 44c4767..a49df89 100644
@@ -3328,9 +3328,11 @@ bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
 
        vcpu->stat.generic.blocking = 1;
 
+       preempt_disable();
        kvm_arch_vcpu_blocking(vcpu);
-
        prepare_to_rcuwait(wait);
+       preempt_enable();
+
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
 
@@ -3340,9 +3342,11 @@ bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
                waited = true;
                schedule();
        }
-       finish_rcuwait(wait);
 
+       preempt_disable();
+       finish_rcuwait(wait);
        kvm_arch_vcpu_unblocking(vcpu);
+       preempt_enable();
 
        vcpu->stat.generic.blocking = 0;
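
The reordering makes each arch hook and its rcuwait counterpart a single non-preemptible unit, so the vCPU task cannot be scheduled out between notifying the architecture and (de)registering the waiter. Stripped to its shape (a sketch; the TASK_INTERRUPTIBLE wait loop is elided):

/* Sketch of the fixed shape; the wait loop is elided. */
static void vcpu_block_shape(struct kvm_vcpu *vcpu, struct rcuwait *wait)
{
        preempt_disable();
        kvm_arch_vcpu_blocking(vcpu);   /* arch: vCPU is about to block */
        prepare_to_rcuwait(wait);       /* register for rcuwait wakeups */
        preempt_enable();

        /* ... wait loop, may schedule() ... */

        preempt_disable();
        finish_rcuwait(wait);           /* deregister, restore task state */
        kvm_arch_vcpu_unblocking(vcpu); /* arch: vCPU has resumed */
        preempt_enable();
}
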